Cluster Setup
A quick guide for installing a K8s cluster. This cluster is intended for testing purposes and NOT suitable for use in Production.
Installation
Install Docker Engine
# https://docs.docker.com/engine/install/ubuntu/
# Remove existing Docker packages
for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done
# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get install ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
Enable Containerd CNI plugin
# Edit /etc/containerd/config.toml and comment out the line below:
disabled_plugins = ["cri"] --> #disabled_plugins = ["cri"]
# Restart Containerd
sudo systemctl restart containerd
Install Kubernetes
# On both MASTER & WORKER
# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
# Disable SWAP
sudo swapoff -a
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
Install Helm
[On MASTER]
# https://helm.sh/docs/intro/install/
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
Configuration
[On MASTER] Initiate Kubernetes Master (Create Cluster)
# https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
# Initialize the control plane using the following command.
sudo kubeadm init --pod-network-cidr=192.168.0.0/16
# Execute the following commands to configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Install the Tigera Calico operator and custom resource definitions.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.2/manifests/tigera-operator.yaml
# Install Calico by creating the necessary custom resource.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.2/manifests/custom-resources.yaml
# Confirm that all of the pods are running with the following command.
watch kubectl get pods -n calico-system
# Generate Token for Worker
sudo kubeadm token create --print-join-command
[On WORKER] Initiate Kubernetes Worker (Join Cluster)
sudo swapoff -a
# Paste the join command generated above (sudo kubeadm join <IP>:<PORT> --token <TOKEN> ...)
[On MASTER] Setup Kubernetes Dashboard
# https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/
# Add kubernetes-dashboard repository
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# Deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
# Setup proxy
kubectl proxy
# Access dashboard at: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard-kong-proxy:443/proxy/#/login
# Setup port forwarding
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
# Open https://127.0.0.1:8443
Creating a Service Account
Create file named dashboard-adminuser.yaml
apiVersion: v1
kind: Secret
metadata:
name: admin-user
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
Deploy config
kubectl apply -f dashboard-adminuser.yaml -n kubernetes-dashboard
Creating a ClusterRoleBinding
Create file named dashboard-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
Deploy config
kubectl apply -f dashboard-clusterrole.yaml -n kubernetes-dashboard
Get secret
kubectl -n kubernetes-dashboard create token admin-user
Generate a long-lived Bearer Token for ServiceAccount
Create file named dashboard-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: admin-user
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
Deploy config
kubectl apply -f dashboard-secret.yaml -n kubernetes-dashboard
Get secret
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d
Last updated
Was this helpful?