Kubernetes Multi Master CNI Flannel
I used the following VMs for this guide; the hostnames and IP addresses are listed in the /etc/hosts entries below.
Hostname
- Set hostname
$ sudo hostnamectl set-hostname lb-kube-01   # repeat on every node with its own hostname
- Set /etc/hosts like this
$ sudo vim /etc/hosts
...
10.10.0.100 vip-master-kube
10.10.0.101 lb-kube-01
10.10.0.102 lb-kube-02
10.10.0.11 kube-master01
10.10.0.12 kube-master02
10.10.0.13 kube-master03
10.10.0.21 kube-worker01
10.10.0.22 kube-worker02
10.10.0.23 kube-worker03
...
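To confirm the entries resolve correctly before continuing, a quick check (getent reads /etc/hosts through NSS):
$ getent hosts vip-master-kube
10.10.0.100     vip-master-kube
$ ping -c 1 kube-master01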
Setup Load Balancer
- On lb-kube-01 and lb-kube-02
- Install Keepalived
$ sudo apt update && sudo apt upgrade -y
$ sudo apt install keepalived -y
- Edit keepalived.conf
# lb-kube-01
$ sudo vim /etc/keepalived/keepalived.conf
---
! /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}

vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens3
    virtual_router_id 151
    priority 255
    authentication {
        auth_type PASS
        auth_pass P@##D321!
    }
    virtual_ipaddress {
        10.10.0.100/24
    }
    track_script {
        check_apiserver
    }
}
---
# lb-kube-02
$ sudo vim /etc/keepalived/keepalived.conf
---
! /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}

vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens3
    virtual_router_id 151
    priority 254
    authentication {
        auth_type PASS
        auth_pass P@##D321!
    }
    virtual_ipaddress {
        10.10.0.100/24
    }
    track_script {
        check_apiserver
    }
}
---
- Create check_apiserver.sh
# All Load Balancer Nodes
$ sudo vim /etc/keepalived/check_apiserver.sh
---
#!/bin/sh

APISERVER_VIP=10.10.0.100
# Must match the port HAProxy listens on (6443 in this guide)
APISERVER_DEST_PORT=6443

errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
if ip addr | grep -q ${APISERVER_VIP}; then
    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi
---
$ sudo chmod +x /etc/keepalived/check_apiserver.sh
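You can dry-run the check before keepalived picks it up; until HAProxy and the API servers are running it will print the error path, which is expected at this stage:
$ sudo /etc/keepalived/check_apiserver.sh
*** Error GET https://localhost:6443/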
$ sudo systemctl restart keepalived
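After restarting keepalived on both nodes, the virtual IP should appear on lb-kube-01 (a quick check; ens3 matches the interface in the config above):
$ ip addr show ens3 | grep 10.10.0.100
- Install HAProxy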
# All Load Balancer Nodes
$ sudo apt install haproxy -y
$ sudo vim /etc/haproxy/haproxy.cfg
---
global
    ...
defaults
    ...
#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend apiserver
    # Bind on all addresses, not just the VIP: check_apiserver.sh
    # queries localhost, and the backup LB cannot bind an address
    # it does not currently hold
    bind *:6443
    mode tcp
    option tcplog
    default_backend apiserver

#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance roundrobin
    server kube-master01 10.10.0.11:6443 check
    server kube-master02 10.10.0.12:6443 check
    server kube-master03 10.10.0.13:6443 check
---
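Before restarting, you can validate the configuration file syntax:
$ sudo haproxy -c -f /etc/haproxy/haproxy.cfg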
$ sudo systemctl restart haproxy
Setup Kubernetes Master
- On ALL Master and Worker Nodes
- Update Packages
$ sudo apt update; sudo apt upgrade -y
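kubeadm requires swap to be disabled; if your VMs have swap on, turn it off before continuing (a standard step, assuming a swap entry exists in /etc/fstab):
$ sudo swapoff -a
$ sudo sed -i '/ swap / s/^/#/' /etc/fstab   # keep swap disabled across reboots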
- Install Docker
$ sudo apt install docker.io -y
$ sudo systemctl enable docker
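Optionally, switch Docker's cgroup driver to systemd, which kubeadm recommends on systemd hosts (a sketch; if you skip this, kubeadm init will print a warning but the matching cgroupfs defaults still work):
$ sudo vim /etc/docker/daemon.json
---
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
---
$ sudo systemctl restart docker
- Install kubectl, kubelet, kubeadm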
$ sudo apt install -y apt-transport-https
$ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
$ sudo vim /etc/apt/sources.list.d/kubernetes.list
---
deb http://apt.kubernetes.io/ kubernetes-xenial main
---
$ sudo apt update; sudo apt install -y kubectl kubelet kubeadm
$ sudo apt-mark hold kubectl kubelet kubeadm docker.io
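To confirm the tools were installed and pinned:
$ kubeadm version -o short
$ kubectl version --client --short
$ apt-mark showhold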
- On ALL Master Nodes
- Initialize the Masters
# In kube-master01
$ vim kubeadm-config.yml
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable
controlPlaneEndpoint: "10.10.0.100:6443"
networking:
  podSubnet: 10.244.0.0/16
---
$ sudo kubeadm init --config=kubeadm-config.yml --upload-certs
---
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.10.0.100:6443 --token o9fqj0.67rlluqczomeks6o \
--discovery-token-ca-cert-hash sha256:bd7d95a0faf6a4d25c76a768be58eb0472d1e85027442506d7208b5a247b885a \
--control-plane --certificate-key 8dcc6cf62b71d2698aaf95d76c8e3fa3d70c78c0a0a7459e0b44836991f644cc
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.0.100:6443 --token o9fqj0.67rlluqczomeks6o \
--discovery-token-ca-cert-hash sha256:bd7d95a0faf6a4d25c76a768be58eb0472d1e85027442506d7208b5a247b885a
---
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
$ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml   # Flannel's default network is 10.244.0.0/16, matching podSubnet above
# In kube-master02 and kube-master03
$ sudo kubeadm join 10.10.0.100:6443 --token o9fqj0.67rlluqczomeks6o --discovery-token-ca-cert-hash sha256:bd7d95a0faf6a4d25c76a768be58eb0472d1e85027442506d7208b5a247b885a --control-plane --certificate-key 8dcc6cf62b71d2698aaf95d76c8e3fa3d70c78c0a0a7459e0b44836991f644cc
$ kubectl get nodes
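Once the remaining masters have joined and Flannel is running, all three should report Ready; you can also watch the Flannel pods come up (the kube-flannel.yml above creates them in kube-system):
$ kubectl get pods -n kube-system -o wide | grep flannel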
- On ALL Worker Nodes
- Initialize the Workers
$ sudo kubeadm join 10.10.0.100:6443 --token o9fqj0.67rlluqczomeks6o --discovery-token-ca-cert-hash sha256:bd7d95a0faf6a4d25c76a768be58eb0472d1e85027442506d7208b5a247b885a
- Verify Cluster
$ kubectl get node
$ kubectl get all -A
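As a final smoke test, you can deploy something and reach it through a NodePort (a minimal sketch; the deployment name nginx-test and the nginx image are arbitrary choices):
$ kubectl create deployment nginx-test --image=nginx
$ kubectl expose deployment nginx-test --port=80 --type=NodePort
$ kubectl get svc nginx-test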