k8s learning (34): deploying a highly available k8s cluster

Building a highly available k8s cluster with kubeadm + keepalived.

Steps

1 Prepare the hosts; install docker, kubeadm, kubectl, and kubelet

2 Set up passwordless SSH between the nodes, as sketched below
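A minimal sketch, assuming root login and that the hostnames master1/master2/master3 resolve (e.g. via /etc/hosts):

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa   # generate a key pair without a passphrase
for host in master1 master2 master3; do
  ssh-copy-id root@${host}                 # push the public key to each node
done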

3 Docker configuration file

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
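After writing daemon.json, restart docker so the systemd cgroup driver takes effect, and verify:

systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup   # expect: Cgroup Driver: systemd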

4 Enable IPVS

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

Install kubeadm 1.18.2
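A minimal install sketch, assuming the kubernetes yum repository has already been configured on every node:

yum install -y kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2 --disableexcludes=kubernetes
systemctl enable --now kubelet   # kubelet will crash-loop until kubeadm init/join runs; that is expected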

Deploy Keepalived + LVS to make the apiserver highly available

Install Keepalived + LVS on the master nodes

yum install -y socat keepalived ipvsadm conntrack

Edit /etc/keepalived/keepalived.conf on master1

global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.0.199
    }
}
virtual_server 192.168.0.199 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.6 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.16 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.26 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

keepalived.conf on master2

global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.0.199
    }
}
virtual_server 192.168.0.199 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.6 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.16 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.26 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

keepalived.conf on master3

global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.0.199
    }
}
virtual_server 192.168.0.199 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.6 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.16 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.0.26 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

Note: Keepalived must run in BACKUP state with non-preemptive mode (nopreempt). Suppose master1 goes down: after it boots back up, the VIP will not float back to it, so k8s stays healthy the whole time. If the VIP floated back to master1 the moment it started, while its apiserver was not yet fully up, the cluster would run into trouble.

Start keepalived in order: master1 > master2 > master3

systemctl enable keepalived  && systemctl start keepalived  && systemctl status keepalived

After starting, run ip a to confirm the VIP is bound to ens33.
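For example, on whichever node currently holds MASTER:

ip a show ens33 | grep 192.168.0.199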

Initialize the cluster

On master1, create kubeadm-config.yaml

cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 192.168.0.199:6443
## use this image repository for mainland-China networks
#imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - 192.168.0.6
  - 192.168.0.16
  - 192.168.0.26
  - 192.168.0.56
  - 192.168.0.199
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

On master1, initialize

kubeadm init --config kubeadm-config.yaml
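On success, kubeadm prints two join commands: one with --control-plane for the other masters and one for worker nodes; save both for the join steps below. Once kube-proxy is running you can also confirm that IPVS mode took effect:

ipvsadm -Ln   # kube-proxy in ipvs mode creates virtual servers visible here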

On master1, configure kubectl

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Check the cluster status

kubectl get nodes

On master1, install the calico network plugin

kubectl apply -f https://raw.githubusercontent.com/luckylucky421/kubernetes1.17.3/master/calico.yaml
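Wait for the calico pods to reach Running before expecting nodes to turn Ready:

kubectl get pods -n kube-system | grep calico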

Check the cluster status again

kubectl get nodes

Copy the certificates from master1 to master2 and master3

1 Create the certificate directories on master2 and master3

cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

2 Copy the certificates

scp -r /etc/kubernetes/pki master2:/etc/kubernetes/
scp -r /etc/kubernetes/pki master3:/etc/kubernetes/

3 Join the cluster as control-plane nodes (note the --control-plane flag)

kubeadm join 192.168.0.199:6443 --token 7dwluq.x6nypje7h55rnrhl \
--discovery-token-ca-cert-hash sha256:fa75619ab0bb6273126350a9dbda9aa6c89828c2c4650299fe1647ab510a7e6c --control-plane
Then configure kubectl on master2 and master3:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the worker nodes

kubeadm join 192.168.0.199:6443 --token 7dwluq.x6nypje7h55rnrhl \
--discovery-token-ca-cert-hash sha256:fa75619ab0bb6273126350a9dbda9aa6c89828c2c4650299fe1647ab510a7e6c
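If the token has expired (the default TTL is 24 hours), generate a fresh join command on master1:

kubeadm token create --print-join-command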

Check the node status

kubectl get nodes

Extra: use traefik as the cluster ingress

1 Generate the traefik certificate

mkdir  ~/ikube/tls/ -p
echo """
[req]
distinguished_name = req_distinguished_name
prompt = yes

[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_value = CN

stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_value = Beijing

localityName = Locality Name (eg, city)
localityName_value = Haidian

organizationName = Organization Name (eg, company)
organizationName_value = Channelsoft

organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_value = R & D Department

commonName = Common Name (eg, your name or your server\'s hostname)
commonName_value = *.multi.io


emailAddress = Email Address
emailAddress_value = lentil1016@gmail.com
""" > ~/ikube/tls/openssl.cnf
openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key

2 Apply the yaml to create traefik

kubectl apply -f https://raw.githubusercontent.com/luckylucky421/kubernetes1.17.3/master/traefik.yaml

3 Check that it deployed successfully

kubectl get pods -n kube-system|grep traefik

Extra: install kubernetes-dashboard 2.0

1 Apply the yaml

kubectl apply -f https://raw.githubusercontent.com/luckylucky421/kubernetes1.17.3/master/kubernetes-dashboard.yaml

2 Change the dashboard Service type to NodePort

kubectl -n kubernetes-dashboard edit svc kubernetes-dashboard
# change spec.type from ClusterIP to NodePort
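Equivalently, as a one-liner (assuming the default Service name kubernetes-dashboard in the kubernetes-dashboard namespace):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
kubectl get svc -n kubernetes-dashboard   # note the assigned NodePort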

3 Log in to the dashboard with the default token; after logging in, only the default namespace is visible

kubectl get secret -n kubernetes-dashboard
kubectl describe secret kubernetes-dashboard-token-ngcmg -n kubernetes-dashboard

4 Grant admin rights so the token can view every namespace

kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard

5 View the token again and log in

kubectl describe secret kubernetes-dashboard-token-ngcmg -n kubernetes-dashboard

Extra: install the metrics monitoring plugin

kubectl apply -f https://raw.githubusercontent.com/luckylucky421/kubernetes1.17.3/master/metrics.yaml
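Once the metrics-server pod is up (it can take a minute or two before metrics are available), verify with:

kubectl top nodes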