Deploying a Highly Available Cluster with kubeadm
Stacked etcd topology: this approach requires less infrastructure; the etcd members and the control-plane nodes are co-located.
https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/high-availability/
etcd is a highly available distributed key-value store, often used for service discovery. It is written in Go and implements the Raft consensus algorithm. As a highly available key-value store, etcd is designed for clustering from the ground up. Because Raft needs a majority of members to vote on every decision, an etcd cluster should have an odd number of members; 3, 5, or 7 nodes per cluster is the recommended size.
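As a quick sanity check on those numbers: quorum for an N-member cluster is floor(N/2)+1, so an even member count adds no fault tolerance over the odd count below it:
# quorum = floor(N/2) + 1; tolerated failures = N - quorum
for n in 1 2 3 4 5 6 7; do
  q=$(( n / 2 + 1 ))
  echo "members=$n quorum=$q tolerated_failures=$(( n - q ))"
done
# members=3 and members=4 both tolerate one failure, which is why odd sizes are preferred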
Prepare three machines that satisfy kubeadm's minimum requirements for control-plane nodes:
Role | IP |
---|---|
master1 | 10.0.0.2 |
master2 | 10.0.0.3 |
master3 | 10.0.0.4 |
VIP (virtual IP) | 10.0.0.10 |
1. haproxy + keepalived
Build the haproxy image
cat >Dockerfile<<\EOF
FROM alpine:3.7
RUN apk add tzdata \
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone
RUN apk add --no-cache haproxy
CMD ["haproxy","-f","/etc/haproxy/haproxy.cfg"]
EOF
docker build . -t haproxy:v1
Start the haproxy container on both the master and backup nodes
mkdir -p /usr/local/etc/haproxy/
cat >/usr/local/etc/haproxy/haproxy.cfg<<\EOF
global
    #log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 65535
    chroot /var/lib/haproxy
    user haproxy
    group haproxy
    #daemon

defaults
    log global
    mode tcp
    option tcplog
    option dontlognull
    retries 3
    option redispatch
    timeout queue 1m
    timeout connect 50s
    timeout client 50s
    timeout server 50s
    timeout check 10s
    maxconn 102400

listen stats
    stats enable
    bind *:8081
    mode http
    option httplog
    log global
    maxconn 10
    stats refresh 30s
    stats uri /haproxy/stats
    stats auth haproxy:haproxy9527
    stats hide-version
    # stats admin if TRUE

frontend fe_k8s_6444
    bind *:6444
    mode tcp
    log global
    option tcplog
    default_backend be_k8s_6443

backend be_k8s_6443
    mode tcp
    balance roundrobin
    server k8s-master01 10.0.0.2:6443 maxconn 4096 weight 1 check inter 6s fall 3 rise 3
    server k8s-master02 10.0.0.3:6443 maxconn 4096 weight 1 check inter 6s fall 3 rise 3
    server k8s-master03 10.0.0.4:6443 maxconn 4096 weight 1 check inter 6s fall 3 rise 3
EOF
docker run -d --name haproxy \
--restart=always \
--net=host \
-v /usr/local/etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg \
haproxy:v1
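A quick check on each node that haproxy is listening and the stats page answers, using the port and credentials configured above:
ss -lntp | grep -E ':6444|:8081'
curl -su haproxy:haproxy9527 http://127.0.0.1:8081/haproxy/stats | head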
Graceful reload:
docker kill -s HUP haproxy
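Before reloading, it is worth validating the edited file; haproxy's -c flag only parses the configuration and exits:
docker run --rm \
  -v /usr/local/etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg \
  haproxy:v1 haproxy -c -f /etc/haproxy/haproxy.cfg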
Build the keepalived image
cat >Dockerfile<<\EOF
FROM alpine:3.7
RUN apk add tzdata \
&& cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone
RUN apk add --no-cache keepalived
CMD ["keepalived","-f","/etc/keepalived/keepalived.conf","-P","-l","-n"]
EOF
docker build . -t keepalived:v1
Start keepalived on the master and backup nodes
mkdir -p /usr/local/etc/keepalived
cat >/usr/local/etc/keepalived/keepalived.conf<<\EOF
vrrp_script chk_haproxy {
    # probe the local haproxy frontend; any check that prints "open" on success works here
    script "nc -v -w 1 127.0.0.1 6444 2>&1 | grep open"
    timeout 1
    interval 1   # check every 1 second
    fall 2       # require 2 failures for KO
    rise 2       # require 2 successes for OK
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 50
    priority 100
    advert_int 1
    nopreempt   # do not preempt
    track_script {
        chk_haproxy
    }
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        10.0.0.10/24 dev ens33
    }
}
EOF
# state: MASTER or BACKUP
# priority: the backup node's priority must be lower than the master's; set it to 90 on the backup
# when the track_script has no weight set, weight defaults to 0; consecutive vrrp_script failures then put the instance into the FAULT state, which moves the VIP away
# nopreempt: do not preempt; the VIP only fails over when the current holder becomes unavailable (keepalived honors nopreempt only when state is BACKUP)
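On the backup node the configuration differs only in those two fields; a minimal sketch of the edit, assuming the same file path:
sed -i -e 's/state MASTER/state BACKUP/' \
       -e 's/priority 100/priority 90/' \
       /usr/local/etc/keepalived/keepalived.conf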
docker run -d --name keepalived \
--restart=always \
--net=host --cap-add=NET_ADMIN \
-v /usr/local/etc/keepalived/keepalived.conf:/etc/keepalived/keepalived.conf \
keepalived:v1
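With both containers running, the VIP should be bound on exactly one node at a time:
ip addr show ens33 | grep 10.0.0.10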
2. Deploy with kubeadm
To extend the certificate validity period, modify the kubeadm source code and rebuild it; do this on all machines.
master1
mkdir /usr/local/kubernetes/manifests -p
cd /usr/local/kubernetes/manifests/
kubeadm config print init-defaults > kubeadm-config.yaml
# Change the following values
advertiseAddress: 10.0.0.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kubernetesVersion: v1.19.2
podSubnet: 192.168.0.0/16
serviceSubnet: 10.96.0.0/12
# Append at the end:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
# Add the following under clusterName: kubernetes:
controlPlaneEndpoint: "10.0.0.10:6444"
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
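After a successful init, set up kubectl access as kubeadm's output instructs:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config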
wget https://kuboard.cn/install-script/calico/calico-3.13.1.yaml
kubectl apply -f calico-3.13.1.yaml
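The node stays NotReady until the calico pods are up; watch them start, then confirm:
kubectl get pods -n kube-system -w
kubectl get nodes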
master2,master3
kubeadm join 10.0.0.10:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:01c9edd09d8838d0b12ba6e043b72cb4f6814613ef5b857b62d6645b4397a11a \
--control-plane --certificate-key fd5e0e7c3e78d718aa0c935e46423f96eb8420231ead56e4e5070c5090f0606a
Worker nodes
kubeadm join 10.0.0.10:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:01c9edd09d8838d0b12ba6e043b72cb4f6814613ef5b857b62d6645b4397a11a
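The join token and certificate key above come from kubeadm-init.log and expire; if they have lapsed, regenerate them on master1 rather than re-running init:
# print a fresh worker join command (tokens expire after 24h)
kubeadm token create --print-join-command
# re-upload the control-plane certs and print a new --certificate-key (expires after 2h)
kubeadm init phase upload-certs --upload-certs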
Check cluster health
kubectl get pods --all-namespaces
# 0369cf4303ff is the local etcd image ID; find yours with: docker images | grep etcd
docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes 0369cf4303ff etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://10.0.0.2:2379 endpoint health --cluster
kubectl get endpoints kube-controller-manager -n kube-system -o yaml
kubectl get endpoints kube-scheduler -n kube-system -o yaml
3. etcd backup and restore for kubeadm
# install the etcd package only for the etcdctl client binary
yum -y install etcd
mkdir -p /data/etcd && cd /data/etcd/
# Back up on each node
ETCDCTL_API=3 etcdctl snapshot save snap.db \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/peer.crt \
--key=/etc/kubernetes/pki/etcd/peer.key
# Inspect the snapshot
ETCDCTL_API=3 etcdctl snapshot status snap.db
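A snapshot is only useful if taken regularly; one way is a daily cron job (a sketch; the /data/etcd path and the 02:00 schedule are assumptions):
cat >/etc/cron.d/etcd-backup<<\EOF
0 2 * * * root ETCDCTL_API=3 /usr/bin/etcdctl snapshot save /data/etcd/snap-$(date +\%F).db --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key
EOF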
# Check the status of the existing members
ETCDCTL_API=3 etcdctl \
--endpoints='https://10.0.0.2:2379,https://10.0.0.3:2379,https://10.0.0.4:2379' \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/peer.crt \
--key=/etc/kubernetes/pki/etcd/peer.key \
--write-out="table" \
endpoint status
# Remove a failed member
ETCDCTL_API=3 etcdctl \
--endpoints='https://10.0.0.2:2379,https://10.0.0.3:2379,https://10.0.0.4:2379' \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/peer.crt \
--key=/etc/kubernetes/pki/etcd/peer.key \
member remove a705d171b1d0bf1c
# to re-add it later:
# member add k8s-slave02 --peer-urls=https://10.0.0.4:2380
# member list
Restore
#1. First stop the kube-apiserver and etcd containers; moving the static pod manifests makes kubelet shut them down
mv /etc/kubernetes/manifests /etc/kubernetes/manifests.bak
mv /var/lib/etcd/ /var/lib/etcd.bak
#2. Restore the snapshot
ETCDCTL_API=3 etcdctl \
snapshot restore snap.db \
--data-dir=/var/lib/etcd
#3. Start the kube-apiserver and etcd containers again
mv /etc/kubernetes/manifests.bak /etc/kubernetes/manifests
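Note that restoring this way on a multi-member cluster rebuilds a single-member cluster; to keep all three members, restore on each node with its own identity. A sketch for master1, assuming member names match the hostnames in this section:
ETCDCTL_API=3 etcdctl snapshot restore snap.db \
  --name master1 \
  --initial-cluster master1=https://10.0.0.2:2380,master2=https://10.0.0.3:2380,master3=https://10.0.0.4:2380 \
  --initial-advertise-peer-urls https://10.0.0.2:2380 \
  --data-dir=/var/lib/etcd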
4. dashboard
mkdir /data/k8s-yaml/dashboard/ && cd /data/k8s-yaml/dashboard/
# requires network access to raw.githubusercontent.com
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml
# check whether it is running yet
kubectl get pod -n kubernetes-dashboard
# create a ServiceAccount and bind it to cluster-admin
kubectl create sa admin-user -n kubernetes-dashboard
kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:admin-user
# fetch the login token
kubectl -n kubernetes-dashboard describe secret \
$(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
#1. Edit the kubernetes-dashboard service and change type: ClusterIP to type: NodePort
#kubectl --namespace=kubernetes-dashboard edit service kubernetes-dashboard
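Equivalently, as a one-line patch:
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'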
#2. Access it through an ingress
Access through an ingress
cat dash-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: "nginx"
    # enable use-regex so that path supports regular-expression matching
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /
    # defaults to true; with TLS enabled, HTTP requests are 308-redirected to HTTPS
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # defaults to http; HTTPS makes the controller proxy_pass to the backend over https://
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  tls:
  - hosts:
    - dash.zs.com
    secretName: kubernetes-dashboard-certs
  rules:
  - host: dash.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
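Apply it and test from outside; curl's --resolve pins dash.zs.com to the VIP, assuming the nginx ingress controller is reachable on 10.0.0.10:
kubectl apply -f dash-ingress.yaml
curl -k --resolve dash.zs.com:443:10.0.0.10 https://dash.zs.com/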
# The simpler way
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
  - host: dash.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443