Installing the latest highly available Kubernetes cluster with kubeadm

1. Virtual machine information (5 CentOS 7.6 machines)

  • master-01 10.0.2.152
  • master-02 10.0.2.153
  • master-03 10.0.2.154
  • worker-01 10.0.2.155
  • worker-02 10.0.2.156

2. Package versions (the versions below are for reference only; by default kubeadm installs the latest available packages when deploying a kubernetes cluster)

  • kubernetes 1.19.3
  • etcd 3.4.13-0
  • docker 19.03.13
  • calico 3.16.4
  • cni 3.16.4
  • coredns 1.7.0
  • kubernetes dashboard 2.0.4

3. CentOS system configuration (run these steps on all 5 VMs)
Set up /etc/hosts

# cat >> /etc/hosts << EOF
10.0.2.152 master-01
10.0.2.153 master-02
10.0.2.154 master-03
10.0.2.155 worker-01
10.0.2.156 worker-02
EOF

Install dependency packages

# yum update
# yum install -y conntrack ipvsadm ipset jq sysstat curl wget iptables libseccomp
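
// The ipvsadm and ipset packages above are only needed if kube-proxy is later switched to IPVS mode
// (this guide keeps the default iptables mode). In that case the IPVS kernel modules must also be
// loaded on every node; a sketch:
# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
# systemctl restart systemd-modules-load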

Disable the firewall, swap, SELinux and NetworkManager, and flush the default iptables rules

# systemctl stop firewalld && systemctl disable firewalld

# swapoff -a
# sed -i 's/.*swap.*/#&/' /etc/fstab

# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# setenforce 0

# systemctl stop NetworkManager && systemctl disable NetworkManager

# iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT

Set kernel parameters

// Set these parameters according to the kubeadm installation requirements (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) and the calico requirements (https://docs.projectcalico.org/getting-started/kubernetes/requirements)
# modprobe br_netfilter
# ls /proc/sys/net/bridge
bridge-nf-call-arptables  bridge-nf-call-iptables        bridge-nf-filter-vlan-tagged
bridge-nf-call-ip6tables  bridge-nf-filter-pppoe-tagged  bridge-nf-pass-vlan-input-dev
// The values below are the ones required by the kubeadm and calico documents linked above
# cat >> /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# sysctl --system

4. Install and configure Docker (run these steps on all 5 VMs)

// Reference: https://docs.docker.com/engine/install/centos/
# yum install -y yum-utils device-mapper-persistent-data lvm2
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# yum -y install docker-ce docker-ce-cli containerd.io
# systemctl enable docker && systemctl start docker
# mkdir /data/docker -p
// registry-mirrors points at a mirror accelerator, which speeds up pulling official Docker images
// native.cgroupdriver=systemd makes Docker's cgroup driver match the kubelet's
# cat >> /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://4wvlvmti.mirror.aliyuncs.com"],
  "graph": "/data/docker",
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# systemctl daemon-reload
# systemctl restart docker
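
// Verify that Docker picked up the systemd cgroup driver and the new data root; the output should
// contain something like the two lines below:
# docker info | grep -E 'Cgroup Driver|Docker Root Dir'
 Cgroup Driver: systemd
 Docker Root Dir: /data/docker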

5. Install and configure kubelet, kubeadm and kubectl (run these steps on all 5 VMs)

# cat >> /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# yum install -y kubelet kubeadm kubectl
# systemctl enable kubelet 
// Do not start kubelet yet; at this point it would only fail with errors
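
// The command above installs the latest kubelet/kubeadm/kubectl. To pin the versions listed in
// section 2 instead, install explicit versions (a sketch, assuming those packages are still in the repo):
# yum install -y kubelet-1.19.3 kubeadm-1.19.3 kubectl-1.19.3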

6. Install and configure keepalived to provide a highly available entry point to the master nodes; 10.0.2.157 is the VIP (master-01 10.0.2.152, master-02 10.0.2.153)

// Run the following on master-01 10.0.2.152 and master-02 10.0.2.153
# yum install -y keepalived
# cp /etc/keepalived/keepalived.conf{,.bak}
# vim /etc/keepalived/check-apiserver.sh
#!/bin/sh
errorExit() {
 echo "*** $*" 1>&2
 exit 1
}

curl -s -m 2 -k https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/"
if ip addr | grep -q 10.0.2.157; then
 curl -s -m 2 -k https://10.0.2.157:6443/ -o /dev/null || errorExit "Error GET https://10.0.2.157:6443/"
fi

# chmod +x /etc/keepalived/check-apiserver.sh

// Run the following on master-01 10.0.2.152 (adjust interface eth0 below to this host's NIC name)
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
 router_id keepalive-master
}

vrrp_script check_apiserver {
 script "/etc/keepalived/check-apiserver.sh"
 interval 3
 weight -2
}

vrrp_instance VI-kube-master {
   state MASTER
   interface eth0
   virtual_router_id 68
   priority 100
   dont_track_primary
   advert_int 3
   virtual_ipaddress {
     10.0.2.157
   }
   track_script {
       check_apiserver
   }
}

// Run the following on master-02 10.0.2.153 (again, adjust interface eth0 if needed)
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
 router_id keepalive-backup
}

vrrp_script check_apiserver {
 script "/etc/keepalived/check-apiserver.sh"
 interval 3
 weight -2
}

vrrp_instance VI-kube-master {
   state BACKUP
   interface eth0
   virtual_router_id 68
   priority 99
   dont_track_primary
   advert_int 3
   virtual_ipaddress {
     10.0.2.157
   }
   track_script {
       check_apiserver
   }
}

// Run the following on master-01 10.0.2.152 and master-02 10.0.2.153
# systemctl enable keepalived && systemctl start keepalived
// The log below will contain errors from the check script; they can be ignored for now because the kubernetes apiserver has not been deployed yet.
# journalctl -f -u keepalived
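
// Verify that the VIP has been assigned to master-01; the output should look roughly like this
// (the interface name depends on the host):
# ip addr show eth0 | grep 10.0.2.157
    inet 10.0.2.157/32 scope global eth0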

7. Initialize the kubernetes cluster on master-01 and configure the first master (master-01 10.0.2.152)

// Create the kubeadm-config.yaml file:
// kubernetesVersion pins the kubernetes version, here v1.19.3
// controlPlaneEndpoint is the apiserver entry point; here it is the keepalived VIP 10.0.2.157 plus the apiserver port 6443. If keepalived was not deployed, use the IP and port of one of the apiserver nodes instead
// networking.podSubnet is the pod IP range; it must not overlap with a range already in use by an existing kubernetes cluster and cannot be changed after the cluster is created
// networking.serviceSubnet is the service IP range; it must not overlap with a range already in use by an existing kubernetes cluster and cannot be changed after the cluster is created
# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.3
controlPlaneEndpoint: "10.0.2.157:6443"
networking:
    podSubnet: "172.20.0.0/16"
    # serviceSubnet: "172.21.0.0/20"
imageRepository: registry.aliyuncs.com/google_containers
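
// Optionally pre-pull the control-plane images before initializing; this just makes kubeadm init faster:
# kubeadm config images pull --config=kubeadm-config.yaml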

// Save all of the output of this command; it will be needed again later
# kubeadm init --config=kubeadm-config.yaml --upload-certs
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# curl -k https://10.0.2.157:6443/healthz
ok
# kubectl get pods --all-namespaces
NAMESPACE     NAME                                READY   STATUS    RESTARTS   AGE
kube-system   coredns-6d56c8448f-l25gl            0/1     Pending   0          29m
kube-system   coredns-6d56c8448f-s6mx7            0/1     Pending   0          29m
kube-system   etcd-master-01                      1/1     Running   1          29m
kube-system   kube-apiserver-master-01            1/1     Running   1          29m
kube-system   kube-controller-manager-master-01   1/1     Running   1          29m
kube-system   kube-proxy-hvpt8                    1/1     Running   1          29m
kube-system   kube-scheduler-master-01            1/1     Running   1          29m


8. Install and configure the calico network plugin on master-01 (master-01 10.0.2.152). The coredns pods above remain Pending until a CNI plugin such as calico is installed.

// Official installation guide: https://docs.projectcalico.org/getting-started/kubernetes/self-managed-onprem/onpremises
// For clusters with more than 50 nodes, use: https://docs.projectcalico.org/manifests/calico-typha.yaml
// For clusters with 50 nodes or fewer, use: https://docs.projectcalico.org/manifests/calico.yaml
# mkdir -p /etc/kubernetes/addons
# cd /etc/kubernetes/addons
# wget https://docs.projectcalico.org/manifests/calico.yaml
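// The manifest ships with CALICO_IPV4POOL_CIDR commented out; recent calico versions detect the pod CIDR
// automatically, but it can also be set explicitly to match podSubnet from kubeadm-config.yaml (a sketch):
# vim calico.yaml
  # in the env section of the calico-node container, uncomment and set:
  - name: CALICO_IPV4POOL_CIDR
    value: "172.20.0.0/16"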
# kubectl apply -f calico.yaml
# kubectl get pods -n kube-system
NAME                                     READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7d569d95-xg769   1/1     Running   0          50s
calico-node-gmh9d                        1/1     Running   0          50s
coredns-6d56c8448f-l25gl                 1/1     Running   0          43m
coredns-6d56c8448f-s6mx7                 1/1     Running   0          43m
etcd-master-01                           1/1     Running   1          44m
kube-apiserver-master-01                 1/1     Running   1          44m
kube-controller-manager-master-01        1/1     Running   1          44m
kube-proxy-hvpt8                         1/1     Running   1          43m
kube-scheduler-master-01                 1/1     Running   1          44m

9. Configure the second master on master-02 (master-02 10.0.2.153)

// Use the control-plane join command printed by kubeadm init --config=kubeadm-config.yaml --upload-certs in step 7
// You can now join any number of the control-plane node running the following command on each as root:
//   kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
//     --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d \
//     --control-plane --certificate-key c43293eddf36a32caa706206dc2efd6e0917e03ce82a0dc7090e8e540ea1da98
#   kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
    --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d \
    --control-plane --certificate-key c43293eddf36a32caa706206dc2efd6e0917e03ce82a0dc7090e8e540ea1da98
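
// Note: the join token is valid for 24 hours and the uploaded certificate key for 2 hours.
// If they have expired, regenerate them on master-01 before joining:
# kubeadm token create --print-join-command
# kubeadm init phase upload-certs --upload-certs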
    
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
master-01   Ready    master   52m   v1.19.3
master-02   Ready    master   86s   v1.19.3
# kubectl get pods -n kube-system
NAME                                     READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7d569d95-xg769   1/1     Running   0          9m57s
calico-node-6vczq                        1/1     Running   0          2m45s
calico-node-gmh9d                        1/1     Running   0          9m57s
coredns-6d56c8448f-l25gl                 1/1     Running   0          53m
coredns-6d56c8448f-s6mx7                 1/1     Running   0          53m
etcd-master-01                           1/1     Running   1          53m
etcd-master-02                           1/1     Running   0          2m43s
kube-apiserver-master-01                 1/1     Running   1          53m
kube-apiserver-master-02                 1/1     Running   0          2m43s
kube-controller-manager-master-01        1/1     Running   2          53m
kube-controller-manager-master-02        1/1     Running   0          2m44s
kube-proxy-hvpt8                         1/1     Running   1          53m
kube-proxy-kvv76                         1/1     Running   0          2m45s
kube-scheduler-master-01                 1/1     Running   2          53m
kube-scheduler-master-02                 1/1     Running   0          2m43s

10. Configure the third master on master-03 (master-03 10.0.2.154)

#   kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
    --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d \
    --control-plane --certificate-key c43293eddf36a32caa706206dc2efd6e0917e03ce82a0dc7090e8e540ea1da98
    
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# kubectl get nodes
NAME        STATUS   ROLES    AGE    VERSION
master-01   Ready    master   57m    v1.19.3
master-02   Ready    master   7m1s   v1.19.3
master-03   Ready    master   95s    v1.19.3
# kubectl get pods -n kube-system -o wide
NAME                                     READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES
calico-kube-controllers-7d569d95-xg769   1/1     Running   0          16m     172.20.184.67   master-01   <none>           <none>
calico-node-6vczq                        1/1     Running   0          9m2s    10.0.2.153      master-02   <none>           <none>
calico-node-gmh9d                        1/1     Running   0          16m     10.0.2.152      master-01   <none>           <none>
calico-node-lwzcn                        1/1     Running   0          3m36s   10.0.2.154      master-03   <none>           <none>
coredns-6d56c8448f-l25gl                 1/1     Running   0          59m     172.20.184.66   master-01   <none>           <none>
coredns-6d56c8448f-s6mx7                 1/1     Running   0          59m     172.20.184.65   master-01   <none>           <none>
etcd-master-01                           1/1     Running   1          59m     10.0.2.152      master-01   <none>           <none>
etcd-master-02                           1/1     Running   0          9m      10.0.2.153      master-02   <none>           <none>
etcd-master-03                           1/1     Running   0          3m33s   10.0.2.154      master-03   <none>           <none>
kube-apiserver-master-01                 1/1     Running   1          59m     10.0.2.152      master-01   <none>           <none>
kube-apiserver-master-02                 1/1     Running   0          9m      10.0.2.153      master-02   <none>           <none>
kube-apiserver-master-03                 1/1     Running   0          3m34s   10.0.2.154      master-03   <none>           <none>
kube-controller-manager-master-01        1/1     Running   2          59m     10.0.2.152      master-01   <none>           <none>
kube-controller-manager-master-02        1/1     Running   0          9m1s    10.0.2.153      master-02   <none>           <none>
kube-controller-manager-master-03        1/1     Running   0          3m34s   10.0.2.154      master-03   <none>           <none>
kube-proxy-hvpt8                         1/1     Running   1          59m     10.0.2.152      master-01   <none>           <none>
kube-proxy-kvv76                         1/1     Running   0          9m2s    10.0.2.153      master-02   <none>           <none>
kube-proxy-wxvkw                         1/1     Running   0          3m36s   10.0.2.154      master-03   <none>           <none>
kube-scheduler-master-01                 1/1     Running   2          59m     10.0.2.152      master-01   <none>           <none>
kube-scheduler-master-02                 1/1     Running   0          9m      10.0.2.153      master-02   <none>           <none>
kube-scheduler-master-03                 1/1     Running   0          3m34s   10.0.2.154      master-03   <none>           <none>

11. Configure the first worker on worker-01 (worker-01 10.0.2.155)

// Use the worker join command printed by kubeadm init --config=kubeadm-config.yaml --upload-certs in step 7
// Then you can join any number of worker nodes by running the following on each as root:
// kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
//     --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d 
# kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
    --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d 

12. Configure the second worker on worker-02 (worker-02 10.0.2.156)

# kubeadm join 10.0.2.157:6443 --token 9irotq.mhvolcklnjyxbzf8 \
    --discovery-token-ca-cert-hash sha256:ed08b302533983ad6200b75dae318bd6c9bf64976f1bb32d25efd38916c5805d 

13. On master-01 (or any other master node), verify that all workers have joined the highly available kubernetes cluster (master-01 10.0.2.152)

# kubectl get nodes -o wide
NAME        STATUS   ROLES    AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
master-01   Ready    master   68m     v1.19.3   10.0.2.152    <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.13
master-02   Ready    master   17m     v1.19.3   10.0.2.153    <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.13
master-03   Ready    master   11m     v1.19.3   10.0.2.154    <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.13
worker-01   Ready    <none>   4m17s   v1.19.3   10.0.2.155    <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.13
worker-02   Ready    <none>   91s     v1.19.3   10.0.2.156    <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.13
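
// Worker nodes show ROLES <none> by default; they can optionally be labeled (a sketch):
# kubectl label node worker-01 node-role.kubernetes.io/worker=
# kubectl label node worker-02 node-role.kubernetes.io/worker=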

14. Install and configure the dashboard on master-01 (master-01 10.0.2.152)

// Official documentation: https://github.com/kubernetes/dashboard#getting-started
# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml
// Change the service to the NodePort type so the dashboard is reachable from outside the cluster.
# vim recommended.yaml
... other parts omitted ...
---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort    # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000    # add this line
  selector:
    k8s-app: kubernetes-dashboard

---
... other parts omitted ...

# kubectl apply -f recommended.yaml

# kubectl get deployment kubernetes-dashboard -n kubernetes-dashboard
NAME                   READY   UP-TO-DATE   AVAILABLE   AGE
kubernetes-dashboard   1/1     1            1           6m20s
# kubectl -n kubernetes-dashboard get pods -o wide
NAME                                         READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-7b59f7d4df-blvgh   1/1     Running   0          6m49s   172.20.171.2    worker-01   <none>           <none>
kubernetes-dashboard-665f4c5ff-lps28         1/1     Running   0          6m50s   172.20.37.194   worker-02   <none>           <none>
# kubectl get services kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.99.69.223   <none>        443:30000/TCP   2m29s
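
// Check that the dashboard is reachable through the NodePort; this should return HTTP 200
// (using the keepalived VIP here, but any node IP works):
# curl -k -s -o /dev/null -w '%{http_code}\n' https://10.0.2.157:30000/
200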

// Create the service account
# kubectl create sa dashboard-admin -n kubernetes-dashboard
// Create the cluster role binding
# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
// Retrieve the token from the dashboard-admin secret
# kubectl describe secret -n kubernetes-dashboard $(kubectl get secrets -n kubernetes-dashboard | grep dashboard-admin-token | awk '{print $1}') | egrep ^token | awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6IkQybEhHdEQxWWNoMmpTb2dGekZ4Vm1vdjRROWpNaXVCZDhPeWdJR0dTcTQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tN2JndG4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNWIxYWNhZTktMmE0OC00NTBiLWJlM2UtNDA5N2Y0Yjk4YzQ1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.c44oW-QKpJntqI_Lj-_nwRnI9K3lFJTJH77R6QOYOcw_qjEnXm9yB7tn0LZhNptcfE8HRMlgiwEIIvRw1HCZSkH68lAexKQi2aeOyR3baEQ8GLcTMpX8SHf8DZjXxIqNpWo5NA_lwhBBn2UdjfXecvSboUGkjJhFTRR7yMyR2-vWP4tgGe3xl-Bab8d8dAGfLx5LUNA8Au9v7pHxOF600qXbp5fPEQTK0GJ9j4SnosqfhUredZWYHStk2vYb3DMIMNF4wCATDjra8R5s3J_Ix6lkS-675hR74s6bIjNOgTDpc0jM-5SgqrXqTGwn1Qz4P2onxZtRInYhBP0wwyzhig

15. Log in to the kubernetes dashboard with the token (use the keepalived VIP, or the IP of any master node, on port 30000, e.g. https://10.0.2.157:30000/)


(Screenshot: logging in with the token)

(Screenshot: the kubernetes dashboard)

At this point, the highly available kubernetes cluster installed with kubeadm is ready to use.

References:

  1. https://docs.projectcalico.org/getting-started/kubernetes/requirements
  2. https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
