System parameter tuning

cat << EOF | tee 99-k8s.conf
#sysctls for k8s node config
net.ipv4.tcp_slow_start_after_idle=0
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.softlockup_all_cpu_backtrace=1
kernel.softlockup_panic=1
fs.file-max=2097152
fs.inotify.max_user_instances=8192
fs.inotify.max_queued_events=16384
vm.max_map_count=262144
fs.may_detach_mounts=1
net.core.netdev_max_backlog=16384
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.core.somaxconn=32768
net.ipv4.ip_forward=1
net.ipv4.tcp_max_syn_backlog=8096
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.tcp_rmem=4096 12582912 16777216
EOF
# Distribute the file to every node
scp -r 99-k8s.conf 192.168.2.175:/etc/sysctl.d/
scp -r 99-k8s.conf 192.168.2.176:/etc/sysctl.d/
scp -r 99-k8s.conf 192.168.2.177:/etc/sysctl.d/
scp -r 99-k8s.conf 192.168.2.185:/etc/sysctl.d/
scp -r 99-k8s.conf 192.168.2.187:/etc/sysctl.d/
# Apply the settings
sysctl -p /etc/sysctl.d/99-k8s.conf
ssh 192.168.2.175 sysctl -p /etc/sysctl.d/99-k8s.conf
ssh 192.168.2.176 sysctl -p /etc/sysctl.d/99-k8s.conf
ssh 192.168.2.177 sysctl -p /etc/sysctl.d/99-k8s.conf
ssh 192.168.2.185 sysctl -p /etc/sysctl.d/99-k8s.conf
ssh 192.168.2.187 sysctl -p /etc/sysctl.d/99-k8s.conf
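The five scp/ssh pairs above (and the similar per-host blocks later in this section) can be collapsed into a loop; a minimal sketch, assuming passwordless SSH to the same five nodes:

# Hypothetical helper: distribute and apply the sysctl file in one pass
NODES="192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187"
for node in ${NODES}; do
  scp 99-k8s.conf ${node}:/etc/sysctl.d/
  ssh ${node} sysctl -p /etc/sysctl.d/99-k8s.conf
done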

Node dependency installation

# Run on every node
# CentOS 8
dnf install -y epel-release
sed -i "s/enabled=0/enabled=1/" /etc/yum.repos.d/CentOS-PowerTools.repo
dnf -y update
dnf install -y dnf-utils ipvsadm telnet wget net-tools \
                conntrack ipset jq iptables curl sysstat \
                libseccomp socat nfs-utils fuse lvm2 device-mapper-persistent-data fuse-devel
# Ubuntu
apt update
apt upgrade -y
apt install -y ipvsadm telnet wget net-tools conntrack ipset \
 jq iptables curl sysstat libltdl7 libseccomp2 socat nfs-common \
 fuse ceph-common software-properties-common
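As a quick sanity check after either install, one can verify that the key tools from the package lists above ended up on PATH (a minimal sketch; the same check works on both distros):

for bin in ipvsadm conntrack ipset jq socat curl wget; do
  command -v ${bin} >/dev/null || echo "missing: ${bin}"
done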

Docker deployment

# Run on every node
# CentOS
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Add the Docker daemon configuration
mkdir -p /etc/docker
cat << EOF | tee /etc/docker/daemon.json
{
    "max-concurrent-downloads": 20,
    "data-root": "/apps/docker/data",
    "exec-root": "/apps/docker/root",
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
    "log-driver": "json-file",
    "bridge": "none", 
    "oom-score-adjust": -1000,
    "debug": false,
    "log-opts": {
        "max-size": "100M",
        "max-file": "10"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 65535,
            "Soft": 65535
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 65535,
            "Soft": 65535
        },
       "core": {
            "Name": "core",
            "Hard": -1,
            "Soft": -1
      }

    }
}
EOF
# Install Docker
dnf install -y http://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.13-3.1.el7.x86_64.rpm
dnf install -y docker-ce
# Reload systemd unit files
systemctl daemon-reload
# Restart Docker
systemctl restart docker
# Enable start on boot
systemctl enable docker
# Ubuntu 
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
   "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io
# Add the Docker daemon configuration
mkdir -p /etc/docker
cat << EOF | tee /etc/docker/daemon.json
{
    "max-concurrent-downloads": 20,
    "data-root": "/apps/docker/data",
    "exec-root": "/apps/docker/root",
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
    "log-driver": "json-file",
    "bridge": "none", 
    "oom-score-adjust": -1000,
    "debug": false,
    "log-opts": {
        "max-size": "100M",
        "max-file": "10"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 65535,
            "Soft": 65535
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 65535,
            "Soft": 65535
        },
       "core": {
            "Name": "core",
            "Hard": -1,
            "Soft": -1
      }

    }
}
EOF
# Reload systemd unit files
systemctl daemon-reload
# Restart Docker
systemctl restart docker
# Enable start on boot
systemctl enable docker
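On either distro, once Docker has restarted it is worth confirming that the daemon actually picked up daemon.json; a minimal check, querying the two fields that correspond to the data-root and log-driver set above:

docker info --format '{{.DockerRootDir}} {{.LoggingDriver}}'
# expected output: /apps/docker/data json-file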
# The same configuration applies to both Ubuntu and CentOS
# Auto-load IPVS and netfilter kernel modules
cat << EOF | tee /etc/modules-load.d/k8s-basic-modules.conf
br_netfilter
nf_conntrack_ipv4
EOF

cat << EOF | tee /etc/modules-load.d/k8s-ipvs-modules.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
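systemd-modules-load only reads these files at boot, so to load the modules immediately and confirm they are present, something like the following works (note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so that entry may need adjusting):

systemctl restart systemd-modules-load.service
lsmod | grep -E 'br_netfilter|nf_conntrack|ip_vs'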

CNI deployment

# Download the CNI plugins
mkdir cni
cd cni
wget https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz
tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz
rm -rf cni-plugins-linux-amd64-v0.8.5.tgz
# Create the remote directory for the binaries
ssh 192.168.2.175 mkdir -p /apps/cni/bin
ssh 192.168.2.176 mkdir -p /apps/cni/bin
ssh 192.168.2.177 mkdir -p /apps/cni/bin
ssh 192.168.2.185 mkdir -p /apps/cni/bin
ssh 192.168.2.187 mkdir -p /apps/cni/bin
# Upload the extracted binaries
scp -r * 192.168.2.175:/apps/cni/bin/
scp -r * 192.168.2.176:/apps/cni/bin/
scp -r * 192.168.2.177:/apps/cni/bin/
scp -r * 192.168.2.185:/apps/cni/bin/
scp -r * 192.168.2.187:/apps/cni/bin/
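A quick loop can confirm the plugins actually landed on every node (same host list as above):

for node in 192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187; do
  ssh ${node} "ls /apps/cni/bin | wc -l"
done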

bootstrap-kubeconfig configuration

export KUBE_APISERVER=https://127.0.0.1:6443
# Generate the bootstrap token (token-id must match [a-z0-9]{6}; the original character class a-f3-9 dropped 0-2 by mistake)
export TOKEN_ID=$(head -c 16 /dev/urandom | od -An -t x | tr -dc a-f0-9 | cut -c 1-6)
export TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
export BOOTSTRAP_TOKEN=${TOKEN_ID}.${TOKEN_SECRET}
cd ${HOST_PATH}
# Create the bootstrap kubeconfig
# Set cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${HOST_PATH}/kubeconfig/bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials system:bootstrap:${TOKEN_ID} \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=${HOST_PATH}/kubeconfig/bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=${CLUSTER_NAME} \
  --user=system:bootstrap:${TOKEN_ID} \
  --kubeconfig=${HOST_PATH}/kubeconfig/bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=${HOST_PATH}/kubeconfig/bootstrap.kubeconfig
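Before distributing the file, the generated kubeconfig can be inspected (kubectl redacts the token value in this view):

kubectl config view --kubeconfig=${HOST_PATH}/kubeconfig/bootstrap.kubeconfig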
# Create remote directories (quote the command, otherwise the second mkdir after && runs on the local host instead of the remote one)
ssh 192.168.2.175 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/work/kubernetes/manifests"
ssh 192.168.2.176 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/work/kubernetes/manifests"
ssh 192.168.2.177 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/work/kubernetes/manifests"
ssh 192.168.2.185 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/work/kubernetes/manifests"
ssh 192.168.2.187 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/work/kubernetes/manifests"
# Distribute bootstrap.kubeconfig
scp -r kubeconfig/bootstrap.kubeconfig 192.168.2.175:/apps/k8s/conf/
scp -r kubeconfig/bootstrap.kubeconfig 192.168.2.176:/apps/k8s/conf/
scp -r kubeconfig/bootstrap.kubeconfig 192.168.2.177:/apps/k8s/conf/
scp -r kubeconfig/bootstrap.kubeconfig 192.168.2.185:/apps/k8s/conf/
scp -r kubeconfig/bootstrap.kubeconfig 192.168.2.187:/apps/k8s/conf/

kubelet bootstrap configuration

# Create the bootstrap token secret YAML
mkdir yaml
cat << EOF | tee ${HOST_PATH}/yaml/bootstrap-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-<token id>"
  name: bootstrap-token-${TOKEN_ID}
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated by 'kubelet '."

  # Token ID and secret. Required.
  token-id: ${TOKEN_ID}
  token-secret: ${TOKEN_SECRET}

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
EOF
# Generate the cluster RBAC YAML
cat << EOF | tee ${HOST_PATH}/yaml/kube-api-rbac.yaml
---
# kube-controller-manager binding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: controller-node-clusterrolebing
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-controller-manager
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-controller-manager
---
# kube-scheduler binding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: scheduler-node-clusterrolebing
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-scheduler
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-scheduler
---
# Bind kube-controller-manager to system:auth-delegator
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: controller-manager:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-controller-manager
---
# Grant cluster-admin to the default service account in kube-system
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-system-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-node-clusterbinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
# Grant the kubernetes apiserver certificate access to the kubelet API
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-apiserver:kubelet-apis
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubernetes
EOF
# Create the kubelet-bootstrap RBAC
cat << EOF | tee ${HOST_PATH}/yaml/kubelet-bootstrap-rbac.yaml
---
# Allow users in the system:bootstrappers group to create CSRs
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers
---
# Auto-approve the initial TLS bootstrapping CSRs from the system:bootstrappers group
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-client-auto-approve-csr
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers
---
# Auto-approve CSRs when nodes in the system:nodes group renew the kubelet client certificate used to talk to the apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-client-auto-renew-crt
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
# Auto-approve CSRs when nodes in the system:nodes group renew the kubelet serving certificate for the 10250 API port
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-server-auto-renew-crt
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
EOF
# Apply the generated YAML to the cluster
kubectl apply -f yaml/bootstrap-secret.yaml
kubectl apply -f yaml/kube-api-rbac.yaml
kubectl apply -f yaml/kubelet-bootstrap-rbac.yaml
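To confirm the token secret and the bindings defined above were created:

kubectl -n kube-system get secret bootstrap-token-${TOKEN_ID}
kubectl get clusterrolebinding kubelet-bootstrap node-client-auto-approve-csr node-client-auto-renew-crt node-server-auto-renew-crt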

Preparing the kubelet binary

# Enter the directory containing the binaries
cd ${HOST_PATH}/kubernetes/server/bin
scp -r kubelet 192.168.2.175:/apps/k8s/bin
scp -r kubelet 192.168.2.176:/apps/k8s/bin
scp -r kubelet 192.168.2.177:/apps/k8s/bin
scp -r kubelet 192.168.2.187:/apps/k8s/bin
scp -r kubelet 192.168.2.185:/apps/k8s/bin

kubelet configuration files

# Configuration for node 192.168.2.175
cat << EOF | tee /apps/k8s/conf/kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/k8s/conf/bootstrap.kubeconfig \\
              --network-plugin=cni \\
              --cni-conf-dir=/apps/cni/etc/net.d \\
              --cni-bin-dir=/apps/cni/bin \\
              --kubeconfig=/apps/k8s/conf/kubelet.kubeconfig \\
              --node-ip=192.168.2.175 \\    # change per node; delete this comment before use
              --hostname-override=k8s-master-1 \\  # change per node; delete this comment before use
              --cert-dir=/apps/k8s/ssl \\
              --runtime-cgroups=/systemd/system.slice \\
              --root-dir=/apps/work/kubernetes/kubelet \\
              --log-dir=/apps/k8s/log \\
              --alsologtostderr=true \\
              --config=/apps/k8s/conf/kubelet.yaml \\
              --logtostderr=false \\
              --pod-infra-container-image=docker.io/juestnow/pause-amd64:3.1 \\
              --image-pull-progress-deadline=30s \\
              --v=2 \\
              --volume-plugin-dir=/apps/k8s/kubelet-plugins/volume"
EOF

cat << EOF | tee /apps/k8s/conf/kubelet.yaml
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
staticPodPath: "/apps/work/kubernetes/manifests"
syncFrequency: 30s
fileCheckFrequency: 20s
httpCheckFrequency: 20s
address: 192.168.2.175 # change per node; delete this comment before use
port: 10250
readOnlyPort: 0
tlsCipherSuites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
rotateCertificates: true
authentication:
  x509:
    clientCAFile: "/apps/k8s/ssl/k8s/k8s-ca.pem"
  webhook:
    enabled: true
    cacheTTL: 2m0s
  anonymous:
    enabled: false
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
registryPullQPS: 5
registryBurst: 10
eventRecordQPS: 15
eventBurst: 30
enableDebuggingHandlers: true
healthzPort: 10248
healthzBindAddress: 192.168.2.175 # change per node; delete this comment before use
oomScoreAdj: -999
clusterDomain: cluster.local # adjust to your cluster domain; delete this comment before use
clusterDNS:
- 10.66.0.2 # adjust to your DNS service IP; delete this comment before use
streamingConnectionIdleTimeout: 4h0m0s
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 5m0s
nodeLeaseDurationSeconds: 40
imageMinimumGCAge: 2m0s
imageGCHighThresholdPercent: 70
imageGCLowThresholdPercent: 50
volumeStatsAggPeriod: 1m0s
kubeletCgroups: "/systemd/system.slice"
cgroupsPerQOS: true
cgroupDriver: cgroupfs
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
topologyManagerPolicy: none
runtimeRequestTimeout: 2m0s
hairpinMode: hairpin-veth
maxPods: 100
podPidsLimit: -1
resolvConf: "/etc/resolv.conf"
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
maxOpenFiles: 1000000
contentType: application/vnd.kubernetes.protobuf
kubeAPIQPS: 15
kubeAPIBurst: 30
serializeImagePulls: false
evictionHard:
  imagefs.available: 10%
  memory.available: 500Mi
  nodefs.available: 10%
evictionSoft:
  imagefs.available: 15%
  memory.available: 500Mi
  nodefs.available: 15%
evictionSoftGracePeriod:
  imagefs.available: 2m
  memory.available: 2m
  nodefs.available: 2m
evictionPressureTransitionPeriod: 20s
podsPerCore: 0
evictionMinimumReclaim:
  imagefs.available: 500Mi
  memory.available: 0Mi
  nodefs.available: 500Mi
enableControllerAttachDetach: true
makeIPTablesUtilChains: true
iptablesMasqueradeBit: 14
iptablesDropBit: 15
featureGates:
  EndpointSlice: true
  ServiceTopology: true
failSwapOn: false
containerLogMaxSize: 10Mi
containerLogMaxFiles: 5
configMapAndSecretChangeDetectionStrategy: Watch
systemReserved:
  cpu: 1000m
  ephemeral-storage: 1Gi
  memory: 1024Mi
kubeReserved:
  cpu: 500m
  ephemeral-storage: 1Gi
  memory: 512Mi
systemReservedCgroup: "/systemd/system.slice"
kubeReservedCgroup: "/systemd/system.slice"
enforceNodeAllocatable:
- pods
- kube-reserved
- system-reserved
allowedUnsafeSysctls:
- kernel.msg*
- kernel.shm*
- kernel.sem
- fs.mqueue.*
- net.*
EOF
# Configure the other nodes following the 192.168.2.175 example; a scripted approach is sketched below
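Rather than hand-editing the two files on every node, the per-node values can be substituted mechanically. A minimal sketch, assuming local template copies named kubelet and kubelet.yaml written as above for 192.168.2.175 (with the inline reminder comments already removed); the hostname-to-IP map is hypothetical glue matching this cluster:

# Hypothetical helper: render and push per-node kubelet configs from the 192.168.2.175 templates
declare -A NODE_IP=([k8s-master-2]=192.168.2.176 [k8s-master-3]=192.168.2.177 \
                    [k8s-node-1]=192.168.2.185 [k8s-node-2]=192.168.2.187)
for name in "${!NODE_IP[@]}"; do
  ip=${NODE_IP[$name]}
  # substitute the node IP and hostname in both files
  sed -e "s/192.168.2.175/${ip}/g" -e "s/k8s-master-1/${name}/g" kubelet > kubelet.${name}
  sed "s/192.168.2.175/${ip}/g" kubelet.yaml > kubelet.yaml.${name}
  scp kubelet.${name} ${ip}:/apps/k8s/conf/kubelet
  scp kubelet.yaml.${name} ${ip}:/apps/k8s/conf/kubelet.yaml
done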

Configuring k8s-ha-master

# Project: https://github.com/qist/k8s/tree/master/dockerfile/k8s-ha-master
# CP_HOSTS lists the master IP:port pairs. The proxy listens on 6443 by default; that port must not already be in use on a master, or the proxy will fail to start.
# Deploy on every node
cat << EOF | tee /apps/work/kubernetes/manifests/k8s-ha-master.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: k8s-ha-master
    tier: control-plane
  name: k8s-ha-master
  namespace: kube-system
spec:
  containers:
  - args:
    - "CP_HOSTS=192.168.2.175:5443,192.168.2.176:5443,192.168.2.177:5443"
    image: juestnow/k8s-ha-master:1.17.9
    imagePullPolicy: IfNotPresent
    name: k8s-ha-master
    env:
    - name: CP_HOSTS
      value: "192.168.2.175:5443,192.168.2.176:5443,192.168.2.177:5443"
  hostNetwork: true
  priorityClassName: system-cluster-critical
status: {}
EOF

Creating the kubelet systemd unit

cat << EOF | tee kubelet.service
[Unit]
Description=Kubernetes Kubelet
[Service]
LimitNOFILE=65535
LimitNPROC=65535
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/k8s/conf/kubelet
ExecStart=/apps/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Upload the unit file to the servers
scp -r kubelet.service 192.168.2.175:/usr/lib/systemd/system
scp -r kubelet.service 192.168.2.176:/usr/lib/systemd/system
scp -r kubelet.service 192.168.2.177:/usr/lib/systemd/system
scp -r kubelet.service 192.168.2.185:/usr/lib/systemd/system
scp -r kubelet.service 192.168.2.187:/usr/lib/systemd/system

Starting kubelet

# Reload systemd
ssh 192.168.2.175 systemctl daemon-reload
ssh 192.168.2.176 systemctl daemon-reload
ssh 192.168.2.177 systemctl daemon-reload
ssh 192.168.2.185 systemctl daemon-reload
ssh 192.168.2.187 systemctl daemon-reload
# Enable start on boot
ssh 192.168.2.175 systemctl enable kubelet.service
ssh 192.168.2.176 systemctl enable kubelet.service
ssh 192.168.2.177 systemctl enable kubelet.service
ssh 192.168.2.185 systemctl enable kubelet.service
ssh 192.168.2.187 systemctl enable kubelet.service
# Start kubelet
ssh 192.168.2.175 systemctl start kubelet.service
ssh 192.168.2.176 systemctl start kubelet.service
ssh 192.168.2.177 systemctl start kubelet.service
ssh 192.168.2.185 systemctl start kubelet.service
ssh 192.168.2.187 systemctl start kubelet.service
# Check status
ssh 192.168.2.175 systemctl status kubelet.service
ssh 192.168.2.176 systemctl status kubelet.service
ssh 192.168.2.177 systemctl status kubelet.service
ssh 192.168.2.185 systemctl status kubelet.service
ssh 192.168.2.187 systemctl status kubelet.service
# kubelet may restart repeatedly at this stage; wait for the k8s-ha-master static pod to come up, after which the kubelet process returns to normal
[root@k8s-master-1 conf]# ps -ef | grep nginx
root       59570   59553  0 May06 ?        00:00:00 /bin/sh /usr/bin/nginx-proxy CP_HOSTS=192.168.2.175:5443,192.168.2.176:5443,192.168.2.177:5443
root       59590   59570  0 May06 ?        00:00:00 nginx: master process nginx -g daemon off;
100        59591   59590  0 May06 ?        00:00:00 nginx: worker process
100        59592   59590  0 May06 ?        00:00:06 nginx: worker process
100        59593   59590  0 May06 ?        00:00:00 nginx: worker process
100        59594   59590  0 May06 ?        00:00:00 nginx: worker process
[root@k8s-master-1 conf]# docker ps| grep nginx-prox
910352e2477c        67659abde8d5               "/usr/bin/nginx-prox…"   12 hours ago        Up 12 hours                             k8s_ha-proxy_ha-proxy-k8s-master-1_kube-system_6b1dc0cfc50cb49428adaf42aba38a56_0
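If kubelet keeps crash-looping well beyond this point, the unit log is the first place to look, and one can also confirm the local proxy is listening on 6443:

journalctl -u kubelet.service -f
ss -tnlp | grep 6443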

Verifying the kubelet deployment

root@Qist:/mnt/g/work/ipv6/1/yaml# kubectl get node
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   NotReady   <none>   11h   v1.18.2
k8s-master-2   NotReady   <none>   11h   v1.18.2
k8s-master-3   NotReady   <none>   11h   v1.18.2
k8s-node-1     NotReady   <none>   11h   v1.18.2
k8s-node-2     NotReady   <none>   11h   v1.18.2
# Nodes stay NotReady like this until a CNI network plugin is deployed
root@Qist:/mnt/g/work/ipv6/1# kubectl get nodes -o wide
NAME           STATUS     ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
k8s-master-1   NotReady   <none>   12h   v1.18.2   192.168.2.175   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-master-2   NotReady   <none>   12h   v1.18.2   192.168.2.176   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-master-3   NotReady   <none>   12h   v1.18.2   192.168.2.177   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-node-1     NotReady   <none>   12h   v1.18.2   192.168.2.185   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-node-2     NotReady   <none>   12h   v1.18.2   192.168.2.187   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
# All nodes have registered with the Kubernetes cluster
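Node registration also implies the bootstrap CSRs were auto-approved by the bindings created earlier; this can be double-checked with:

kubectl get csr
# each node should show an Approved,Issued entry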