Kubeadm High-Availability Cluster Deployment

Table of Contents

Kubeadm

Environment Preparation

Install kubeadm, kubelet and kubectl on All Nodes

Install and Configure the High-Availability Components

Deploy the K8S Cluster


Kubeadm

Master nodes require at least 2 CPU cores (a quick verification sketch follows the list below).
●The newest release is not always the best choice: its core features are stable relative to older releases, but newly added features and APIs are relatively immature.
●Once you have learned the HA deployment of one version, other versions work much the same way.
●Upgrade the hosts to CentOS 7.9 if possible.
●Upgrade the kernel to a stable release such as 4.19+.
●When choosing the K8S version to deploy, prefer a patch release of 1.xx.5 or higher (these tend to be the more stable ones).
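
A quick sanity check on every node (a minimal sketch; the expected values are simply the prerequisites listed above):
nproc                       #should print 2 or more on master nodes
cat /etc/redhat-release     #expect CentOS Linux release 7.9.x
uname -r                    #expect 4.19+ after the kernel upgrade below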

Environment Preparation

//On all nodes: stop the firewall, disable SELinux, and turn off swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab


//Set the hostnames
hostnamectl set-hostname master01
hostnamectl set-hostname master02
hostnamectl set-hostname master03
hostnamectl set-hostname node01
hostnamectl set-hostname node02


//Update the hosts file on all nodes
vim /etc/hosts
192.168.142.25 master01
192.168.142.26 master02
192.168.142.27 master03
192.168.142.20 node01
192.168.142.23 node02


//Synchronize time on all nodes
yum -y install ntpdate
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com

systemctl enable --now crond

crontab -e
*/30 * * * * /usr/sbin/ntpdate time2.aliyun.com


//Raise the Linux resource limits on all nodes
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited


//Upgrade the kernel on all nodes
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm -O /opt/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm -O /opt/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm

cd /opt/
yum localinstall -y kernel-ml*

#Make the new kernel the default boot entry
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
grubby --default-kernel
reboot
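
After the reboot, confirm that the node actually booted into the new kernel (the expected value assumes the 4.19.12 packages installed above):
uname -r    #should print 4.19.12-1.el7.elrepo.x86_64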


//Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

#Apply the parameters
sysctl --system  


//Load the ip_vs modules
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
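
The loop above only loads the modules for the current boot. To have them load automatically on every reboot, a small sketch using systemd's modules-load mechanism (the module list mirrors what the loop finds on a 4.19 kernel):
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

lsmod | grep -e ip_vs -e nf_conntrack    #verify the modules are loaded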


//Install Docker on all nodes
yum install -y yum-utils device-mapper-persistent-data lvm2 
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
yum install -y docker-ce docker-ce-cli containerd.io

mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://6ijb8ubo.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "500m", "max-file": "3"
  }
}
EOF

systemctl daemon-reload
systemctl restart docker.service
systemctl enable docker.service 

docker info | grep "Cgroup Driver"
Cgroup Driver: systemd

Install kubeadm, kubelet and kubectl on All Nodes

//Define the Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.20.15 kubeadm-1.20.15 kubectl-1.20.15

#Configure kubelet to use Alibaba Cloud's pause image
cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF


//Enable kubelet on boot (it will keep restarting until kubeadm init/join provides its configuration; this is expected)
systemctl enable --now kubelet

Install and Configure the High-Availability Components

//Deploy HAProxy on all master nodes
yum -y install haproxy keepalived

cat > /etc/haproxy/haproxy.cfg << EOF
global
    log         127.0.0.1 local0 info
    log         127.0.0.1 local1 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s
    maxconn                 3000

frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor

frontend k8s-master
    bind *:16443
    mode tcp
    option tcplog
    default_backend k8s-master

backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    server k8s-master1 192.168.142.25:6443  check inter 10000 fall 2 rise 2 weight 1
    server k8s-master2 192.168.142.26:6443  check inter 10000 fall 2 rise 2 weight 1
    server k8s-master3 192.168.142.27:6443  check inter 10000 fall 2 rise 2 weight 1
EOF
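
Before relying on HAProxy, validate the configuration and, once the service is running, the health endpoint (a quick sketch; the port and URI match the monitor-in frontend above):
haproxy -c -f /etc/haproxy/haproxy.cfg       #configuration syntax check
curl http://127.0.0.1:33305/monitor          #should return HTTP 200 once haproxy is started below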


//Deploy Keepalived on all master nodes
yum -y install keepalived

cd /etc/keepalived/
vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_HA1            #router identifier; use a different value on each node
}

vrrp_script chk_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER                #state of this instance, MASTER/BACKUP; set BACKUP in the config of the backup nodes
    interface ens33
    virtual_router_id 51
    priority 100                #initial priority of this node; backup nodes use a value lower than the master's
    advert_int 1
    virtual_ipaddress {
        192.168.80.100          #VIP address (pick an unused address reachable by all nodes, typically on the same subnet as ens33)
    }
    track_script {
        chk_haproxy
    }
}
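
On master02 and master03 only the node-specific fields change (a sketch; the router_id values and priorities are examples consistent with the master01 config above):
#master02: router_id LVS_HA2, state BACKUP, priority 90
#master03: router_id LVS_HA3, state BACKUP, priority 80
#virtual_router_id and virtual_ipaddress must be identical on all three nodes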


vim check_haproxy.sh
#!/bin/bash
#If haproxy is no longer running, stop keepalived so the VIP fails over to another master
if ! killall -0 haproxy; then
    systemctl stop keepalived
fi
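
#Keepalived can only execute the check script if it is executable, so mark it as such
chmod +x /etc/keepalived/check_haproxy.sh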


systemctl enable --now haproxy
systemctl enable --now keepalived
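
Once both services are up on all masters, verify that exactly one node holds the VIP (a quick sketch; adjust the VIP and interface name if they differ in your environment). The 16443 frontend will only start answering once the apiservers are initialized in the next section.
ip addr show ens33 | grep 192.168.80.100    #should match on the current MASTER node only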

Deploy the K8S Cluster

//Create the cluster initialization configuration file on master01
kubeadm config print init-defaults > /opt/kubeadm-config.yaml

cd /opt/
vim kubeadm-config.yaml
......
11 localAPIEndpoint:
12   advertiseAddress: 192.168.142.25        #IP address of the current master node
13   bindPort: 6443

21 apiServer:
22   certSANs:                                #add a certSANs list under apiServer containing the IPs of all master nodes and the cluster VIP
23   - 192.168.80.100
24   - 192.168.142.25
25   - 192.168.142.26
26   - 192.168.142.27

30 clusterName: kubernetes
31 controlPlaneEndpoint: "192.168.80.100:16443"        #cluster VIP address and the HAProxy frontend port
32 controllerManager: {}

38 imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers            #image repository to pull from
39 kind: ClusterConfiguration
40 kubernetesVersion: v1.20.15                #Kubernetes version
41 networking:
42   dnsDomain: cluster.local
43   podSubnet: "10.244.0.0/16"                #pod network CIDR; 10.244.0.0/16 matches the flannel default
44   serviceSubnet: 10.96.0.0/16            #service network CIDR
45 scheduler: {}
#append the following at the end of the file
--- 
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs                                    #switch kube-proxy from the default scheduling mode to ipvs


#Migrate the initialization configuration to the current config file version
kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml


//Pull the images on all nodes
#copy the yaml configuration file to the other hosts so that they pull images based on it
for i in master02 master03 node01 node02; do scp /opt/new.yaml $i:/opt/; done

kubeadm config images pull --config /opt/new.yaml
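
To double-check which images the configuration resolves to, they can also be listed explicitly:
kubeadm config images list --config /opt/new.yaml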


//Initialize the cluster on master01
kubeadm init --config new.yaml --upload-certs | tee kubeadm-init.log
#Output:
.........
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:
#command used to join additional master nodes; save it!
  kubeadm join 192.168.80.100:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98 \
    --control-plane --certificate-key 0f2a7ff2c46ec172f834e237fcca8a02e7c29500746594c25d995b78c92dde96

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:
#command used to join worker nodes; save it!
kubeadm join 192.168.80.100:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98

#If the initialization fails, clean up before retrying
kubeadm reset -f
ipvsadm --clear 
rm -rf ~/.kube
#then run kubeadm init again


//Configure the environment on master01
#Configure kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

#Modify the controller-manager and scheduler manifests
vim /etc/kubernetes/manifests/kube-scheduler.yaml 
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
......
    #- --port=0                    #search for port=0 and comment that line out

systemctl restart kubelet
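
With --port=0 commented out and kubelet restarted, the insecure health-check ports come back, so the control-plane components should report healthy (componentstatuses is deprecated in 1.20 but still usable for this quick check):
kubectl get cs    #controller-manager and scheduler should show Healthy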

#Deploy the flannel network plugin
Upload the flannel image flannel.tar and the CNI plugins archive cni-plugins-linux-amd64-v0.8.6.tgz to /opt on all nodes, and upload the kube-flannel.yml file to the master nodes.
cd /opt
docker load < flannel.tar

mv /opt/cni /opt/cni_bak
mkdir -p /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin

kubectl apply -f kube-flannel.yml 
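
After applying the manifest, confirm that a flannel pod starts on each node currently in the cluster and that those nodes become Ready (the grep works regardless of which namespace the DaemonSet lands in):
kubectl get pods -A -o wide | grep flannel
kubectl get nodes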


//Join the remaining nodes to the cluster
#Join the other master nodes
kubeadm join 192.168.80.100:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98 \
    --control-plane --certificate-key 0f2a7ff2c46ec172f834e237fcca8a02e7c29500746594c25d995b78c92dde96

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

#Join the worker nodes
kubeadm join 192.168.80.100:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98


#Check the cluster state on master01
kubectl get nodes
NAME       STATUS   ROLES                  AGE    VERSION
master01   Ready    control-plane,master   2h5m   v1.20.15
master02   Ready    control-plane,master   2h5m   v1.20.15
master03   Ready    control-plane,master   2h5m   v1.20.15
node01     Ready    <none>                 2h5m   v1.20.15
node02     Ready    <none>                 2h5m   v1.20.15


kubectl get pod -n kube-system 
NAME                               READY   STATUS    RESTARTS   AGE
coredns-74ff55c5b-4fg44            1/1     Running   2          2h5m
coredns-74ff55c5b-jsdxz            1/1     Running   0          2h5m
etcd-master01                      1/1     Running   1          2h5m
etcd-master02                      1/1     Running   1          2h5m
etcd-master03                      1/1     Running   1          2h5m
kube-apiserver-master01            1/1     Running   1          2h5m
kube-apiserver-master02            1/1     Running   1          2h5m
kube-apiserver-master03            1/1     Running   1          2h5m
kube-controller-manager-master01   1/1     Running   3          2h5m
kube-controller-manager-master02   1/1     Running   1          2h5m
kube-controller-manager-master03   1/1     Running   2          2h5m
kube-flannel-ds-8qtx6              1/1     Running   2          2h4m
kube-flannel-ds-lmzdz              1/1     Running   0          2h4m
kube-flannel-ds-nb9qx              1/1     Running   1          2h4m
kube-flannel-ds-t4l4x              1/1     Running   1          2h4m
kube-flannel-ds-v592x              1/1     Running   1          2h4m
kube-proxy-6gd5j                   1/1     Running   1          2h5m
kube-proxy-f8k96                   1/1     Running   3          2h5m
kube-proxy-h7nrf                   1/1     Running   1          2h5m
kube-proxy-j96b6                   1/1     Running   1          2h5m
kube-proxy-mgmx6                   1/1     Running   0          2h5m
kube-scheduler-master01            1/1     Running   1          2h5m
kube-scheduler-master02            1/1     Running   2          2h5m
kube-scheduler-master03            1/1     Running   2          2h5m

//Troubleshooting
1. The Token used to join the cluster has expired
Note: the Token created during cluster initialization is valid for 24 hours. After it expires, generate a new Token to join further nodes. The uploaded control-plane certificates expire even sooner, after 2 hours, and must be re-uploaded (see 1.2 below).

1.1 Generate a Token for worker nodes to join the cluster
kubeadm token create --print-join-command
kubeadm join 192.168.80.100:16443 --token menw99.1hbsurvl5fiz119n     --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98


1.2 Generate a --certificate-key for master nodes to join the cluster
kubeadm init phase upload-certs  --upload-certs
I1105 12:33:08.201601   93226 version.go:254] remote version is much newer: v1.22.3; falling back to: stable-1.20
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
38dba94af7a38700c3698b8acdf8e23f273be07877f5c86f4977dc023e333deb

#command for a master node to join the cluster
kubeadm join 192.168.80.100:16443 --token menw99.1hbsurvl5fiz119n     --discovery-token-ca-cert-hash sha256:e76e4525ca29a9ccd5c24142a724bdb6ab86512420215242c4313fb830a4eb98 \
 --control-plane --certificate-key 38dba94af7a38700c3698b8acdf8e23f273be07877f5c86f4977dc023e333deb


2. Master nodes cannot run non-system Pods
Explanation: the master nodes carry a taint that prevents non-system Pods from being scheduled on them. In a test environment the taint can be removed to make that capacity usable.

2.1 View the taints
kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule


2.2 Remove the taints
kubectl  taint node  -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-
node/master01 untainted
node/master02 untainted
node/master03 untainted

kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:            
Taints:            
Taints:            


3. Change the default NodePort range
Background: by default K8S only allocates NodePorts from the range 30000-32767 for external access. The range can be changed by editing the apiserver configuration.

#Error message
The Service "nginx-svc" is invalid: spec.ports[0].nodePort: Invalid value: 80: provided port is not in the valid range. The range of valid ports is 30000-32767


[root@k8s-master1 ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --service-cluster-ip-range=10.96.0.0/16
- --service-node-port-range=1-65535    #add this line next to the existing flags

#No manual restart is needed: kube-apiserver runs as a static pod, so kubelet recreates it automatically when the manifest changes
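
A minimal example to confirm the widened range (a sketch; the Deployment, the nginx-svc Service name from the error above, and the label are illustrative):
kubectl create deployment nginx --image=nginx
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  type: NodePort
  selector:
    app: nginx          #kubectl create deployment labels its pods app=nginx
  ports:
  - port: 80
    targetPort: 80
    nodePort: 80        #accepted now that the range is 1-65535
EOF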


4. Configuration for an external etcd cluster
kubeadm config print init-defaults > /opt/kubeadm-config.yaml

cd /opt/
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.142.25        #IP address of the current master node (master01)
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 10.96.0.1
  - 127.0.0.1
  - localhost
  - kubernetes
  - kubernetes.default
  - kubernetes.default.svc
  - kubernetes.default.svc.cluster.local
  - 192.168.80.100
  - 192.168.142.25
  - 192.168.142.26
  - 192.168.142.27
  - master01
  - master02
  - master03
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.80.100:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  external:                                #use an external etcd cluster
    endpoints:
    - https://192.168.142.25:2379
    - https://192.168.142.26:2379
    - https://192.168.142.27:2379
    caFile: /opt/etcd/ssl/ca.pem           #the etcd certificates must be copied to every master node
    certFile: /opt/etcd/ssl/server.pem
    keyFile: /opt/etcd/ssl/server-key.pem
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.15
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
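
Before running kubeadm init with this file, confirm that every master can reach the external etcd cluster with those certificates (a sketch; it assumes the etcdctl binary is available on the node and the certificate paths match the config above):
ETCDCTL_API=3 etcdctl \
  --endpoints="https://192.168.142.25:2379,https://192.168.142.26:2379,https://192.168.142.27:2379" \
  --cacert=/opt/etcd/ssl/ca.pem \
  --cert=/opt/etcd/ssl/server.pem \
  --key=/opt/etcd/ssl/server-key.pem \
  endpoint health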
