OS version
[root@k8s-master1 k8s]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
Docker version
[root@k8s-master1 ~]# docker -v
Docker version 20.10.15, build fd82621
Kubernetes version
[root@k8s-master1 ~]# kubelet --version
Kubernetes v1.23.6
[root@k8s-master1 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:49:13Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:43:11Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
[root@k8s-master1 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:48:05Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
IP plan
k8s-master1 : 192.168.73.130
k8s-master2 : 192.168.73.131
k8s-master3 : 192.168.73.132
k8s-node1 : 192.168.73.133
k8s-node2 : 192.168.73.134
IP configuration
nmcli connection modify ens33 ipv4.method manual ipv4.address 192.168.73.130/24 ipv4.gateway 192.168.73.2 connection.autoconnect yes
nmcli connection up ens33
nmcli connection modify ens33 ipv4.method manual ipv4.address 192.168.73.131/24 ipv4.gateway 192.168.73.2 connection.autoconnect yes
nmcli connection up ens33
nmcli connection modify ens33 ipv4.method manual ipv4.address 192.168.73.132/24 ipv4.gateway 192.168.73.2 connection.autoconnect yes
nmcli connection up ens33
nmcli connection modify ens33 ipv4.method manual ipv4.address 192.168.73.133/24 ipv4.gateway 192.168.73.2 connection.autoconnect yes
nmcli connection up ens33
nmcli connection modify ens33 ipv4.method manual ipv4.address 192.168.73.134/24 ipv4.gateway 192.168.73.2 connection.autoconnect yes
nmcli connection up ens33
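The commands above set only the static address and gateway. If the hosts need to resolve the mirror hostnames used below, also point each connection at a DNS server (the resolver address here is just an example; substitute your own):
nmcli connection modify ens33 ipv4.dns "114.114.114.114"
nmcli connection up ens33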
1. Disable firewalld and SELinux (all hosts)
vi /etc/selinux/config
getenforce
setenforce 0
getenforce
systemctl stop firewalld
systemctl disable firewalld
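Editing /etc/selinux/config should change SELINUX=enforcing to SELINUX=disabled so the setting survives a reboot; a one-line equivalent:
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
grep '^SELINUX=' /etc/selinux/config    # should print SELINUX=disabled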
2. Configure hostnames and /etc/hosts resolution (all hosts)
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-master2
hostnamectl set-hostname k8s-master3
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
vim /etc/hosts
192.168.73.130 k8s-master1
192.168.73.131 k8s-master2
192.168.73.132 k8s-master3
192.168.73.133 k8s-node1
192.168.73.134 k8s-node2
Copy the file to the other hosts:
for i in {131..134};do scp /etc/hosts 192.168.73.$i:/etc/hosts ;done
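The scp loop assumes root SSH access to every host; to avoid a password prompt per host, key-based login can be set up first:
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for i in {131..134};do ssh-copy-id root@192.168.73.$i ;done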
3. Add the kernel parameter file /etc/sysctl.d/k8s.conf (all hosts)
vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
4. Run the following commands (all hosts)
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
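Verify that the parameters took effect, and make br_netfilter load on boot (otherwise the bridge settings are silently skipped after a reboot):
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
echo br_netfilter > /etc/modules-load.d/k8s.conf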
5. Locating and installing the packages
Configure the Tsinghua mirror as the yum source to download the packages.
Base package repository:
/etc/yum.repos.d/qinghua-base.repo
[base]
name=CentOS-$releasever - Base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
enabled=1
gpgcheck=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-7
#released updates
[updates]
name=CentOS-$releasever - Updates
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
enabled=1
gpgcheck=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-7
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
enabled=1
gpgcheck=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-7
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
gpgcheck=0
enabled=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-7
6. Kubernetes repository
/etc/yum.repos.d/k8s-base.repo
[k8s-base]
name=k8s-base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
7. Docker repository
/etc/yum.repos.d/docker-base.repo
[docker-base]
name=docker-base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/7/x86_64/stable/
enabled=1
gpgcheck=0
8. Check the available Kubernetes package versions
[root@localhost yum.repos.d]# yum list |grep kubeadm
kubeadm.x86_64 1.23.6-0 k8s-base
[root@localhost yum.repos.d]# yum list |grep kubectl
kubectl.x86_64 1.23.6-0 k8s-base
[root@localhost yum.repos.d]#
[root@localhost yum.repos.d]# yum list |grep kubelet
kubelet.x86_64 1.23.6-0 k8s-base
9. Download the Kubernetes packages
[root@localhost yum.repos.d]# mkdir -p /data/bao/k8s
yum -y install --downloadonly --downloaddir=/data/bao/k8s kubeadm kubelet kubectl
10. Inspect the downloaded packages
[root@localhost k8s]# ll /data/bao/k8s/
total 66812
-rw-r--r--. 1 root root 7401938 Mar 17 08:32 4d300a7655f56307d35f127d99dc192b6aa4997f322234e754f16aaa60fd8906-cri-tools-1.23.0-0.x86_64.rpm
-rw-r--r--. 1 root root 21556178 Apr 21 06:23 68a98b2ae673eef4a5ddbf1f3c830db0df8fbb888e035aea6054677d88f8a8bc-kubelet-1.23.6-0.x86_64.rpm
-rw-r--r--. 1 root root 9921862 Apr 21 06:23 868c4a6ee448d1e8488938812a19a991b5132c81de511cd737d93493b98451cc-kubectl-1.23.6-0.x86_64.rpm
-rw-r--r--. 1 root root 9478890 Apr 21 12:39 89104c7beafab5f04d6789e5425963fc8f91ba9711c9603f1ad89003cdea4fe4-kubeadm-1.23.6-0.x86_64.rpm
-rw-r--r--. 1 root root 191000 Apr 4 2020 conntrack-tools-1.4.4-7.el7.x86_64.rpm
-rw-r--r--. 1 root root 19487362 Sep 10 2020 db7cb5cb0b3f6875f54d10f02e625573988e3e91fd4fc5eef0b1876bb18604ad-kubernetes-cni-0.8.7-0.x86_64.rpm
-rw-r--r--. 1 root root 18400 Apr 4 2020 libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm
-rw-r--r--. 1 root root 18212 Apr 4 2020 libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm
-rw-r--r--. 1 root root 23584 Aug 11 2017 libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm
-rw-r--r--. 1 root root 296632 Aug 11 2017 socat-1.7.3.2-2.el7.x86_64.rpm
11. Check the available Docker package versions
[root@localhost k8s]# yum list |grep docker-ce
docker-ce.x86_64 3:20.10.15-3.el7 docker-base
docker-ce-cli.x86_64 1:20.10.15-3.el7 docker-base
docker-ce-rootless-extras.x86_64 20.10.15-3.el7 docker-base
docker-ce-selinux.noarch 17.03.3.ce-1.el7 docker-base
12. Download the Docker packages
[root@localhost k8s]# mkdir -p /data/bao/docker
yum -y install --downloadonly --downloaddir=/data/bao/docker docker-ce
yum -y install --downloadonly --downloaddir=/data/bao/docker libcgroup fuse3-libs
13. Inspect the downloaded packages
[root@localhost docker]# ll /data/bao/docker/
total 110812
-rw-r--r--. 1 root root 261632 Aug 23 2019 audit-2.8.5-4.el7.x86_64.rpm
-rw-r--r--. 1 root root 104408 Aug 23 2019 audit-libs-2.8.5-4.el7.x86_64.rpm
-rw-r--r--. 1 root root 78256 Aug 23 2019 audit-libs-python-2.8.5-4.el7.x86_64.rpm
-rw-r--r--. 1 root root 302068 Nov 12 2018 checkpolicy-2.5-8.el7.x86_64.rpm
-rw-r--r--. 1 root root 34647136 May 6 05:05 containerd.io-1.6.4-3.1.el7.x86_64.rpm
-rw-r--r--. 1 root root 40816 Jul 6 2020 container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm
-rw-r--r--. 1 root root 23234292 May 6 05:05 docker-ce-20.10.15-3.el7.x86_64.rpm
-rw-r--r--. 1 root root 30837648 May 6 05:05 docker-ce-cli-20.10.15-3.el7.x86_64.rpm
-rw-r--r--. 1 root root 8638396 May 6 05:05 docker-ce-rootless-extras-20.10.15-3.el7.x86_64.rpm
-rw-r--r--. 1 root root 3930044 Mar 11 00:47 docker-scan-plugin-0.17.0-3.el7.x86_64.rpm
-rw-r--r--. 1 root root 83764 Apr 29 2020 fuse3-libs-3.6.1-4.el7.x86_64.rpm
-rw-r--r--. 1 root root 55796 Apr 29 2020 fuse-overlayfs-0.7.2-6.el7_8.x86_64.rpm
-rw-r--r--. 1 root root 67720 Aug 23 2019 libcgroup-0.41-21.el7.x86_64.rpm
-rw-r--r--. 1 root root 166012 Apr 4 2020 libselinux-2.5-15.el7.x86_64.rpm
-rw-r--r--. 1 root root 241168 Apr 4 2020 libselinux-python-2.5-15.el7.x86_64.rpm
-rw-r--r--. 1 root root 154876 Apr 4 2020 libselinux-utils-2.5-15.el7.x86_64.rpm
-rw-r--r--. 1 root root 154244 Nov 12 2018 libsemanage-2.5-14.el7.x86_64.rpm
-rw-r--r--. 1 root root 115284 Nov 12 2018 libsemanage-python-2.5-14.el7.x86_64.rpm
-rw-r--r--. 1 root root 304196 Nov 12 2018 libsepol-2.5-10.el7.x86_64.rpm
-rw-r--r--. 1 root root 938736 Apr 4 2020 policycoreutils-2.5-34.el7.x86_64.rpm
-rw-r--r--. 1 root root 468316 Apr 4 2020 policycoreutils-python-2.5-34.el7.x86_64.rpm
-rw-r--r--. 1 root root 32880 Jul 4 2014 python-IPy-0.75-6.el7.noarch.rpm
-rw-r--r--. 1 root root 509568 Nov 18 2020 selinux-policy-3.13.1-268.el7_9.2.noarch.rpm
-rw-r--r--. 1 root root 7335504 Nov 18 2020 selinux-policy-targeted-3.13.1-268.el7_9.2.noarch.rpm
-rw-r--r--. 1 root root 635184 Nov 12 2018 setools-libs-3.3.8-4.el7.x86_64.rpm
-rw-r--r--. 1 root root 83452 Apr 29 2020 slirp4netns-0.4.3-4.el7_8.x86_64.rpm
14. Install dependency packages
yum install -y yum-utils device-mapper-persistent-data lvm2
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
15. Install Docker
[root@localhost k8s]# cd /data/bao/docker/
[root@localhost docker]# yum -y install *
16. Verify the Docker version
[root@localhost docker]# docker -v
Docker version 20.10.15, build fd82621
17. Install the Kubernetes components
[root@localhost docker]# cd /data/bao/k8s/
[root@localhost k8s]# yum -y install *
18. Verify the Kubernetes component versions
[root@localhost k8s]# kubelet --version
Kubernetes v1.23.6
[root@localhost k8s]# kubectl version
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:49:13Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?
(The server-connection error is expected at this point; the cluster has not been initialized yet.)
[root@localhost k8s]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:48:05Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
19. Enable the services at boot and start them
systemctl enable docker kubelet
systemctl restart docker
20. Check Docker status
[root@localhost k8s]# systemctl status docker -l
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2022-05-10 15:57:00 CST; 2min 6s ago
Docs: https://docs.docker.com
Main PID: 9862 (dockerd)
Memory: 37.5M
CGroup: /system.slice/docker.service
└─9862 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.025887674+08:00" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.025911795+08:00" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.025920488+08:00" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.054959588+08:00" level=info msg="Loading containers: start."
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.238876078+08:00" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.313376634+08:00" level=info msg="Loading containers: done."
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.370410721+08:00" level=info msg="Docker daemon" commit=4433bf6 graphdriver(s)=overlay2 version=20.10.15
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.370760686+08:00" level=info msg="Daemon has completed initialization"
May 10 15:57:00 k8s-node1 systemd[1]: Started Docker Application Container Engine.
May 10 15:57:00 k8s-node1 dockerd[9862]: time="2022-05-10T15:57:00.418165781+08:00" level=info msg="API listen on /var/run/docker.sock"
Locating and installing the images
21. List the images required to create the cluster
[root@localhost k8s]# kubeadm config images list
I0510 16:01:13.265348 10209 version.go:255] remote version is much newer: v1.24.0; falling back to: stable-1.23
k8s.gcr.io/kube-apiserver:v1.23.6
k8s.gcr.io/kube-controller-manager:v1.23.6
k8s.gcr.io/kube-scheduler:v1.23.6
k8s.gcr.io/kube-proxy:v1.23.6
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
22. Pull the images from a mirror inside China
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.6
Or pull each image directly (if you pull them another way, adjust the image tags as needed):
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
23. Re-tag the images and remove the originals
(The tag commands below assume the images were pulled via kubeadm from registry.aliyuncs.com/google_containers; if you used the registry.cn-hangzhou.aliyuncs.com pulls above, substitute that prefix in the source names.)
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.6 k8s.gcr.io/kube-apiserver:v1.23.6
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.6 k8s.gcr.io/kube-scheduler:v1.23.6
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.23.6 k8s.gcr.io/kube-proxy:v1.23.6
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6 k8s.gcr.io/kube-controller-manager:v1.23.6
docker tag registry.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker tag registry.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6
docker tag registry.aliyuncs.com/google_containers/etcd:3.5.1-0 k8s.gcr.io/etcd:3.5.1-0
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.23.6
docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6
docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.6
docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.6
docker rmi registry.aliyuncs.com/google_containers/pause:3.6
docker rmi registry.aliyuncs.com/google_containers/coredns:v1.8.6
docker rmi registry.aliyuncs.com/google_containers/etcd:3.5.1-0
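A compact loop equivalent to the tag-and-remove sequence above; coredns is handled separately because it moves under a coredns/ sub-path:
for img in kube-apiserver:v1.23.6 kube-controller-manager:v1.23.6 kube-scheduler:v1.23.6 kube-proxy:v1.23.6 pause:3.6 etcd:3.5.1-0; do
  docker tag registry.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
  docker rmi registry.aliyuncs.com/google_containers/$img
done
docker tag registry.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi registry.aliyuncs.com/google_containers/coredns:v1.8.6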
24. Save the images for transfer to the other hosts
mkdir -p /data/bao/k8s-images;cd /data/bao/k8s-images
docker save k8s.gcr.io/kube-apiserver:v1.23.6 > kube-apiserver.tar
docker save k8s.gcr.io/kube-scheduler:v1.23.6 > kube-scheduler.tar
docker save k8s.gcr.io/kube-controller-manager:v1.23.6 > kube-controller-manager.tar
docker save k8s.gcr.io/kube-proxy:v1.23.6 > kube-proxy.tar
docker save k8s.gcr.io/coredns/coredns:v1.8.6 > coredns.tar
docker save k8s.gcr.io/pause:3.6 > pause.tar
docker save k8s.gcr.io/etcd:3.5.1-0 > etcd.tar
25. Inspect the saved image archives
[root@localhost k8s-images]# ll /data/bao/k8s-images/
total 755464
-rw-r--r--. 1 root root 46967296 May 10 16:17 coredns.tar
-rw-r--r--. 1 root root 293936128 May 10 16:17 etcd.tar
-rw-r--r--. 1 root root 136526848 May 10 16:17 kube-apiserver.tar
-rw-r--r--. 1 root root 126348288 May 10 16:17 kube-controller-manager.tar
-rw-r--r--. 1 root root 114260480 May 10 16:17 kube-proxy.tar
-rw-r--r--. 1 root root 54852608 May 10 16:17 kube-scheduler.tar
-rw-r--r--. 1 root root 692736 May 10 16:17 pause.tar
26. Copy all packages and image archives to the other hosts
for i in {130..132};do scp -r /data 192.168.73.$i:/data ;done    # this nests the tree, hence the /data/data/... paths below
27. Install Docker and the Kubernetes components on the other hosts
cd /data/data/bao/docker/ ; yum -y install *
cd /data/data/bao/k8s/ ; yum -y install *
28. Enable the services at boot and start Docker
systemctl enable docker kubelet
systemctl restart docker
29. Load the saved images on every host
cd /data/data/bao/k8s-images
docker load < kube-apiserver.tar
docker load < kube-scheduler.tar
docker load < kube-controller-manager.tar
docker load < kube-proxy.tar
docker load < coredns.tar
docker load < pause.tar
docker load < flannel.tar    # flannel.tar is not created until step 49; skip it on the first pass
docker load < etcd.tar
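Or load every archive in the directory with one loop:
for t in /data/data/bao/k8s-images/*.tar; do docker load < "$t"; done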
30. Verify the loaded images
[root@localhost k8s-images]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-apiserver v1.23.6 8fa62c12256d 3 weeks ago 135MB
k8s.gcr.io/kube-scheduler v1.23.6 595f327f224a 3 weeks ago 53.5MB
k8s.gcr.io/kube-controller-manager v1.23.6 df7b72818ad2 3 weeks ago 125MB
k8s.gcr.io/kube-proxy v1.23.6 4c0375452406 3 weeks ago 112MB
k8s.gcr.io/etcd 3.5.1-0 25f8c7f3da61 6 months ago 293MB
k8s.gcr.io/coredns/coredns v1.8.6 a4ca41631cc7 7 months ago 46.8MB
k8s.gcr.io/pause 3.6 6270bb605e12 8 months ago 683kB
31. Disable swap (all hosts)
swapoff -a
sysctl -p /etc/sysctl.d/k8s.conf
Comment out the swap entry in /etc/fstab (a sed one-liner for this follows this step), then:
mount -a
echo "KUBELET_EXTRA_ARGS=--fail-swap-on=false" > /etc/sysconfig/kubelet
32. Enable the services at boot and restart them
systemctl enable kubelet.service docker
systemctl restart docker
systemctl restart kubelet
33. Deploy haproxy
On the k8s-master1 node:
yum install haproxy -y
[root@k8s-node3 yum.repos.d]# cat /etc/haproxy/haproxy.cfg
# /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
log /dev/log local0
log /dev/log local1 notice
daemon
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
#defaults
listen stats 0.0.0.0:12345
mode http
log global
maxconn 10
stats enable
stats hide-version
stats refresh 30s
stats show-node
#stats auth admin:p@sssw0rd
stats uri /stats
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 1
timeout http-request 10s
timeout queue 20s
timeout connect 5s
timeout client 20s
timeout server 20s
timeout http-keep-alive 10s
timeout check 10s
#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend apiserver
bind 0.0.0.0:12567
mode tcp
option tcplog
default_backend kube-api-server
#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend kube-api-server
option httpchk GET /healthz
http-check expect status 200
mode tcp
option ssl-hello-chk
balance roundrobin
server k8s-master1 192.168.73.130:6443 check
server k8s-master2 192.168.73.131:6443 check
server k8s-master3 192.168.73.132:6443 check
[root@k8s-node3 yum.repos.d]# systemctl enable haproxy --now
[root@k8s-node3 yum.repos.d]# systemctl restart haproxy
34. Open the haproxy stats page to check cluster health
http://192.168.73.130:12345/stats
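Before the control plane is initialized, all backends on this page will show DOWN; that is expected. Once the cluster is up, the same health check can be run by hand through the load-balanced frontend (/healthz is anonymously readable in a default kubeadm cluster):
curl -k https://192.168.73.130:12567/healthz    # should print: ok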
35. Install the etcd cluster
See https://blog.csdn.net/liao__ran/article/details/124684090?spm=1001.2014.3001.5502 for the etcd cluster installation.
36. Generate the default kubeadm configuration
[root@localhost k8s-images]# cd /etc/kubernetes/
kubeadm config print init-defaults --component-configs \
KubeProxyConfiguration,KubeletConfiguration > kubeadm-config.yaml
37. The cluster initialization file, after modification
[root@localhost kubernetes]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.73.130 # this host's IP
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
imagePullPolicy: IfNotPresent
  name: k8s-master1 # this host's hostname
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.73.130:12567" # load-balancer IP and haproxy port (added)
controllerManager: {}
dns:
  type: CoreDNS # not accepted by kubeadm.k8s.io/v1beta3; causes the 'unknown field "type"' warning during init and can be removed
etcd:
external:
endpoints:
- https://192.168.73.130:2379
- https://192.168.73.131:2379
- https://192.168.73.132:2379
caFile: /data/etcd/ssl/ca.pem
certFile: /data/etcd/ssl/server.pem
keyFile: /data/etcd/ssl/server-key.pem
imageRepository: k8s.gcr.io # adjust the image repository to your environment
kind: ClusterConfiguration
kubernetesVersion: 1.23.6 # Kubernetes version
networking:
dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 # added (matches the flannel default)
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
acceptContentTypes: ""
burst: 0
contentType: ""
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
maxPerCore: null
min: null
tcpCloseWaitTimeout: null
tcpEstablishedTimeout: null
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: null
minSyncPeriod: 0s
syncPeriod: 0s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
strictARP: false
syncPeriod: 0s
tcpFinTimeout: 0s
tcpTimeout: 0s
udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: ipvs
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
udpIdleTimeout: 0s
winkernel:
enableDSR: false
networkName: ""
sourceVip: ""
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 0s
cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
flushFrequency: 0
options:
json:
infoBufferSize: "0"
verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
38. Validate the syntax in dry-run mode; proceed with initialization only if no errors are reported
kubeadm init --config kubeadm-config.yaml --dry-run
39. Initialize the cluster
The initialization command:
kubeadm init --config=kubeadm-config.yaml --upload-certs --ignore-preflight-errors=Swap
It failed with the following error:
[kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
Reference:
https://blog.51cto.com/8999a/5005591
Solution:
Change the Docker configuration on all master and node hosts to:
[root@k8s-master ~]# cat /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
Then restart Docker:
systemctl restart docker
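Confirm the driver change took effect before retrying:
docker info | grep -i 'cgroup driver'    # should print: Cgroup Driver: systemd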
40. Reset the environment
kubeadm reset
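With an external etcd cluster, kubeadm reset does not clear the data etcd already holds; if the failed init wrote anything there, wipe the keyspace before retrying. A sketch using the certificate paths from the kubeadm-config.yaml above:
ETCDCTL_API=3 etcdctl --endpoints=https://192.168.73.130:2379 \
  --cacert=/data/etcd/ssl/ca.pem \
  --cert=/data/etcd/ssl/server.pem \
  --key=/data/etcd/ssl/server-key.pem \
  del "" --prefix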
41. Run the initialization again
[root@localhost kubernetes]# kubeadm init --config=kubeadm-config.yaml --upload-certs --ignore-preflight-errors=Swap
W0511 01:33:54.949759 8830 strict.go:55] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta3", Kind:"ClusterConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "type"
[init] Using Kubernetes version: v1.23.6
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.73.130]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 10.578072 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
--control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
Additional control-plane nodes join the cluster with:
kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
--control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
Worker nodes join the cluster with:
kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
42. Join the remaining control-plane nodes
k8s-master2 joins the cluster:
[root@localhost k8s-images]# kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
> --control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master2 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.73.131 192.168.73.130]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Skipping etcd check in external mode
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[control-plane-join] using external etcd - no local stacked instance added
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master2 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master2 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
43. k8s-master3 joins the cluster
[root@localhost k8s-images]# kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
> --control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master3 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.73.132 192.168.73.130]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Skipping etcd check in external mode
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[control-plane-join] using external etcd - no local stacked instance added
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master3 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master3 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
44. Join the worker nodes
k8s-node1 joins the cluster:
[root@localhost data]# kubeadm join 192.168.73.130:12567 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
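On any master, the new node should now appear in the node list; it will stay NotReady until the flannel network from step 47 is deployed:
kubectl get nodes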
45. If the certificates expire, regenerate the certificates and token, then rejoin
Reference:
https://blog.csdn.net/hunheidaode/article/details/118341134
Or re-upload the certificates:
[root@k8s-master1 kubernetes]# kubeadm init phase upload-certs --upload-certs
W0803 17:38:07.670046 44778 version.go:102] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": dial tcp: lookup dl.k8s.io on [::1]:53: read udp [::1]:56684->[::1]:53: read: connection refused
W0803 17:38:07.670925 44778 version.go:103] falling back to the local client version: v1.21.3
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
63e54ed5b06bc1f7b93adaeda0df792b12064c11f9d274d9f3d2b5b012cbc584
Generate a new token:
[root@k8s-master1 kubernetes]# kubeadm token generate
9psojs.mqrgtud16qjymfok
Rejoin (substituting the token just generated):
kubeadm join 192.168.73.130:12567 --token 0d3mtm.x0o720qncyjl4c0n \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b --v=5
Or create a permanent token:
[root@localhost kubernetes]# kubeadm token create --ttl 0
jhdsm4.pq5yxkkw3xa2d765
[root@localhost kubernetes]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
2b8fmw.wtv48ygu93kjs4zd 1h 2022-05-10T20:03:20Z Proxy for managing TTL for the kubeadm-certs secret
5u3uio.x9q1noeofzyibq0r 1h 2022-05-10T19:34:09Z Proxy for managing TTL for the kubeadm-certs secret
abcdef.0123456789abcdef 23h 2022-05-11T17:34:09Z authentication,signing system:bootstrappers:kubeadm:default-node-token
jhdsm4.pq5yxkkw3xa2d765 authentication,signing system:bootstrappers:kubeadm:default-node-token
Rejoin (substituting the token just created):
kubeadm join 192.168.73.130:12567 --token 0d3mtm.x0o720qncyjl4c0n \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b --v=5
46. About the certificate key
https://www.kancloud.cn/pshizhsysu/kubernetes/2055662
47. Node network configuration (flannel)
Deploy flannel to bring up the pod network:
https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Check which images it needs:
[root@k8s-node1 data]# cat kube-flannel.yml |grep image
#image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
#image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.17.0
#image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.17.0
48. Pull the images
[root@k8s-node1 data]# docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
v1.0.1: Pulling from rancher/mirrored-flannelcni-flannel-cni-plugin
5758d4e389a3: Pull complete
5f157e942e60: Pull complete
Digest: sha256:5dd61f95e28fa7ef897ff2fa402ce283e5078d334401d2f62d00a568f779f2d5
Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
[root@k8s-node1 data]# docker pull rancher/mirrored-flannelcni-flannel:v0.17.0
v0.17.0: Pulling from rancher/mirrored-flannelcni-flannel
5758d4e389a3: Already exists
052816d6a684: Pull complete
96c6af7b4b9a: Pull complete
f28287b73045: Pull complete
6a87e811090e: Pull complete
3df5ffe16b5b: Pull complete
ed7fc7d3dae5: Pull complete
Digest: sha256:4bf659e449be809763b04f894f53a3d8610e00cf2cd979bb4fffc9470eb40d1b
Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel:v0.17.0
docker.io/rancher/mirrored-flannelcni-flannel:v0.17.0
49. Save the images, copy them to the other hosts, and load them there
[root@k8s-node1 k8s-images]# docker save rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 > flannel-cni-plugin.tar
[root@k8s-node1 k8s-images]# docker save rancher/mirrored-flannelcni-flannel:v0.17.0 > flannel.tar
for i in {130..132};do scp flannel* 192.168.73.$i:/data/data/bao/k8s-images/ ;done
docker load < /data/data/bao/k8s-images/flannel.tar
docker load < /data/data/bao/k8s-images/flannel-cni-plugin.tar
Bring up the network:
kubectl create -f kube-flannel.yml
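Watch the flannel DaemonSet pods come up, after which the nodes should turn Ready. With the v0.17.0 manifest the pods run in kube-system (newer manifests use a dedicated kube-flannel namespace):
kubectl get pods -n kube-system -l app=flannel -o wide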
50. Other errors encountered during installation
A kube-proxy error. The kubeadm-config.yaml above has already been adjusted for this; if kube-proxy still logs the error below, apply the fix that follows:
server.go:482] failed complete: unrecognized feature gate: SupportIPVSProxyMode
Edit the kube-proxy ConfigMap:
kubectl edit cm kube-proxy -n kube-system
Comment out the following two lines:
#featureGates:
# SupportIPVSProxyMode: true
Delete the kube-proxy pods and wait for them to be recreated.
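A concrete form of that delete (the kubeadm-deployed DaemonSet labels its pods k8s-app=kube-proxy):
kubectl -n kube-system delete pod -l k8s-app=kube-proxy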
51. Pod creation fails with the error
network: failed to set bridge addr: "cni0" already has an IP address different from 10.244.3.1/24
Caused by the cni0 bridge and the flannel interface being on different subnets.
Delete the cni0 bridge and let it be recreated automatically:
ifconfig cni0 down
ip link delete cni0
52. Testing
Simple smoke tests can follow these guides:
https://blog.csdn.net/liao__ran/article/details/102647786
https://blog.csdn.net/liao__ran/article/details/106009227
53. Remove the master taint so Pods can be scheduled on the master nodes
kubectl taint nodes --all node-role.kubernetes.io/master-
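Should a master need to be dedicated again later, the taint can be re-applied per node (on Kubernetes 1.24+ the key is node-role.kubernetes.io/control-plane instead):
kubectl taint nodes k8s-master1 node-role.kubernetes.io/master=:NoSchedule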