Reposted from: https://blog.csdn.net/liao__ran/article/details/124703425?spm=1001.2014.3001.5502
I. Environment Preparation
● Three hosts, each with 2 CPUs, 4 GB RAM, and a 20 GB disk
● 192.168.0.150 k8s-master1
● 192.168.0.151 k8s-master2
● 192.168.0.152 k8s-master3
1. OS version
cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
2. Docker version
docker -v
Docker version 20.10.15, build fd82621
3. Kubernetes version
kubelet --version
Kubernetes v1.23.6
kubectl version
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:49:13Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:43:11Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.6", GitCommit:"ad3338546da947756e8a88aa6822e9c11e7eac22", GitTreeState:"clean", BuildDate:"2022-04-14T08:48:05Z", GoVersion:"go1.17.9", Compiler:"gc", Platform:"linux/amd64"}
II. Basic Configuration
4. IP plan
k8s-master1 : 192.168.0.150
k8s-master2 : 192.168.0.151
k8s-master3 : 192.168.0.152
5. IP configuration
# on k8s-master1 (192.168.0.150)
nmcli connection modify ens192 ipv4.method manual ipv4.address 192.168.0.150/24 ipv4.gateway 192.168.0.1 connection.autoconnect yes
nmcli connection up ens192
# on k8s-master2 (192.168.0.151)
nmcli connection modify ens192 ipv4.method manual ipv4.address 192.168.0.151/24 ipv4.gateway 192.168.0.1 connection.autoconnect yes
nmcli connection up ens192
# on k8s-master3 (192.168.0.152)
nmcli connection modify ens192 ipv4.method manual ipv4.address 192.168.0.152/24 ipv4.gateway 192.168.0.1 connection.autoconnect yes
nmcli connection up ens192
6. Disable firewalld and SELinux (all hosts)
vi /etc/selinux/config
SELINUX=disabled
getenforce
setenforce 0
getenforce
systemctl stop firewalld
systemctl disable firewalld
7. Configure /etc/hosts resolution (all hosts) and set the hostnames
hostnamectl set-hostname k8s-150   # on 192.168.0.150
hostnamectl set-hostname k8s-151   # on 192.168.0.151
hostnamectl set-hostname k8s-152   # on 192.168.0.152
vim /etc/hosts
...... (existing entries omitted) ......
192.168.0.150 k8s-150
192.168.0.151 k8s-151
192.168.0.152 k8s-152
Copy the file to the other hosts:
for i in {151..152};do scp /etc/hosts 192.168.0.$i:/etc/hosts ;done
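The scp/ssh loops used throughout this guide assume passwordless SSH from the 150 host to the other two; a one-time setup sketch (default key path, no passphrase):
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa                    # generate a key pair without a passphrase
for i in 151 152; do ssh-copy-id root@192.168.0.$i; done    # push the public key to the other hosts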
8. Add a kernel parameters file
● /etc/sysctl.d/k8s.conf (all hosts)
vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
9. Apply the settings (all hosts)
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
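modprobe only loads the module for the current boot; a small sketch to have br_netfilter loaded automatically after reboots as well (the file name k8s.conf is arbitrary):
echo "br_netfilter" > /etc/modules-load.d/k8s.conf   # picked up by systemd-modules-load at boot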
III. Finding and Installing the Packages
Configure the Tsinghua mirror as the yum source and download the required packages.
10. Base package repository
vi /etc/yum.repos.d/qinghua-base.repo
[base]
name=CentOS-7 - Base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/7/os/x86_64/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=os
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#released updates
[updates]
name=CentOS-7 - Updates
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/7/updates/x86_64/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=updates
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#additional packages that may be useful
[extras]
name=CentOS-7 - Extras
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/7/extras/x86_64/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=extras
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-7 - Plus
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/7/centosplus/x86_64/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=centosplus
gpgcheck=0
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
11. Kubernetes package repository
vi /etc/yum.repos.d/k8s-base.repo
[k8s-base]
name=k8s-base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
12. Docker package repository
vi /etc/yum.repos.d/docker-base.repo
[docker-base]
name=docker-base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/7/x86_64/stable/
enabled=1
gpgcheck=0
13. Query and download the Kubernetes packages
● Query the available Kubernetes packages
● --showduplicates # with the list/search commands, show every version available in the repositories
● yum search kubeadm --showduplicates # list all available kubeadm versions
yum list |grep -E "kubeadm|kubectl|kubelet"
kubeadm.x86_64 1.23.6-0 k8s-base
kubectl.x86_64 1.23.6-0 k8s-base
kubelet.x86_64 1.23.6-0 k8s-base
● Download the Kubernetes packages
mkdir -p /data/bao/k8s
yum -y install --downloadonly --downloaddir=/data/bao/k8s kubeadm-1.23.6-0.x86_64 kubelet-1.23.6-0.x86_64 kubectl-1.23.6-0.x86_64
● Check the downloaded packages
ll /data/bao/k8s/
总用量 66388
-rw-r--r-- 1 root root 10382294 5月 26 13:24 17013403794d47f80ade3299c74c3a646d37f195c1057da4db74fd3fd78270f1-kubectl-1.23.6-0.x86_64.rpm
-rw-r--r-- 1 root root 9976054 5月 26 13:25 7f171021fcae441d9128b4c298a8082281e93864f5c137f097b89ec4749d7b7b-kubeadm-1.23.6-0.x86_64.rpm
-rw-r--r-- 1 root root 6173842 5月 20 06:07 9165e89a2de0f1a2acfb151177ac6985022ee0c2a8a78d45a4982aa1b11ffd68-cri-tools-1.24.0-0.x86_64.rpm
-rw-r--r-- 1 root root 191000 4月 4 2020 conntrack-tools-1.4.4-7.el7.x86_64.rpm
-rw-r--r-- 1 root root 21393566 5月 26 13:25 d184b7647df76898e431cfc9237dea3f8830e3e3398d17b0bf90c1b479984b3f-kubelet-1.23.6-0.x86_64.rpm
-rw-r--r-- 1 root root 19487362 9月 4 2020 db7cb5cb0b3f6875f54d10f02e625573988e3e91fd4fc5eef0b1876bb18604ad-kubernetes-cni-0.8.7-0.x86_64.rpm
-rw-r--r-- 1 root root 18400 4月 4 2020 libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm
-rw-r--r-- 1 root root 18212 4月 4 2020 libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm
-rw-r--r-- 1 root root 23584 8月 11 2017 libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm
-rw-r--r-- 1 root root 296632 8月 11 2017 socat-1.7.3.2-2.el7.x86_64.rpm
14. Query and download the Docker packages
● Query the available Docker packages
yum list |grep docker-ce
docker-ce.x86_64 3:20.10.16-3.el7 docker-base
docker-ce-cli.x86_64 1:20.10.16-3.el7 docker-base
docker-ce-rootless-extras.x86_64 20.10.16-3.el7 docker-base
docker-ce-selinux.noarch 17.03.3.ce-1.el7 docker-base
● Download the Docker packages
mkdir -p /data/bao/docker
yum -y install --downloadonly --downloaddir=/data/bao/docker docker-ce
yum -y install --downloadonly --downloaddir=/data/bao/docker libcgroup fuse3-libs
● Check the downloaded packages
ll /data/bao/docker/
总用量 111024
-rw-r--r-- 1 root root 261632 8月 23 2019 audit-2.8.5-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 104408 8月 23 2019 audit-libs-2.8.5-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 78256 8月 23 2019 audit-libs-python-2.8.5-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 302068 11月 12 2018 checkpolicy-2.5-8.el7.x86_64.rpm
-rw-r--r-- 1 root root 34647136 5月 6 05:05 containerd.io-1.6.4-3.1.el7.x86_64.rpm
-rw-r--r-- 1 root root 40816 7月 6 2020 container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm
-rw-r--r-- 1 root root 23446596 5月 12 22:12 docker-ce-20.10.16-3.el7.x86_64.rpm
-rw-r--r-- 1 root root 30838224 5月 12 22:12 docker-ce-cli-20.10.16-3.el7.x86_64.rpm
-rw-r--r-- 1 root root 8639896 5月 12 22:12 docker-ce-rootless-extras-20.10.16-3.el7.x86_64.rpm
-rw-r--r-- 1 root root 3930044 3月 11 00:47 docker-scan-plugin-0.17.0-3.el7.x86_64.rpm
-rw-r--r-- 1 root root 83764 4月 29 2020 fuse3-libs-3.7.1-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 55796 4月 29 2020 fuse-overlayfs-0.7.2-6.el7_8.x86_64.rpm
-rw-r--r-- 1 root root 67720 8月 23 2019 libcgroup-0.41-21.el7.x86_64.rpm
-rw-r--r-- 1 root root 166012 4月 4 2020 libselinux-2.5-15.el7.x86_64.rpm
-rw-r--r-- 1 root root 241168 4月 4 2020 libselinux-python-2.5-15.el7.x86_64.rpm
-rw-r--r-- 1 root root 154876 4月 4 2020 libselinux-utils-2.5-15.el7.x86_64.rpm
-rw-r--r-- 1 root root 154244 11月 12 2018 libsemanage-2.5-14.el7.x86_64.rpm
-rw-r--r-- 1 root root 115284 11月 12 2018 libsemanage-python-2.5-14.el7.x86_64.rpm
-rw-r--r-- 1 root root 304196 11月 12 2018 libsepol-2.5-10.el7.x86_64.rpm
-rw-r--r-- 1 root root 938736 4月 4 2020 policycoreutils-2.5-34.el7.x86_64.rpm
-rw-r--r-- 1 root root 468316 4月 4 2020 policycoreutils-python-2.5-34.el7.x86_64.rpm
-rw-r--r-- 1 root root 32880 7月 4 2014 python-IPy-0.75-6.el7.noarch.rpm
-rw-r--r-- 1 root root 509568 11月 18 2020 selinux-policy-3.13.1-268.el7_9.2.noarch.rpm
-rw-r--r-- 1 root root 7335504 11月 18 2020 selinux-policy-targeted-3.13.1-268.el7_9.2.noarch.rpm
-rw-r--r-- 1 root root 635184 11月 12 2018 setools-libs-3.3.8-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 83452 4月 29 2020 slirp4netns-0.4.3-4.el7_8.x86_64.rpm
15. Install dependency packages
yum install -y yum-utils device-mapper-persistent-data lvm2
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
16. Install Docker
cd /data/bao/docker/
yum -y install *
● Check the Docker version
docker -v
Docker version 20.10.16, build aa7e414
17. Install the Kubernetes components
cd /data/bao/k8s/
yum -y install *
18. Check the Kubernetes component versions
kubelet --version
Kubernetes v1.23.6
kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"24", GitVersion:"v1.23.6", GitCommit:"3ddd0f45aa91e2f30c70734b175631bec5b5825a", GitTreeState:"clean", BuildDate:"2022-05-24T12:26:19Z", GoVersion:"go1.18.2", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.4
The connection to the server localhost:8080 was refused - did you specify the right host or port?
kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"24", GitVersion:"v1.23.6", GitCommit:"3ddd0f45aa91e2f30c70734b175631bec5b5825a", GitTreeState:"clean", BuildDate:"2022-05-24T12:24:38Z", GoVersion:"go1.18.2", Compiler:"gc", Platform:"linux/amd64"}
19. Enable the services at boot and start them
systemctl enable docker kubelet
systemctl restart docker
20. Check the Docker status
systemctl status docker -l
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
Active: active (running) since 三 2022-06-01 16:41:56 CST; 10s ago
Docs: https://docs.docker.com
Main PID: 1909 (dockerd)
Memory: 33.7M
CGroup: /system.slice/docker.service
└─1909 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
6月 01 16:41:55 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:55.965486761+08:00" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
6月 01 16:41:55 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:55.965500076+08:00" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
6月 01 16:41:55 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:55.965507764+08:00" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
6月 01 16:41:55 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:55.977528960+08:00" level=info msg="Loading containers: start."
6月 01 16:41:56 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:56.125451314+08:00" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
6月 01 16:41:56 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:56.171713122+08:00" level=info msg="Loading containers: done."
6月 01 16:41:56 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:56.189061694+08:00" level=info msg="Docker daemon" commit=f756502 graphdriver(s)=overlay2 version=20.10.16
6月 01 16:41:56 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:56.189143194+08:00" level=info msg="Daemon has completed initialization"
6月 01 16:41:56 192-168-0-152 systemd[1]: Started Docker Application Container Engine.
6月 01 16:41:56 192-168-0-152 dockerd[1909]: time="2022-06-01T16:41:56.208259846+08:00" level=info msg="API listen on /var/run/docker.sock"
IV. Finding and Installing the Images
21. Check the image versions required to create the cluster
kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.23.6
k8s.gcr.io/kube-controller-manager:v1.23.6
k8s.gcr.io/kube-scheduler:v1.23.6
k8s.gcr.io/kube-proxy:v1.23.6
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
22. Pull the images from a domestic mirror
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.6 # if this fails, fall back to the direct docker pull commands below
Or pull each image directly (if you pull them another way, adjust the image tags as needed); a combined loop sketch follows the tag/rmi list below:
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
23. Retag the images and remove the mirror-tagged copies
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.6 k8s.gcr.io/kube-apiserver:v1.23.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6 k8s.gcr.io/kube-controller-manager:v1.23.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.6 k8s.gcr.io/kube-proxy:v1.23.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.6 k8s.gcr.io/kube-scheduler:v1.23.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0 k8s.gcr.io/etcd:3.5.1-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
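The pull/tag/rmi sequence above can also be done in one loop; a sketch assuming the same Aliyun mirror and the image list from step 21 (note that coredns is published under a sub-path on k8s.gcr.io):
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
for img in kube-apiserver:v1.23.6 kube-controller-manager:v1.23.6 kube-scheduler:v1.23.6 kube-proxy:v1.23.6 pause:3.6 etcd:3.5.1-0 coredns:v1.8.6; do
  docker pull $MIRROR/$img
  if [ "${img%%:*}" = "coredns" ]; then
    docker tag $MIRROR/$img k8s.gcr.io/coredns/$img   # coredns lives at k8s.gcr.io/coredns/coredns
  else
    docker tag $MIRROR/$img k8s.gcr.io/$img
  fi
  docker rmi $MIRROR/$img                             # drop the mirror-tagged copy
done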
24. Save the images and copy them to the other hosts
mkdir -p /data/bao/k8s-images;cd /data/bao/k8s-images
docker save k8s.gcr.io/kube-apiserver:v1.23.6 > kube-apiserver.tar
docker save k8s.gcr.io/kube-scheduler:v1.23.6 > kube-scheduler.tar
docker save k8s.gcr.io/kube-controller-manager:v1.23.6 > kube-controller-manager.tar
docker save k8s.gcr.io/kube-proxy:v1.23.6 > kube-proxy.tar
docker save k8s.gcr.io/coredns/coredns:v1.8.6 > coredns.tar
docker save k8s.gcr.io/pause:3.6 > pause.tar
docker save k8s.gcr.io/etcd:3.5.1-0 > etcd.tar
25. Check the saved image archives
[root@localhost k8s-images]# ll /data/bao/k8s-images/
总用量 755464
-rw-r--r-- 1 root root 46967296 6月 1 17:06 coredns.tar
-rw-r--r-- 1 root root 293936128 6月 1 17:06 etcd.tar
-rw-r--r-- 1 root root 136526848 6月 1 17:06 kube-apiserver.tar
-rw-r--r-- 1 root root 126348288 6月 1 17:06 kube-controller-manager.tar
-rw-r--r-- 1 root root 114260480 6月 1 17:06 kube-proxy.tar
-rw-r--r-- 1 root root 54852608 6月 1 17:06 kube-scheduler.tar
-rw-r--r-- 1 root root 692736 6月 1 17:06 pause.tar
26. Copy all packages and image archives to the other hosts
for i in {151..152};do scp -r /data/* 192.168.0.$i:/data/ ;done
Or sync with rsync (requires rsync on every host):
for i in {151..152}; do rsync -aXSH --delete /data/* 192.168.0.$i:/data/ ;done
27. Install Docker and the Kubernetes components on the other hosts
cd /data/bao/docker/ ; yum -y install *
cd /data/bao/k8s/ ; yum -y install *
● Or run it remotely from the 150 host:
for i in 151 152 ;do ssh 192.168.0.$i "cd /data/bao/docker/ ; yum -y install * && cd /data/bao/k8s/ ; yum -y install *" ; done
28. Enable and start the services on all hosts
systemctl enable docker kubelet
systemctl restart docker
29. Load the saved images on every host (a loop equivalent follows the list below)
cd /data/bao/k8s-images
docker load < kube-apiserver.tar
docker load < kube-scheduler.tar
docker load < kube-controller-manager.tar
docker load < kube-proxy.tar
docker load < coredns.tar
docker load < pause.tar
docker load < etcd.tar
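An equivalent one-liner, assuming every archive sits in the same directory:
for f in /data/bao/k8s-images/*.tar; do docker load < "$f"; done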
30. Check the loaded Docker images
[root@localhost k8s-images]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-apiserver v1.23.6 8fa62c12256d 6 weeks ago 135MB
k8s.gcr.io/kube-controller-manager v1.23.6 df7b72818ad2 6 weeks ago 125MB
k8s.gcr.io/kube-proxy v1.23.6 4c0375452406 6 weeks ago 112MB
k8s.gcr.io/kube-scheduler v1.23.6 595f327f224a 6 weeks ago 53.5MB
k8s.gcr.io/etcd 3.5.1-0 25f8c7f3da61 7 months ago 293MB
k8s.gcr.io/coredns/coredns v1.8.6 a4ca41631cc7 7 months ago 46.8MB
k8s.gcr.io/pause 3.7 6270bb605e12 9 months ago 683kB
31. Disable swap (all hosts)
swapoff -a
sysctl -p /etc/sysctl.d/k8s.conf
● Comment out the swap entry in /etc/fstab (a more general variant is sketched after this block)
sed -i 's@/dev/mapper/rhel-swap@#/dev/mapper/rhel-swap@' /etc/fstab
mount -a
echo "KUBELET_EXTRA_ARGS=--fail-swap-on=false" > /etc/sysconfig/kubelet
32. Enable the services at boot
systemctl enable kubelet.service docker
systemctl restart docker
systemctl restart kubelet
33. Deploy HAProxy
● On the k8s-150 node
yum install haproxy -y
cat /etc/haproxy/haproxy.cfg # back up the original file, empty it, then paste in the content below
# /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    log /dev/log local0
    log /dev/log local1 notice
    daemon
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
#defaults
listen stats 0.0.0.0:12345
    mode http
    log global
    maxconn 10
    stats enable
    stats hide-version
    stats refresh 30s
    stats show-node
    #stats auth admin:p@sssw0rd
    stats uri /stats
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 1
    timeout http-request 10s
    timeout queue 20s
    timeout connect 5s
    timeout client 20s
    timeout server 20s
    timeout http-keep-alive 10s
    timeout check 10s
#---------------------------------------------------------------------
# apiserver frontend which proxys to the masters
#---------------------------------------------------------------------
frontend apiserver
    bind 0.0.0.0:12567
    mode tcp
    option tcplog
    default_backend kube-api-server
#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend kube-api-server
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance roundrobin
    server k8s-master1 192.168.0.150:6443 check
    server k8s-master2 192.168.0.151:6443 check
    server k8s-master3 192.168.0.152:6443 check
systemctl enable haproxy --now
systemctl restart haproxy
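The edited configuration can be syntax-checked at any time; the -c flag only validates the file and does not start the proxy:
haproxy -c -f /etc/haproxy/haproxy.cfg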
34. Open the HAProxy stats page to check the cluster health
http://192.168.0.150:12345/stats
35. Install the etcd cluster
● Reference document for installing the etcd cluster:
● etcd version
etcdctl --version
etcdctl version: 3.5.1
API version: 2
● OS version
[root@localhost etcd]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
A. Install the etcd package
● Download link: https://pan.baidu.com/s/1x-Xr6j3n0R7a0OxgVVX07w?pwd=2e39
● Extraction code: 2e39
● Upload the etcd tarball to the /data/ directory
cd /data && tar -xzf etcd-v3.5.1-linux-amd64.tar.gz
mv etcd-v3.5.1-linux-amd64 etcd
B. Create the etcd configuration files
● etcd config file on k8s-150: /data/etcd/etcd.conf
○ mkdir /data/etcddata && vi /data/etcd/etcd.conf
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/data/etcddata"
ETCD_LISTEN_PEER_URLS="https://192.168.0.150:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.150:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.150:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.150:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.0.150:2380,etcd02=https://192.168.0.151:2380,etcd03=https://192.168.0.152:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
● etcd config file on k8s-151: /data/etcd/etcd.conf
○ mkdir /data/etcddata && vi /data/etcd/etcd.conf
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/data/etcddata"
ETCD_LISTEN_PEER_URLS="https://192.168.0.151:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.151:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.151:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.151:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.0.150:2380,etcd02=https://192.168.0.151:2380,etcd03=https://192.168.0.152:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
● etcd config file on k8s-152: /data/etcd/etcd.conf
○ mkdir /data/etcddata && vi /data/etcd/etcd.conf
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/data/etcddata"
ETCD_LISTEN_PEER_URLS="https://192.168.0.152:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.152:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.152:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.152:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.0.150:2380,etcd02=https://192.168.0.151:2380,etcd03=https://192.168.0.152:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
● etcd systemd unit file: /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/data/etcd/
EnvironmentFile=/data/etcd/etcd.conf
ExecStart=/data/etcd/etcd \
--initial-cluster-state=new \
--cert-file=/data/etcd/ssl/server.pem \
--key-file=/data/etcd/ssl/server-key.pem \
--peer-cert-file=/data/etcd/ssl/server.pem \
--peer-key-file=/data/etcd/ssl/server-key.pem \
--trusted-ca-file=/data/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/data/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
C. Create the TLS certificates
cd /data/
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
mv cfssl-certinfo_1.6.1_linux_amd64 cfssl-certinfo
mv cfssl_1.6.1_linux_amd64 cfssl
mv cfssljson_1.6.1_linux_amd64 cfssljson
chmod +x cfssl*
● Full content of tls.sh (adjust the IP addresses first; include as many addresses as possible in the hosts list)
vi /data/tls.sh
# etcd
# tls.sh writes ca-config.json, ca-csr.json and server-csr.json via here-documents and then
# generates the etcd certificates with cfssl; the heredoc bodies are not preserved in this copy,
# so an equivalent sketch follows below.
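Since the original script body is not shown, the following is only a sketch of an equivalent tls.sh under common assumptions: the profile name "www", the 87600h expiry, and the CN/names fields are illustrative, and the hosts list must contain every address the etcd members will be reached on (add spares for any future members).
cd /data
cat > ca-config.json <<EOF
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [{ "C": "CN", "L": "Beijing", "ST": "Beijing" }]
}
EOF
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": ["127.0.0.1", "192.168.0.150", "192.168.0.151", "192.168.0.152"],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [{ "C": "CN", "L": "Beijing", "ST": "Beijing" }]
}
EOF
# Generate the CA, then the combined server/peer certificate, and place them where etcd.service expects them.
./cfssl gencert -initca ca-csr.json | ./cfssljson -bare ca
./cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | ./cfssljson -bare server
mkdir -p /data/etcd/ssl
cp ca.pem server.pem server-key.pem /data/etcd/ssl/
The source also stops before distributing and starting etcd, so these steps are likewise an assumed sketch (each host keeps its own etcd.conf from step B):
for i in 151 152; do
  scp -r /data/etcd 192.168.0.$i:/data/    # then replace etcd.conf on each host with that host's own version
  scp /usr/lib/systemd/system/etcd.service 192.168.0.$i:/usr/lib/systemd/system/
done
# On all three hosts:
systemctl daemon-reload && systemctl enable etcd --now
# Health check from any host:
/data/etcd/etcdctl --endpoints=https://192.168.0.150:2379,https://192.168.0.151:2379,https://192.168.0.152:2379 \
  --cacert=/data/etcd/ssl/ca.pem --cert=/data/etcd/ssl/server.pem --key=/data/etcd/ssl/server-key.pem endpoint health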
37. The kubeadm config file for cluster initialization
● Edit the file on the 150 host and run the initialization there; the other masters are joined later and must not be initialized.
cat /etc/kubernetes/kubeadm-config.yaml
● Full file content (the defaults can be generated with kubeadm config print init-defaults --component-configs KubeletConfiguration,KubeProxyConfiguration > kubeadm-config.yaml and then edited as annotated below):
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.150 # this host's IP (line 12)
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: k8s-150 # this host's hostname (line 17)
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.0.150:12567" # VIP and haproxy port; new line added below line 24 (becomes line 25)
controllerManager: {}
dns:
  type: CoreDNS # added (line 28)
etcd:
#  local:                                   # comment out this line (line 30)
#    dataDir: /var/lib/etcd                 # comment out this line (line 31)
  external:                                 # added (becomes line 32)
    endpoints:                              # added (becomes line 33)
    - https://192.168.0.150:2379            # added
    - https://192.168.0.151:2379            # added
    - https://192.168.0.152:2379            # added
    caFile: /data/etcd/ssl/ca.pem           # added
    certFile: /data/etcd/ssl/server.pem     # added
    keyFile: /data/etcd/ssl/server-key.pem  # added (becomes line 39)
imageRepository: k8s.gcr.io # change: set the image repository to match your environment (line 40)
kind: ClusterConfiguration
kubernetesVersion: 1.23.6 # change: the Kubernetes version (line 42)
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 # added (line 45)
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
  qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
  maxPerCore: null
  min: null
  tcpCloseWaitTimeout: null
  tcpEstablishedTimeout: null
#detectLocal:              # comment out this line (line 64); these three lines exist only in 1.24.1, not in 1.23.6
#  bridgeInterface: ""     # comment out this line (line 65)
#  interfaceNamePrefix: "" # comment out this line (line 66)
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 0s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: ipvs # set the proxy mode to ipvs (line 85)
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
udpIdleTimeout: 0s
winkernel:
  enableDSR: false
#  forwardHealthCheckVip: false # comment out this line (line 95); exists only in 1.24.1, not in 1.23.6
  networkName: "" # line 93 in the original; no change needed, shown only as a position reference
#  rootHnsEndpointName: ""      # comment out this line (line 97); exists only in 1.24.1, not in 1.23.6
  sourceVip: ""
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
38. Validate the syntax with a dry run; only initialize once it passes without errors
cd /etc/kubernetes/
kubeadm init --config /etc/kubernetes/kubeadm-config.yaml --dry-run
● Note: if kubeadm init fails with "unknown service runtime.v1alpha2.RuntimeService", see: https://blog.csdn.net/weixin_40668374/article/details/124849090
39. Initialize the cluster
● Initialization command (the full log is shown in step 41)
kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --upload-certs --ignore-preflight-errors=Swap
● Error seen during initialization and its fix
[kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
Reference: https://blog.51cto.com/8999a/5005591
● Fix (worth applying even if the error does not occur):
On all master and node hosts, change the Docker configuration to:
cat /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
● Then restart Docker
systemctl restart docker
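A quick check that the daemon picked up the new cgroup driver:
docker info 2>/dev/null | grep -i "cgroup driver"   # should print: Cgroup Driver: systemd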
40. Reset the environment
kubeadm reset
41. Run the initialization again
A. Initialize (making 150 the first control-plane node)
● Pay attention to the join commands printed at the end of the output
kubeadm init --config=kubeadm-config.yaml --upload-certs --ignore-preflight-errors=Swap
W0608 14:30:44.899416 5150 strict.go:55] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta3", Kind:"ClusterConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "type"
[init] Using Kubernetes version: v1.23.6
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-150 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.150]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 7.023184 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
a2f8bee77de0ca3f54dc28f0bd5f6001b4c3b8e1890c980265d86d644b577622
[mark-control-plane] Marking the node k8s-150 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-150 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
--control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
B. Join the other control-plane nodes (run on each of the other masters)
● (The command below is taken from the initialization output above.)
kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
--control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
C. Join worker nodes
● (The command below is taken from the initialization output above.)
kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
42. Join the other master nodes to the cluster
A. k8s-master2 joins the cluster
[root@localhost k8s-images]# kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
> --control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master2 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.151 192.168.0.150]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Skipping etcd check in external mode
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[control-plane-join] using external etcd - no local stacked instance added
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master2 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master2 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
● On k8s-master2 (192.168.0.151), run the kubeconfig commands from the output above to set up kubectl access:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
B. k8s-master3 joins the cluster
[root@localhost k8s-images]# kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b \
> --control-plane --certificate-key 5de74b8a94efe7b0dc7f8d27a4310ba69a1d75eedef06981b6461c171798f630
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master3 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.152 192.168.0.150]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Skipping etcd check in external mode
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[control-plane-join] using external etcd - no local stacked instance added
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master3 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master3 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
● On k8s-master3 (192.168.0.152), run the kubeconfig commands from the output above to set up kubectl access:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
43. Join worker nodes to the cluster
k8s-node1 joins the cluster
[root@localhost data]# kubeadm join 192.168.0.150:12567 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
44. Check the nodes (NotReady is expected until the pod network from step 47 is deployed)
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-150 NotReady control-plane,master 3h31m v1.23.6
k8s-151 NotReady control-plane,master 40m v1.23.6
k8s-152 NotReady control-plane,master 40m v1.23.6
kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-64897985d-fxbbh 0/1 Pending 0 157m
kube-system coredns-64897985d-q5xlj 0/1 Pending 0 157m
kube-system kube-apiserver-k8s-150 1/1 Running 4 3h31m
kube-system kube-apiserver-k8s-151 1/1 Running 1 41m
kube-system kube-apiserver-k8s-152 1/1 Running 1 40m
kube-system kube-controller-manager-k8s-150 1/1 Running 4 3h31m
kube-system kube-controller-manager-k8s-151 1/1 Running 1 41m
kube-system kube-controller-manager-k8s-152 1/1 Running 1 40m
kube-system kube-proxy-7pw2s 1/1 Running 1 40m
kube-system kube-proxy-87l8z 1/1 Running 3 157m
kube-system kube-proxy-r6lp9 1/1 Running 1 41m
kube-system kube-scheduler-k8s-150 1/1 Running 4 3h31m
kube-system kube-scheduler-k8s-151 1/1 Running 1 41m
kube-system kube-scheduler-k8s-152 1/1 Running 1 40m
45. If the certificates or token have expired, regenerate them and then join again
Reference:
https://blog.csdn.net/hunheidaode/article/details/118341134
Or regenerate the certificates:
[root@k8s-master1 kubernetes]# kubeadm init phase upload-certs --upload-certs
W0803 17:38:07.670046 44778 version.go:102] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": dial tcp: lookup dl.k8s.io on [::1]:53: read udp [::1]:56684->[::1]:53: read: connection refused
W0803 17:38:07.670925 44778 version.go:103] falling back to the local client version: v1.21.3
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
63e54ed5b06bc1f7b93adaeda0df792b12064c11f9d274d9f3d2b5b012cbc584
Regenerate the token:
[root@k8s-master1 kubernetes]# kubeadm token generate
9psojs.mqrgtud16qjymfok
Join again:
kubeadm join 192.168.0.150:12567 --token 0d3mtm.x0o720qncyjl4c0n \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b --v=5
Or create a token that never expires:
[root@localhost kubernetes]# kubeadm token create --ttl 0
jhdsm4.pq5yxkkw3xa2d765
[root@localhost kubernetes]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
2b8fmw.wtv48ygu93kjs4zd 1h 2022-05-10T20:03:20Z Proxy for managing TTL for the kubeadm-certs secret
5u3uio.x9q1noeofzyibq0r 1h 2022-05-10T19:34:09Z Proxy for managing TTL for the kubeadm-certs secret
abcdef.0123456789abcdef 23h 2022-05-11T17:34:09Z authentication,signing system:bootstrappers:kubeadm:default-node-token
jhdsm4.pq5yxkkw3xa2d765 authentication,signing system:bootstrappers:kubeadm:default-node-token
Join again:
kubeadm join 192.168.0.150:12567 --token 0d3mtm.x0o720qncyjl4c0n \
--discovery-token-ca-cert-hash sha256:4977075cf76c9e7b7a3a2b8ffa9098cc083fd1bc9d26d414527682bcfbf63e4b --v=5
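For worker joins there is also a shortcut that creates a fresh token and prints a complete, ready-to-run join command (available in kubeadm 1.23):
kubeadm token create --print-join-command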
46. About the certificate key
https://www.kancloud.cn/pshizhsysu/kubernetes/2055662
47. Node network configuration (Flannel)
Deploy Flannel to bring up the pod network.
http://47.254.230.138:8080/wwwwww/raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kube-flannel.docx
Open the page or the document above and copy its entire content into a local kube-flannel.yml file.
Check which images are required:
[root@k8s-node1 data]# cat kube-flannel.yml |grep image
#image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 # change to the image version you actually downloaded
#image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.17.0 # change to the image version you actually downloaded
#image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.17.0 # change to the image version you actually downloaded
48. Pull the images
[root@k8s-node1 data]# docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
v1.0.1: Pulling from rancher/mirrored-flannelcni-flannel-cni-plugin
5758d4e389a3: Pull complete
5f157e942e60: Pull complete
Digest: sha256:5dd61f95e28fa7ef897ff2fa402ce283e5078d334401d2f62d00a568f779f2d5
Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
[root@k8s-node1 data]# docker pull rancher/mirrored-flannelcni-flannel:v0.17.0
v0.17.0: Pulling from rancher/mirrored-flannelcni-flannel
5758d4e389a3: Already exists
052816d6a684: Pull complete
96c6af7b4b9a: Pull complete
f28287b73045: Pull complete
6a87e811090e: Pull complete
3df5ffe16b5b: Pull complete
ed7fc7d3dae5: Pull complete
Digest: sha256:4bf659e449be809763b04f894f53a3d8610e00cf2cd979bb4fffc9470eb40d1b
Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel:v0.17.0
docker.io/rancher/mirrored-flannelcni-flannel:v0.17.0
49. Save the images, copy them to the other hosts, and load them there
docker save rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 > /data/bao/k8s-images/flannel-cni-plugin.tar
docker save rancher/mirrored-flannelcni-flannel:v0.17.0 > /data/bao/k8s-images/flannel.tar
cd /data/bao/k8s-images/
for i in {151..152};do scp flannel* 192.168.0.$i:/data/bao/k8s-images/ ;done
● On the other hosts (151 and 152):
docker load < /data/bao/k8s-images/flannel.tar
docker load < /data/bao/k8s-images/flannel-cni-plugin.tar
● Bring up the network (run on 150 only):
kubectl create -f kube-flannel.yml
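After applying the manifest, a quick check that the flannel pods start on every node and that the nodes turn Ready (the namespace depends on the manifest version, hence -A):
kubectl get pods -A -o wide | grep flannel
kubectl get nodes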
50. Other errors during installation
A kube-proxy error. The relevant setting was already handled in the kubeadm-config.yaml above; if the error below still appears (now or later), apply the following change:
server.go:482] failed complete: unrecognized feature gate: SupportIPVSProxyMode
Edit the kube-proxy ConfigMap:
kubectl edit cm kube-proxy -n kube-system
Comment out the following two lines:
#featureGates:
# SupportIPVSProxyMode: true
Delete the kube-proxy pods and wait for them to be recreated (a command sketch follows).
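A concrete way to do that, assuming the standard k8s-app=kube-proxy label on the DaemonSet pods:
kubectl -n kube-system delete pod -l k8s-app=kube-proxy   # the DaemonSet recreates the pods with the updated config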
51. Pod creation failure
network: failed to set bridge addr: "cni0" already has an IP address different from 10.244.3.1/24
This happens when the cni0 interface is on a different subnet from the flannel interface.
Delete the cni0 interface and let it be recreated automatically:
ifconfig cni0 down
ip link delete cni0
52. Testing
A simple smoke test can follow these guides:
https://blog.csdn.net/liao__ran/article/details/102647786
https://blog.csdn.net/liao__ran/article/details/106009227
53. Remove the master taint so Pods can be scheduled onto the master nodes:
kubectl taint nodes --all node-role.kubernetes.io/master-