CentOS 8 has been out for a while and many people have started using it. Kubernetes 1.20.4 has also been released for some time, so in this post I use kubeadm to deploy Kubernetes on CentOS 8.4.
[Run on all nodes]
1.1 Disable unneeded services
Disable SELinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
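SELinux stays in enforcing mode until the next reboot; to also turn it off for the current session (a common companion step, added here as a sketch):
setenforce 0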
Disable swap
sed -i "/swap/{s/^/#/g}" /etc/fstab
swapoff -a
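To confirm swap is fully off, you can check with the standard tools (swapon prints nothing when no swap is active):
swapon --show
free -m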
Disable the firewall
[root@k8s-master1 ~]# systemctl stop firewalld
[root@k8s-master1 ~]# systemctl disable firewalld
Disable other unneeded services
systemctl disable auditd
systemctl disable irqbalance
systemctl disable tuned
Check the OS release
[root@k8s-master1 ~]# cat /etc/redhat-release
Red Hat Enterprise Linux release 8.4 (Ootpa)
Set the hostname
hostnamectl set-hostname k8s-master1
Edit /etc/hosts
cat >> /etc/hosts << EOF
192.168.10.21 k8s-master1
192.168.10.22 k8s-node1
192.168.10.23 k8s-node2
EOF
Enable IP forwarding and bridge filtering
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
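br_netfilter is not reloaded automatically after a reboot. One way to persist it, plus a quick check of the new settings (the file name under modules-load.d is my own choice):
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward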
ip_vs modules: the module-loading script was truncated here; the full script is given under "Load kernel modules" in section 1.4 below.
1.3 Dependency installation
Yum repositories
mkdir /etc/yum.repos.d/bak && cp -rf /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
# alternatively, use the Tencent mirrors (note that these URLs point to CentOS 7 repo files):
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
yum clean all && yum makecache
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install dependencies and update the system
yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
yum -y update
1.4 System tuning
Kernel tuning
Append kernel parameters to /etc/sysctl.conf; the parameter list was truncated in the original, so a typical set is sketched below.
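A commonly used set for Kubernetes nodes might look like this (the values are assumptions on my part, not from the original; tune them for your environment):
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.swappiness = 0
vm.max_map_count = 262144
fs.file-max = 655350
net.core.somaxconn = 32768
EOF
sysctl -p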
File descriptor limit
ulimit -n 655350
To make it permanent, modify one of the following two files. The limits.conf entries were truncated in the original; a sketch is given below.
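As an assumed example (the exact values are mine, mirroring the ulimit above), raising the open-file limit for all users:
cat >> /etc/security/limits.conf << EOF
* soft nofile 655350
* hard nofile 655350
EOF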
Or append it to /etc/profile:
echo ulimit -n 655350 >>/etc/profile
Load kernel modules
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
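To verify that the modules actually loaded (on CentOS 8 kernels nf_conntrack_ipv4 may have been merged into nf_conntrack, which the script tolerates by skipping missing modules):
lsmod | grep -E 'ip_vs|nf_conntrack'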
[Install on all nodes]
2.1 Install and configure Docker
# Configure the Docker yum repository
yum -y install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Install Docker with yum
yum -y install docker-ce-19.03.6-3.el8 docker-ce-cli-19.03.6-3.el8 containerd.io
If the containerd.io version conflicts, force the installation instead:
yum -y install docker-ce-20.10.9-3.el8 docker-ce-cli-20.10.9-3.el8 containerd.io --allowerasing
Change the Docker cgroup driver to systemd
mkdir /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Start Docker
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
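kubeadm expects the kubelet and the container runtime to use the same cgroup driver; a quick check that the change took effect:
docker info | grep -i 'cgroup driver'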
If you hit the following error while installing docker-ce:
[root@k8s-master1 ~]# yum install -y docker-ce-19.03.15-3.el8
Updating Subscription Management repositories.
Unable to read consumer identity
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
Last metadata expiration check: 0:01:06 ago on Tue 22 Mar 2022 08:06:55 PM CST.
Error:
Problem: package docker-ce-3:19.03.15-3.el8.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed
- package containerd.io-1.3.7-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.3.7-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.3.9-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.3.9-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.10-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.10-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.11-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.11-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.12-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.12-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.13-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.13-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.3-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.3-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.3-3.2.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.3-3.2.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.4-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.4-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.6-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.6-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.8-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.8-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.9-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.4.9-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.5.10-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- package containerd.io-1.5.10-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.2-1.module_el8.5.0+911+f19012f9.x86_64
- problem with installed package buildah-1.19.7-1.module+el8.4.0+10607+f4da7515.x86_64
- package buildah-1.19.7-1.module+el8.4.0+10607+f4da7515.x86_64 requires runc >= 1.0.0-26, but none of the providers can be installed
- package buildah-1.22.3-2.module_el8.5.0+911+f19012f9.x86_64 requires runc >= 1.0.0-26, but none of the providers can be installed
- package containerd.io-1.3.7-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.3.7-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.3.9-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.3.9-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.10-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.10-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.11-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.11-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.12-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.12-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.13-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.13-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.3-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.3-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.3-3.2.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.3-3.2.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.4-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.4-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.6-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.6-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.8-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.8-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.9-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.4.9-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.5.10-3.1.el8.x86_64 conflicts with runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- package containerd.io-1.5.10-3.1.el8.x86_64 obsoletes runc provided by runc-1.0.0-70.rc92.module+el8.4.0+10607+f4da7515.x86_64
- conflicting requests
- package runc-1.0.0-56.rc5.dev.git2abd837.module_el8.3.0+569+1bada2e4.x86_64 is filtered out by modular filtering
- package runc-1.0.0-66.rc10.module_el8.5.0+1004+c00a74f5.x86_64 is filtered out by modular filtering
- package runc-1.0.0-72.rc92.module_el8.5.0+1006+8d0e68a2.x86_64 is filtered out by modular filtering
(try to add '--allowerasing' to command line to replace conflicting packages or '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)
[root@k8s-master1 ~]#
Solution
[root@k8s-master1 ~]# wget https://download.docker.com/linux/centos/7/x86_64/edge/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
[root@k8s-master1 ~]# yum install containerd.io-1.2.6-3.3.el7.x86_64.rpm
Or force-install docker-ce, which also succeeds:
[root@k8s-master1 yum.repos.d]# yum install -y docker-ce-19.03.15-3.el8 --allowerasing
Add the Alibaba Cloud Kubernetes repository
[root@k8s-master1 yum.repos.d]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y makecache
Install kubeadm, kubelet and kubectl
yum install kubectl-1.20.4 kubelet-1.20.4 kubeadm-1.20.4 -y
rpm -aq kubelet kubectl kubeadm
systemctl enable kubelet
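A quick sanity check that the expected 1.20.4 versions are installed (standard CLI flags):
kubeadm version -o short
kubelet --version
kubectl version --client --short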
[Run on the master node only]
kubeadm init --kubernetes-version=1.20.4 \
--apiserver-advertise-address=192.168.10.21 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.10.0.0/16 --pod-network-cidr=10.122.0.0/16
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
[WARNING FileExisting-tc]: tc not found in system path
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.9. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.10.0.1 192.168.10.21]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master1 localhost] and IPs [192.168.10.21 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master1 localhost] and IPs [192.168.10.21 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 105.504218 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 9bj03d.emo7rnjry4ivsj3r
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.10.21:6443 --token 9bj03d.emo7rnjry4ivsj3r \
--discovery-token-ca-cert-hash sha256:74bd6b3eda417d4606a233520746926a56d9c85e75cda4b8cb3902b75afab314
[root@k8s-master1 ~]#
Configure kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
If you are root, you can instead follow the init output and set an environment variable:
export KUBECONFIG=/etc/kubernetes/admin.conf
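To verify that kubectl can reach the new control plane:
kubectl cluster-info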
Check the nodes
As shown below, there is one master node; it is NotReady because no pod network has been deployed yet.
[root@t-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
t-master NotReady control-plane,master 3h10m v1.20.4
4.2 Status notes
Network
[root@t-master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:82:95:fb brd ff:ff:ff:ff:ff:ff
inet 10.10.239.47/24 brd 10.10.239.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe82:95fb/64 scope link
valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether d6:63:14:c1:f9:61 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether aa:6f:f7:42:ce:01 brd ff:ff:ff:ff:ff:ff
inet 10.1.0.1/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.1.0.10/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
pod
[root@t-master ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-54d67798b7-hf4t8 0/1 Pending 0 2m10s
coredns-54d67798b7-m5ffl 0/1 Pending 0 2m10s
etcd-t-master 1/1 Running 0 2m17s
kube-apiserver-t-master 1/1 Running 0 2m17s
kube-controller-manager-t-master 1/1 Running 0 2m17s
kube-proxy-xj5l4 1/1 Running 0 2m11s
kube-scheduler-t-master 1/1 Running 0 2m17s
Notes:
After a short wait, the pods in kube-system gradually turn Running.
However, coredns stays Pending; kubectl describe shows it cannot be scheduled because the node still carries a taint.
[root@t-master ~]# kubectl describe pod -n kube-system
Warning FailedScheduling 3m35s default-scheduler 0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.
We are not going to change anything on the master; this resolves itself once the pod network is deployed and the worker nodes have joined.
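To inspect the taints on the master directly (t-master is the node name from the output above):
kubectl describe node t-master | grep -i taint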
4.3 Join the worker nodes
[Run on each worker node, using the kubeadm join command printed by your own kubeadm init output]
kubeadm join 10.10.239.47:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
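If the token has expired (they are valid for 24 hours by default) or the original join command was lost, a fresh one can be printed on the master:
kubeadm token create --print-join-command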
4.4 Flannel network
[Run on the master node] Save the following manifest as kube-flannel.yml:
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.122.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
Apply the manifest
kubectl create -f kube-flannel.yml
Check the status
A flannel pod is now running on every node and coredns has turned Running (the sample output below is from a larger multi-master cluster, but the pattern is the same).
kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-59d64cd4d4-8h9b7 1/1 Running 0 16h
coredns-59d64cd4d4-mpfln 1/1 Running 0 16h
etcd-crust-m01 1/1 Running 2 24h
etcd-crust-m02 1/1 Running 0 24h
etcd-crust-m03 1/1 Running 0 24h
kube-apiserver-crust-m01 1/1 Running 2 24h
kube-apiserver-crust-m02 1/1 Running 0 24h
kube-apiserver-crust-m03 1/1 Running 0 24h
kube-controller-manager-crust-m01 1/1 Running 2 24h
kube-controller-manager-crust-m02 1/1 Running 0 24h
kube-controller-manager-crust-m03 1/1 Running 0 24h
kube-flannel-ds-amd64-8fw85 1/1 Running 0 63m
kube-flannel-ds-amd64-b6xts 1/1 Running 0 16h
kube-flannel-ds-amd64-gl542 1/1 Running 0 59m
kube-flannel-ds-amd64-krdzq 1/1 Running 0 4h3m
kube-flannel-ds-amd64-lqv9p 1/1 Running 0 16h
kube-flannel-ds-amd64-wndqk 1/1 Running 0 16h
kube-proxy-7rqmj 1/1 Running 0 52m
kube-proxy-8s68r 1/1 Running 0 4h3m
kube-proxy-hqvbs 1/1 Running 0 63m
kube-proxy-srjq2 1/1 Running 0 24h
kube-proxy-t6mvq 1/1 Running 1 24h
kube-proxy-wbs9h 1/1 Running 0 24h
kube-scheduler-crust-m01 1/1 Running 2 24h
kube-scheduler-crust-m02 1/1 Running 0 24h
kube-scheduler-crust-m03 1/1 Running 0 24h
Removing the flannel network
Delete the pods
On the master, delete all flannel pods:
kubectl delete -f kube-flannel.yml
Clean up leftover network state on the nodes
On each node, remove the interfaces and files flannel left behind.
Delete cni0
# ifconfig cni0 down
# ip link delete cni0
# rm -rf /var/lib/cni/
Delete the flannel.1 interface
# ifconfig flannel.1 down
# ip link delete flannel.1
# rm -f /etc/cni/net.d/*
Restart the kubelet
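For example, assuming the standard systemd unit enabled earlier:
# systemctl restart kubelet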
4.5 Check the pods
As shown, coredns is now running.
[root@t-master ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-54d67798b7-bc2lc 1/1 Running 0 49m
coredns-54d67798b7-lmqtx 1/1 Running 0 49m
etcd-t-master 1/1 Running 0 50m
kube-apiserver-t-master 1/1 Running 0 50m
kube-controller-manager-t-master 1/1 Running 0 50m
kube-flannel-ds-amd64-mhvhj 1/1 Running 0 31m
kube-flannel-ds-amd64-sldgb 1/1 Running 0 31m
kube-proxy-dtnc6 1/1 Running 0 49m
kube-proxy-tkcr6 1/1 Running 0 40m
kube-scheduler-t-master 1/1 Running 0 50m
Check the nodes
The nodes are now Ready.
[root@t-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
t-master Ready control-plane,master 59m v1.20.5
t-node-01 Ready <none> 49m v1.20.5