Preface:
Kubernetes keeps moving toward dropping Docker, so let's try out a Kubernetes cluster without Docker.
Other related document: "k8s-1.21 HA cluster setup".
【Run on all nodes】
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# sed -i "/swap/{s/^/#/g}" /etc/fstab
# swapoff -a
# systemctl stop firewalld
# systemctl disable firewalld
# for svc in auditd postfix irqbalance remote-fs tuned rhel-configure; do systemctl disable $svc; done
# hostname xxxx
# vim /etc/hostname
# cat >> /etc/hosts << EOF
10.10.xxx.47 t-master
10.10.xxx.46 t-node-01
10.10.xxx.45 t-node-02
10.10.xxx.44 t-node-03
EOF
# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# modprobe br_netfilter
# sysctl -p /etc/sysctl.d/k8s.conf
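Note that the br_netfilter module does not load automatically after a reboot, and without it the bridge sysctls above cannot be applied (this is exactly the preflight error shown further down). One way to make it persistent, using systemd's modules-load.d (the file name is arbitrary):
# cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF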
# cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# chmod 755 /etc/sysconfig/modules/ipvs.modules
# bash /etc/sysconfig/modules/ipvs.modules
# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# mkdir /etc/yum.repos.d/bak && cp -rf /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
# wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# yum clean all && yum makecache
# cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
# yum -y update
# cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
EOF
# sysctl -p
# ulimit -n 655350
This only affects the current session. To make it permanent, modify the following two files:
# cat >> /etc/security/limits.conf << EOF
* soft memlock unlimited
* hard memlock unlimited
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
EOF
# vim /etc/systemd/system.conf
DefaultLimitNOFILE=655350
Or:
# echo "ulimit -n 655350" >> /etc/profile
# cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
# chmod +x /etc/sysconfig/modules/ipvs.modules
# bash /etc/sysconfig/modules/ipvs.modules
【Install on all nodes】
# yum install -y yum-utils device-mapper-persistent-data lvm2
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum list | grep containerd
# yum install containerd.io-1.4.4 -y
# mkdir -p /etc/containerd
# containerd config default > /etc/containerd/config.toml
# sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
# sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml
# sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
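After these three sed edits, the relevant lines in /etc/containerd/config.toml should look roughly like this (the exact pause image tag and indentation depend on the containerd version):
# grep -E "sandbox_image|SystemdCgroup" /etc/containerd/config.toml
    sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2"
            SystemdCgroup = true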
# systemctl daemon-reload
# systemctl enable containerd
# systemctl restart containerd
[root@t-master ~]# ctr images pull docker.io/library/nginx:alpine
docker.io/library/nginx:alpine: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178: done |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:c1f4e1974241c3f9ddb2866b2bf8e7afbceaa42dae82aabda5e946d03f054ed2: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:29d3f97df6fd99736a0676f9e57e53dfa412cf60b26d95008df9da8197f1f366: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:bfad9487e175364fd6315426feeee34bf5e6f516d2fe6a4e9b592315e330828e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:9aae54b2144e5b2b00c610f8805128f4f86822e1e52d3714c463744a431f0f4a: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:5df810e1c460527fe400cdd2cab62228f5fb3da0f2dce86a6a6c354972f19b6e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:a5f0adaddd5456b7c5a3753ab541b5fad750f0a6499a15f63571b964eb3e2616: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:e6a4c36d7c0e358e5fc02ccdac645b18b85dcfec09d4fb5f8cbdc187ce9467a0: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:345aee38d3533398e0eb7118e4323a8970f7615136f2170dfb2b0278bbd9099d: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 31.7s total: 8.7 Mi (280.9 KiB/s)
unpacking linux/amd64 sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178...
done
[root@t-master ~]# ctr images ls
REF TYPE DIGEST SIZE PLATFORMS LABELS
docker.io/library/nginx:alpine application/vnd.docker.distribution.manifest.list.v2+json sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178 9.4 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x -
| id | containerd command | docker command | Notes |
| --- | --- | --- | --- |
| 1 | ctr image ls | docker images | list images |
| 2 | ctr image pull nginx | docker pull nginx | pull the nginx image |
| 3 | ctr image tag nginx nginx-test | docker tag nginx nginx-test | tag the nginx image |
| 4 | ctr image push nginx-test | docker push nginx-test | push the nginx-test image |
| 5 | ctr image import nginx.tar | docker load | import a local image (ctr does not support compressed archives) |
| 6 | ctr run -d --env 111 nginx-test nginx | docker run -d --name=nginx nginx-test | run a container |
| 7 | ctr task ls | docker ps | list running containers |
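One more difference worth noting: ctr is namespace-aware, and images pulled by the kubelet through CRI end up in the k8s.io namespace rather than ctr's default namespace, so listing them requires, for example:
# ctr -n k8s.io images ls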
【Run on all nodes】
Note: make sure the yum repos configured earlier are in place.
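A quick way to double-check (the repo ids are the ones defined above):
# yum repolist enabled | grep -Ei "kubernetes|docker-ce"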
yum install -y kubelet-1.20.5 kubeadm-1.20.5 kubectl-1.20.5
crictl config runtime-endpoint /run/containerd/containerd.sock
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet
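The crictl config command above writes /etc/crictl.yaml; it should contain at least the runtime-endpoint line (other default fields vary by crictl version), and crictl info is a quick way to confirm crictl can reach containerd:
# cat /etc/crictl.yaml
runtime-endpoint: /run/containerd/containerd.sock
# crictl info | head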
[root@t-master ~]# mkdir /etc/systemd/system/containerd.service.d
[root@t-master ~]# cat > /etc/systemd/system/containerd.service.d/http_proxy.conf << EOF
[Service]
Environment="HTTP_PROXY=http://10.10.222.191:808/"
EOF
# systemctl restart containerd
# crictl pull nginx:alpine
# crictl rmi nginx:alpine
# crictl images
【Run on the master node】
# kubeadm config print init-defaults > kubeadm.yaml
Modify the following items:
- imageRepository: change it to a domestic mirror (e.g. Aliyun).
- criSocket: point it to the containerd socket set up above.
- serviceSubnet: 10.1.0.0/16 — the subnet used for Services.
- podSubnet: 10.244.0.0/16 — the pod subnet must match the network in the flannel manifest later, otherwise DNS will not come up.
- Set the kube-proxy mode to ipvs.
- Since containerd is the runtime, set cgroupDriver to systemd.
The resulting file looks like this:
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.10.239.47
bindPort: 6443
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: t-master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.1.0.0/16
podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
# kubeadm config images pull --config kubeadm.yaml
[root@t-master ~]# crictl images
IMAGE TAG IMAGE ID SIZE
docker.io/library/nginx alpine 72ab4137bd85a 9.82MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns 1.7.0 bfe3a36ebd252 14MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd 3.4.13-0 0369cf4303ffd 86.7MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver v1.20.0 ca9843d3b5454 30.4MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager v1.20.0 b9fa1895dcaa6 29.4MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy v1.20.0 10cc881966cfd 49.5MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler v1.20.0 3138b6e3d4712 14MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause 3.2 80d28bedfe5de 300kB
[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local t-master] and IPs [10.96.0.1 10.10.239.47]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost t-master] and IPs [10.10.239.47 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost t-master] and IPs [10.10.239.47 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 67.001606 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node t-master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node t-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.239.47:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
If the initialization fails with the following error:
[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[Fix]
# modprobe br_netfilter
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
Of course, if you are root you can also follow the hint in the output and simply set the variable: export KUBECONFIG=/etc/kubernetes/admin.conf
As shown below, there is one master node; it is NotReady because no pod network has been configured yet.
[root@t-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
t-master NotReady control-plane,master 3h10m v1.20.5
[root@t-master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:82:95:fb brd ff:ff:ff:ff:ff:ff
inet 10.10.239.47/24 brd 10.10.239.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe82:95fb/64 scope link
valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether d6:63:14:c1:f9:61 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether aa:6f:f7:42:ce:01 brd ff:ff:ff:ff:ff:ff
inet 10.1.0.1/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.1.0.10/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
[root@t-master ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-54d67798b7-hf4t8 0/1 Pending 0 2m10s
coredns-54d67798b7-m5ffl 0/1 Pending 0 2m10s
etcd-t-master 1/1 Running 0 2m17s
kube-apiserver-t-master 1/1 Running 0 2m17s
kube-controller-manager-t-master 1/1 Running 0 2m17s
kube-proxy-xj5l4 1/1 Running 0 2m11s
kube-scheduler-t-master 1/1 Running 0 2m17s
Notes:
After a short wait, the pods in kube-system gradually reach the Running state.
However, CoreDNS stays Pending; kubectl describe shows why: the only node still carries the node.kubernetes.io/not-ready taint, because there is no pod network yet.
[root@t-master ~]# kubectl describe pod coredns-54d67798b7-hf4t8 -n kube-system
Warning FailedScheduling 3m35s default-scheduler 0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.
We won't touch the master itself; once the pod network is deployed and the worker nodes have joined, CoreDNS gets scheduled.
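To see for yourself which taints are involved (node names will differ in your cluster):
# kubectl describe node t-master | grep -i -A2 taints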
【Run on the worker nodes】
kubeadm join 10.10.239.47:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
【Run on the master node】
# kubectl create -f kube-flannel.yml
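kube-flannel.yml is the stock flannel manifest; at the time it was typically fetched from the coreos/flannel repository (the URL below is an assumption and may have moved since). Before applying it, confirm that the Network in its net-conf.json matches the podSubnet used at init (10.244.0.0/16):
# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# grep -A6 "net-conf.json" kube-flannel.yml
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }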
As shown below, a new flannel.1 interface has appeared; its address comes from the configuration in kube-flannel.yml, which must match the podSubnet used during init.
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:82:95:fb brd ff:ff:ff:ff:ff:ff
inet 10.10.239.47/24 brd 10.10.239.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe82:95fb/64 scope link
valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether d6:63:14:c1:f9:61 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether aa:6f:f7:42:ce:01 brd ff:ff:ff:ff:ff:ff
inet 10.1.0.1/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.1.0.10/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 5e:66:53:43:02:ed brd ff:ff:ff:ff:ff:ff
inet 10.244.0.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::5c66:53ff:fe43:2ed/64 scope link
valid_lft forever preferred_lft forever
CoreDNS is now running:
[root@t-master ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-54d67798b7-bc2lc 1/1 Running 0 49m
coredns-54d67798b7-lmqtx 1/1 Running 0 49m
etcd-t-master 1/1 Running 0 50m
kube-apiserver-t-master 1/1 Running 0 50m
kube-controller-manager-t-master 1/1 Running 0 50m
kube-flannel-ds-amd64-mhvhj 1/1 Running 0 31m
kube-flannel-ds-amd64-sldgb 1/1 Running 0 31m
kube-proxy-dtnc6 1/1 Running 0 49m
kube-proxy-tkcr6 1/1 Running 0 40m
kube-scheduler-t-master 1/1 Running 0 50m
And the nodes are Ready:
[root@t-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
t-master Ready control-plane,master 59m v1.20.5
t-node-01 Ready <none> 49m v1.20.5
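As an optional smoke test, you can run a pod off the nginx:alpine image pulled earlier and confirm it gets an IP from the flannel pod subnet (the deployment name here is arbitrary):
# kubectl create deployment nginx --image=nginx:alpine
# kubectl expose deployment nginx --port=80
# kubectl get pod,svc -o wide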
For reference, if the CNI pods are not running on a node, pod sandbox creation fails with events like the following (these examples come from a Calico-based setup):
Warning FailedCreatePodSandBox 24s kubelet, node2 Failed create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "071c8fc6acb87838fd4ee341479a0769a97401c481a93b5b54f8812ba6fa0ed4" network for pod "coredns-5bfd685c78-mmjxc": NetworkPlugin cni failed to set up pod "coredns-5bfd685c78-mmjxc_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/
Warning FailedCreatePodSandBox 7m9s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "64cc7c14f3f2fb90ce373494ea2589c5d5ec9e64842a1b162b347ee30c0c02bf": stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/