Setting Up a Kubernetes Cluster on Ubuntu Virtual Machines (with a hands-on walkthrough and the kube-flannel.yml appendix)

I. Environment Notes
1. Two Ubuntu 18.04 virtual machines, both using bridged networking;
  1.1. Kubernetes requires at least 2 CPUs and 2 GB of RAM per node.
  1.2. Kubernetes requires a unique NIC MAC address on every node (check with ifconfig) and a unique machine product UUID (get it with: cat /sys/class/dmi/id/product_uuid).
2. Unless stated otherwise, everything is run as the root user (even though some commands include sudo).
3. The machines used here:
   k8s-master     192.168.89.133       9C004D56-F7E3-8C5F-1589-B51CFEF1DBED
   k8s-worker     192.168.89.134       EB884D56-98B2-810D-9C2C-FCE9E75394AD
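Optionally, the two nodes can resolve each other by name via /etc/hosts; a minimal sketch using the addresses above (this is an optional convenience, not a required step):

# run on both nodes
cat <<EOF >> /etc/hosts
192.168.89.133 k8s-master
192.168.89.134 k8s-worker
EOF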
# For Kubernetes v1.18.0, iptables must be configured to see bridged traffic. The upstream documentation reads as follows (this is required on every node; the transcript below shows k8s-master):
# Pass bridged IPv4 traffic to the iptables chains
Letting iptables see bridged traffic
As a requirement for your Linux Node’s iptables to correctly see bridged traffic, you should ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config, e.g.
 
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
 
# The actual steps taken:
root@k8s-master:~# vim /etc/sysctl.d/k8s.conf
# Enter the following content:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
 
root@k8s-master:~# sysctl --system
* Applying /etc/sysctl.d/10-console-messages.conf ...
kernel.printk = 4 4 1 7
* Applying /etc/sysctl.d/10-ipv6-privacy.conf ...
* Applying /etc/sysctl.d/10-kernel-hardening.conf ...
kernel.kptr_restrict = 1
* Applying /etc/sysctl.d/10-link-restrictions.conf ...
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/10-lxd-inotify.conf ...
fs.inotify.max_user_instances = 1024
* Applying /etc/sysctl.d/10-magic-sysrq.conf ...
kernel.sysrq = 176
* Applying /etc/sysctl.d/10-network-security.conf ...
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.tcp_syncookies = 1
* Applying /etc/sysctl.d/10-ptrace.conf ...
kernel.yama.ptrace_scope = 1
* Applying /etc/sysctl.d/10-zeropage.conf ...
vm.mmap_min_addr = 65536
* Applying /usr/lib/sysctl.d/50-default.conf ...
net.ipv4.conf.all.promote_secondaries = 1
net.core.default_qdisc = fq_codel
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
* Applying /etc/sysctl.conf ...
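If sysctl --system does not list the two net.bridge keys being applied, the br_netfilter kernel module is most likely not loaded; a hedged fix (this step does not appear in the transcript above):

# load the module now, and have it loaded on every boot
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/k8s.conf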

 

II. Installing Docker
Install Docker on both master and worker with the following steps:
apt update
apt remove docker docker-engine docker.io
apt install docker.io -y
systemctl start docker
systemctl enable docker
docker --version
 
Configure a Docker registry mirror, and switch the Docker cgroup driver from "cgroupfs" to "systemd":
vim /etc/docker/daemon.json
Enter the following content:
{
    "registry-mirrors": ["https://registry.docker-cn.com"],
    "exec-opts": ["native.cgroupdriver=systemd"]      
}
 
systemctl daemon-reload
systemctl restart docker
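To confirm the cgroup driver change took effect, docker info should now report systemd:

docker info | grep -i 'cgroup driver'
# expected output: Cgroup Driver: systemd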
 
# Add the current login user to the docker group so images can be pulled without root (exit the root shell first, so $USER is the regular user):
sudo usermod -a -G docker $USER
# Once this succeeds, log out and log back in for the group membership to take full effect.
The Docker installation steps on k8s-master (do the same on k8s-worker):
root@k8s-master:~# apt update
root@k8s-master:~# apt remove docker docker-engine docker.io
root@k8s-master:~# apt install docker.io -y
root@k8s-master:~# systemctl start docker
root@k8s-master:~# systemctl enable docker
root@k8s-master:~# docker --version
Docker version 19.03.6, build 369ce74a3c
root@k8s-master:~# vim /etc/docker/daemon.json
root@k8s-master:~# systemctl daemon-reload
root@k8s-master:~# systemctl restart docker
root@k8s-master:~# exit
master@k8s-master:~$ sudo usermod -a -G docker $USER
[sudo] password for master:
master@k8s-master:~$ logout
 
III. Installing Kubernetes
apt-get update && apt-get install -y apt-transport-https curl
 
# Fetch the Kubernetes apt key (inside China, use the Aliyun mirror link; elsewhere, the Google link)
curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
# Link for outside China
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
 
# Edit kubernetes.list and write the deb line:
vim /etc/apt/sources.list.d/kubernetes.list
Line for inside China: deb https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial main
  (Browsing https://mirrors.aliyun.com/kubernetes/apt shows the available kubeadm, kubelet, and kubectl versions.)
Line for outside China: deb https://apt.kubernetes.io/ kubernetes-xenial main
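Instead of editing the file interactively in vim, the deb line can be written in one command; a sketch for the Aliyun mirror:

cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial main
EOF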
 
apt-get update
apt install software-properties-common
 
# Disable the firewall and swap
ufw disable
ufw status
# Turn off swap for the current boot
swapoff -a
# To disable swap permanently, edit the file below and comment out the swap line, or run: sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
vim /etc/fstab
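To confirm swap is fully off, swapon should print nothing and free should report 0:

swapon --show
free -h | grep -i swap
# Swap:            0B          0B          0B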
 
# Check whether SELinux needs to be disabled
# If the file /etc/selinux/config exists and is set to enforcing, adjust it as follows:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
 
# Install kubelet, kubeadm, and kubectl
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
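Note that the plain apt-get install above takes whatever version is newest in the repository. To pin the 1.18.0 release used throughout this walkthrough, something like the following should work (the -00 suffix is the package revision these apt repositories use; verify the exact string with apt-cache madison kubelet):

apt-get install -y kubelet=1.18.0-00 kubeadm=1.18.0-00 kubectl=1.18.0-00
apt-mark hold kubelet kubeadm kubectl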
 
# Verify the installation (kubelet will now restart every few seconds, crash-looping while it waits for instructions from kubeadm)
kubeadm version
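The crash loop mentioned above can be observed with the usual systemd tools:

systemctl status kubelet
journalctl -u kubelet -f   # follow the kubelet log; Ctrl-C to stop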
The Kubernetes installation steps on k8s-master (do the same on k8s-worker):
root@k8s-master:~# apt-get update && apt-get install -y apt-transport-https curl
root@k8s-master:~# curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
OK
root@k8s-master:~# vim /etc/apt/sources.list.d/kubernetes.list
root@k8s-master:~# apt-get update
root@k8s-master:~# apt install software-properties-common
root@k8s-master:~# ufw disable
Firewall stopped and disabled on system startup
root@k8s-master:~# ufw status
Status: inactive
root@k8s-master:~# swapoff -a
root@k8s-master:~# vim /etc/fstab
root@k8s-master:~# apt-get install -y kubelet kubeadm kubectl
...
root@k8s-master:~# apt-mark hold kubelet kubeadm kubectl
kubelet set on hold.
kubeadm set on hold.
kubectl set on hold.
root@k8s-master:~# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.0", GitCommit:"9e991415386e4cf155a24b1da15becaa390438d8", GitTreeState:"clean", BuildDate:"2020-03-25T14:56:30Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}

IV. Configuring the Master Node

When kubeadm init runs, it tries to pull kube-apiserver and the other control-plane images from k8s.gcr.io, but k8s.gcr.io is unreachable from inside China.
So we pull equivalent images from a mirror registry first, retag them, and then run kubeadm init.
1. Because k8s.gcr.io cannot be reached from inside China, pull the corresponding images from a mirror and retag them. (Skip this step if you can reach k8s.gcr.io.)
   To list the images a given release needs, use the command below; append the version to pin it: kubeadm config images list --kubernetes-version v1.18.0
(To avoid a version mismatch at kubeadm init time, the version is fixed at 1.18.0 here, and init is given the same version.)
master@k8s-master:~$ kubeadm config images list --kubernetes-version v1.18.0
W0408 04:29:16.180988   17356 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.18.0
k8s.gcr.io/kube-controller-manager:v1.18.0
k8s.gcr.io/kube-scheduler:v1.18.0
k8s.gcr.io/kube-proxy:v1.18.0
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
(The following uses kube-apiserver v1.18.0 as the example.) (This assumes the master user has already been added to the docker group; otherwise run as root.)
# Pull the matching image from the Aliyun mirror registry
master@k8s-master:~$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
v1.18.0: Pulling from google_containers/kube-apiserver-amd64
597de8ba0c30: Pull complete
9fa155fddd2d: Pull complete
Digest: sha256:39b3d5a305ec4e340204ecfc81e8cfce87aada5832eb8ee51ef2165b8b31abe3
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
master@k8s-master:~$ docker images
REPOSITORY                                                                 TAG                 IMAGE ID            CREATED             SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64   v1.18.0             74060cea7f70        13 days ago         173MB
 
# Retag the image so it becomes k8s.gcr.io/kube-apiserver:v1.18.0
master@k8s-master:~$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0 k8s.gcr.io/kube-apiserver:v1.18.0
master@k8s-master:~$ docker images
REPOSITORY                                                                 TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver                                                  v1.18.0             74060cea7f70        13 days ago         173MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64   v1.18.0             74060cea7f70        13 days ago         173MB
 
# Remove the original Aliyun-tagged image
master@k8s-master:~$ docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64@sha256:39b3d5a305ec4e340204ecfc81e8cfce87aada5832eb8ee51ef2165b8b31abe3
master@k8s-master:~$ docker images
REPOSITORY                  TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver   v1.18.0             74060cea7f70        13 days ago         173MB

 

# Pull from the Aliyun mirror registry here; it is much faster, Docker Hub is still a bit slow from inside China
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.18.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.18.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
 
# Retag the images
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0 k8s.gcr.io/kube-apiserver:v1.18.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.18.0 k8s.gcr.io/kube-controller-manager:v1.18.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.18.0 k8s.gcr.io/kube-scheduler:v1.18.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0 k8s.gcr.io/kube-proxy:v1.18.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
 
# Remove the unneeded images (the ones downloaded from Aliyun)
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.18.0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.18.0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.18.0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.4.3-0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
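The three manual passes above (pull, retag, remove) can be collapsed into one loop. A convenience sketch, assuming the same Aliyun repository names and tags listed above (note the mirror appends -amd64 to some names while k8s.gcr.io does not):

#!/bin/bash
ALIYUN=registry.cn-hangzhou.aliyuncs.com/google_containers
# map: Aliyun image name -> k8s.gcr.io image name
declare -A images=(
  ["kube-apiserver-amd64:v1.18.0"]="kube-apiserver:v1.18.0"
  ["kube-controller-manager-amd64:v1.18.0"]="kube-controller-manager:v1.18.0"
  ["kube-scheduler-amd64:v1.18.0"]="kube-scheduler:v1.18.0"
  ["kube-proxy-amd64:v1.18.0"]="kube-proxy:v1.18.0"
  ["pause:3.2"]="pause:3.2"
  ["etcd-amd64:3.4.3-0"]="etcd:3.4.3-0"
  ["coredns:1.6.7"]="coredns:1.6.7"
)
for src in "${!images[@]}"; do
  docker pull "$ALIYUN/$src"
  docker tag  "$ALIYUN/$src" "k8s.gcr.io/${images[$src]}"
  docker rmi  "$ALIYUN/$src"
done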
2. Run kubeadm init
kubeadm init --kubernetes-version v1.18.0 --apiserver-advertise-address 192.168.89.133 --pod-network-cidr=10.244.0.0/16
--kubernetes-version pins the Kubernetes version; if omitted, kubeadm may install a version that does not match the images we just downloaded.
--apiserver-advertise-address specifies which of the Master's interfaces is used to communicate with the other nodes in the cluster. If the Master has multiple interfaces, it is best to specify this explicitly; otherwise kubeadm picks the interface with the default gateway.
--pod-network-cidr specifies the Pod network range. Kubernetes supports several network add-ons, and each has its own requirements for --pod-network-cidr; it is set to 10.244.0.0/16 here because the flannel add-on, used below, requires exactly this CIDR. Later exercises will switch to other add-ons, such as Canal.
The initialization proceeds as follows:
① kubeadm runs its pre-flight checks.
② It generates the token and certificates.
③ It generates the KubeConfig file, which kubelet needs to communicate with the Master.
④ It installs the Master components, pulling their Docker images from Google's registry; this step may take a while depending on network quality.
⑤ It installs the kube-proxy and kube-dns add-ons.
⑥ The Kubernetes Master initializes successfully.
⑦ It prints instructions for configuring kubectl, which we follow below.
⑧ It prints instructions for installing a Pod network, which we follow below.
⑨ It prints instructions for registering other nodes with the cluster, which we follow below.
 
On success, output like the following is printed. Save the kubeadm join line; it is needed later to join worker nodes.
If you lose it, regenerate it with: kubeadm token create --print-join-command
root@k8s-master:~# kubeadm init --kubernetes-version v1.18.0 --pod-network-cidr=10.244.0.0/16
......
Your Kubernetes control-plane has initialized successfully!
 
 
To start using your cluster, you need to run the following as a regular user:
 
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
 
Then you can join any number of worker nodes by running the following on each as root:
 
 
kubeadm join 192.168.89.133:6443 --token rzx7og.woqfn476yqsc5cy5 \
    --discovery-token-ca-cert-hash sha256:bc8c286a9955186ae60828d433a72f598bec1fc102a7810878c3a3435926e34c
 
Exit the root shell, switch back to the regular user, and complete the following:
root@k8s-master:~# exit
master@k8s-master:~$ mkdir -p $HOME/.kube
master@k8s-master:~$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[sudo] password for master:
master@k8s-master:~$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
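With the kubeconfig in place, kubectl should now reach the API server; a quick sanity check:

kubectl cluster-info
kubectl get nodes   # the master shows NotReady until a Pod network is installed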
3. Install the Pod network (the kube-flannel.yml content is in the appendix below)
# Because of network restrictions inside China, pre-download the flannel image just like the core images above. The version pulled must match the version referenced in kube-flannel.yml, here v0.12.0.
root@k8s-master:~/k8s# docker pull yilutech/flannel:v0.12.0-amd64
v0.12.0-amd64: Pulling from yilutech/flannel
921b31ab772b: Pull complete
4882ae1d65d3: Pull complete
ac6ef98d5d6d: Pull complete
8ba0f465eea4: Pull complete
fd2c2618e30c: Pull complete
Digest: sha256:6d451d92c921f14bfb38196aacb6e506d4593c5b3c9d40a8b8a2506010dc3e10
Status: Downloaded newer image for yilutech/flannel:v0.12.0-amd64
docker.io/yilutech/flannel:v0.12.0-amd64
root@k8s-master:~/k8s# docker tag yilutech/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
root@k8s-master:~/k8s# docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
yilutech/flannel                     v0.12.0-amd64       4e9f801d2217        3 weeks ago         52.8MB
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        3 weeks ago         52.8MB
k8s.gcr.io/kube-proxy                v1.17.4             6dec7cfde1e5        3 weeks ago         116MB
k8s.gcr.io/kube-apiserver            v1.17.4             2e1ba57fe95a        3 weeks ago         171MB
k8s.gcr.io/kube-controller-manager   v1.17.4             7f997fcf3e94        3 weeks ago         161MB
k8s.gcr.io/kube-scheduler            v1.17.4             5db16c1c7aff        3 weeks ago         94.4MB
k8s.gcr.io/coredns                   1.6.5               70f311871ae1        5 months ago        41.6MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        5 months ago        288MB
k8s.gcr.io/pause                     3.1                 da86e6ba6ca1        2 years ago         742kB
root@k8s-master:~/k8s# docker rmi yilutech/flannel:v0.12.0-amd64
Untagged: yilutech/flannel:v0.12.0-amd64
Untagged: yilutech/flannel@sha256:6d451d92c921f14bfb38196aacb6e506d4593c5b3c9d40a8b8a2506010dc3e10
root@k8s-master:~/k8s# docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        3 weeks ago         52.8MB
k8s.gcr.io/kube-proxy                v1.17.4             6dec7cfde1e5        3 weeks ago         116MB
k8s.gcr.io/kube-apiserver            v1.17.4             2e1ba57fe95a        3 weeks ago         171MB
k8s.gcr.io/kube-controller-manager   v1.17.4             7f997fcf3e94        3 weeks ago         161MB
k8s.gcr.io/kube-scheduler            v1.17.4             5db16c1c7aff        3 weeks ago         94.4MB
k8s.gcr.io/coredns                   1.6.5               70f311871ae1        5 months ago        41.6MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        5 months ago        288MB
k8s.gcr.io/pause                     3.1                 da86e6ba6ca1        2 years ago         742kB
 
# (This assumes kube-flannel.yml was downloaded in advance and uploaded to the k8s directory.)
root@k8s-master:~/k8s# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
 
# Verify the installation
root@k8s-master:~/k8s# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-6955765f44-c7tkf             0/1     Pending   0          31m
kube-system   coredns-6955765f44-l62v7             0/1     Pending   0          31m
kube-system   etcd-k8s-master                      1/1     Running   0          31m
kube-system   kube-apiserver-k8s-master            1/1     Running   0          31m
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          31m
kube-system   kube-flannel-ds-amd64-gq2fs          1/1     Running   0          14s
kube-system   kube-proxy-zmssr                     1/1     Running   0          31m
kube-system   kube-scheduler-k8s-master            1/1     Running   0          31m
root@k8s-master:~/k8s# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS              RESTARTS   AGE
kube-system   coredns-6955765f44-c7tkf             0/1     ContainerCreating   0          31m
kube-system   coredns-6955765f44-l62v7             0/1     Running             0          31m
kube-system   etcd-k8s-master                      1/1     Running             0          31m
kube-system   kube-apiserver-k8s-master            1/1     Running             0          31m
kube-system   kube-controller-manager-k8s-master   1/1     Running             0          31m
kube-system   kube-flannel-ds-amd64-gq2fs          1/1     Running             0          18s
kube-system   kube-proxy-zmssr                     1/1     Running             0          31m
kube-system   kube-scheduler-k8s-master            1/1     Running             0          31m
root@k8s-master:~/k8s# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-6955765f44-c7tkf             0/1     Running   0          31m
kube-system   coredns-6955765f44-l62v7             0/1     Running   0          31m
kube-system   etcd-k8s-master                      1/1     Running   0          31m
kube-system   kube-apiserver-k8s-master            1/1     Running   0          31m
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          31m
kube-system   kube-flannel-ds-amd64-gq2fs          1/1     Running   0          21s
kube-system   kube-proxy-zmssr                     1/1     Running   0          31m
kube-system   kube-scheduler-k8s-master            1/1     Running   0          31m

V. Configuring the Worker Node

1. First pull the required Docker images; three images are needed here:
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull yilutech/flannel:v0.12.0-amd64
 
 
# Retag the images
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0 k8s.gcr.io/kube-proxy:v1.18.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag yilutech/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
 
 
# Remove the unneeded images (the ones downloaded from the mirror registries)
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.18.0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker rmi yilutech/flannel:v0.12.0-amd64
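A quick check that the three retagged images are in place on the worker:

docker images --format '{{.Repository}}:{{.Tag}}' | grep -E 'k8s.gcr.io|coreos/flannel'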
 
2. On the worker, run the following, i.e. the join command produced by kubeadm init on the Master above:
root@k8s-worker:~# kubeadm join 192.168.89.133:6443 --token rzx7og.woqfn476yqsc5cy5 --discovery-token-ca-cert-hash sha256:bc8c286a9955186ae60828d433a72f598bec1fc102a7810878c3a3435926e34c
W0408 04:48:13.634359   21851 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
 
 
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
 
 
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
 
Back on the master node, run: kubectl get nodes
root@k8s-master:~/k8s# kubectl get nodes
NAME         STATUS     ROLES    AGE   VERSION
k8s-master   Ready      master   43m   v1.18.0
k8s-worker   NotReady   <none>   43s   v1.18.0
root@k8s-master:~/k8s# kubectl get nodes
NAME         STATUS   ROLES    AGE    VERSION
k8s-master   Ready    master   44m    v1.18.0
k8s-worker   Ready    <none>   106s   v1.18.0
 
# For detailed information about a node, use kubectl describe node node_name
root@k8s-master:~/k8s# kubectl describe node k8s-worker
Name:               k8s-worker
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=k8s-worker
                    kubernetes.io/os=linux
Annotations:        flannel.alpha.coreos.com/backend-data: {"VtepMAC":"9a:b1:a1:72:81:f2"}
                    flannel.alpha.coreos.com/backend-type: vxlan
                    flannel.alpha.coreos.com/kube-subnet-manager: true
                    flannel.alpha.coreos.com/public-ip: 192.168.89.134
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 08 Apr 2020 05:52:24 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  k8s-worker
  AcquireTime:     <unset>
  RenewTime:       Wed, 08 Apr 2020 06:31:31 +0000
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 08 Apr 2020 06:10:26 +0000   Wed, 08 Apr 2020 06:10:26 +0000   FlannelIsUp                  Flannel is running on this node
  MemoryPressure       False   Wed, 08 Apr 2020 06:30:26 +0000   Wed, 08 Apr 2020 05:52:24 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Wed, 08 Apr 2020 06:30:26 +0000   Wed, 08 Apr 2020 05:52:24 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Wed, 08 Apr 2020 06:30:26 +0000   Wed, 08 Apr 2020 05:52:24 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Wed, 08 Apr 2020 06:30:26 +0000   Wed, 08 Apr 2020 05:53:15 +0000   KubeletReady                 kubelet is posting ready status. AppArmor enabled
Addresses:
  InternalIP:  192.168.89.134
  Hostname:    k8s-worker
Capacity:
  cpu:                8
  ephemeral-storage:  41019616Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             4015876Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  37803678044
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             3913476Ki
  pods:               110
System Info:
  Machine ID:                 af93f132247f413b8f6c92477e61b168
  System UUID:                EB884D56-98B2-810D-9C2C-FCE9E75394AD
  Boot ID:                    cf4fc8fe-a850-4017-af4b-4f1e1b2ad596
  Kernel Version:             4.15.0-96-generic
  OS Image:                   Ubuntu 18.04.4 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  docker://19.3.6
  Kubelet Version:            v1.18.0
  Kube-Proxy Version:         v1.18.0
PodCIDR:                      10.244.2.0/24
PodCIDRs:                     10.244.2.0/24
Non-terminated Pods:          (3 in total)
  Namespace                   Name                           CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                   ----                           ------------  ----------  ---------------  -------------  ---
  default                     nginx-f89759699-ghtm6          0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m36s
  kube-system                 kube-flannel-ds-amd64-rkv62    100m (1%)     100m (1%)   50Mi (1%)        50Mi (1%)      21m
  kube-system                 kube-proxy-zq724               0 (0%)        0 (0%)      0 (0%)           0 (0%)         27m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests   Limits
  --------           --------   ------
  cpu                100m (1%)  100m (1%)
  memory             50Mi (1%)  50Mi (1%)
  ephemeral-storage  0 (0%)     0 (0%)
  hugepages-1Gi      0 (0%)     0 (0%)
  hugepages-2Mi      0 (0%)     0 (0%)
Events:
  Type     Reason                   Age   From                    Message
  ----     ------                   ----  ----                    -------
  Normal   Starting                 46m   kubelet, k8s-worker     Starting kubelet.
  Normal   NodeHasSufficientMemory  46m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    46m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     46m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasSufficientPID
  Normal   NodeAllocatableEnforced  46m   kubelet, k8s-worker     Updated Node Allocatable limit across pods
  Warning  ImageGCFailed            39m   kubelet, k8s-worker     failed to get imageFs info: unable to find data in memory cache
  Normal   Starting                 39m   kubelet, k8s-worker     Starting kubelet.
  Normal   NodeHasSufficientMemory  39m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    39m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     39m   kubelet, k8s-worker     Node k8s-worker status is now: NodeHasSufficientPID
  Normal   NodeAllocatableEnforced  39m   kubelet, k8s-worker     Updated Node Allocatable limit across pods
  Normal   NodeReady                38m   kubelet, k8s-worker     Node k8s-worker status is now: NodeReady
  Normal   Starting                 22m   kube-proxy, k8s-worker  Starting kube-proxy.
 

VI. Verification

# 1. Log in to the k8s-master server and create an nginx Deployment controller
master@k8s-master:~$ kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
 
# 2. Expose the nginx application's port 80 externally via a port on the node
master@k8s-master:~$ kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
 
# 3. Check the nginx pod and the externally exposed node port
master@k8s-master:~$ kubectl get pod
NAME                     READY   STATUS              RESTARTS   AGE
nginx-86c57db685-dnfpm   0/1     ContainerCreating   0          36s
master@k8s-master:~$ kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        15h
nginx        NodePort    10.96.61.234   <none>        80:30121/TCP   15s
master@k8s-master:~$ kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
nginx-86c57db685-dnfpm   1/1     Running   0          83s
master@k8s-master:~$ kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        15h
nginx        NodePort    10.96.61.234   <none>        80:30121/TCP   60s
 
# (In the PORT(S) column above, 80:30121/TCP means 80 is the nginx pod port and 30121 is the NodePort exposed on the nodes; a pod STATUS of Running means everything is normal.)
 
# 4. Once the nginx pod is Running, run kubectl get pod -o wide to see which node it is scheduled on
master@k8s-master:~$ kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE     IP          NODE         NOMINATED NODE   READINESS GATES
nginx-86c57db685-dnfpm   1/1     Running   0          5m47s   10.10.1.2   k8s-worker   <none>           <none>
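Before falling back to port-forward, the NodePort itself can be probed with curl from any machine that can reach the nodes (30121 is the port assigned above; if this fails, as it did here, use the port-forward in step 5):

curl http://192.168.89.134:30121/
# a working NodePort returns the 'Welcome to nginx!' page from either node IP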
 
# 5. Since direct NodePort access did not work here, use port-forward instead
root@k8s-master:~# kubectl port-forward svc/nginx 8080:80 --address 0.0.0.0
Forwarding from 0.0.0.0:8080 -> 80
Handling connection for 8080
Handling connection for 8080
 
# 6. Open a browser at http://192.168.89.133:8080/ (with a working NodePort it would be http://192.168.89.133:30121/ directly)
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and working. Further configuration is required.
 
For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.
 
Thank you for using nginx.

 

Appendix: kube-flannel.yml (full file content)
 
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg

 
