# Distribute the kube-proxy binary to every master node.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  scp /opt/kubernetes/package/kubernetes/node/bin/kube-proxy root@"${master_ip}":/opt/kubernetes/bin
done
# Verify the binary was distributed successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /opt/kubernetes/bin/kube-proxy "
done
# Run on master01 only: bind the kubelet-bootstrap user to the built-in
# system:node-bootstrapper ClusterRole so kubelets can request client
# certificates during TLS bootstrapping.
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
# Generate bootstrap.kubeconfig for kubelet TLS bootstrapping.
# The CA certificate is embedded so the file is self-contained.
bootstrap_kubeconfig=bootstrap.kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://172.27.128.200:6443 \
  --kubeconfig="${bootstrap_kubeconfig}"
kubectl config set-credentials kubelet-bootstrap \
  --token=416569d477d651706738c3b6b8e2023e \
  --kubeconfig="${bootstrap_kubeconfig}"
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig="${bootstrap_kubeconfig}"
kubectl config use-context default --kubeconfig="${bootstrap_kubeconfig}"
# The commands above generate bootstrap.kubeconfig in the current
# directory; distribute it to every master node.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  scp bootstrap.kubeconfig root@"${master_ip}":/opt/kubernetes/cfg/
done
# Verify bootstrap.kubeconfig was distributed successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /opt/kubernetes/cfg/bootstrap.kubeconfig"
done
# Write the default flannel CNI config on every master node.
# The heredoc runs on the remote host (single-quoted ssh command), so the
# JSON content is written there verbatim.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "mkdir -p /etc/cni/net.d"
  ssh root@"${master_ip}" 'cat > /etc/cni/net.d/10-default.conf << EOF
{
"name": "flannel",
"type": "flannel",
"delegate": {
"bridge": "docker0",
"isDefaultGateway": true,
"mtu": 1400
}
}
EOF'
done
# Verify the CNI config was created successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "cat /etc/cni/net.d/10-default.conf"
done
# Install ipvs prerequisites on every master node.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "yum install -y ipvsadm ipset conntrack"
done
# Generate the kube-proxy certificate signing request.
# CN "system:kube-proxy" makes kube-apiserver grant this client the
# built-in system:node-proxier role (see note below).
cd /opt/kubernetes/ssl || exit 1
# Quoted delimiter: the JSON is written literally, with no shell expansion.
cat > kube-proxy-csr.json << 'EOF'
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Certificate CN "system:kube-proxy" binds the User system:kube-proxy to
# the built-in Role system:node-proxier, which grants permission to call
# the proxy-related kube-apiserver APIs.
# Sign the kube-proxy client certificate with the cluster CA.
cd /opt/kubernetes/ssl || exit 1
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Distribute the kube-proxy certificate and key to every master node.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  scp kube-proxy-key.pem kube-proxy.pem root@"${master_ip}":/opt/kubernetes/ssl/
done
# Verify the certificate and key were distributed successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /opt/kubernetes/ssl/{kube-proxy-key.pem,kube-proxy.pem}"
done
# Create kube-proxy.kubeconfig, authenticating with the kube-proxy
# client certificate generated above. (The explanatory note that was an
# uncommented line here would have been executed as a command; it is now
# a proper comment below.)
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://172.27.128.200:6443 \
  --kubeconfig=kube-proxy.kubeconfig
# --embed-certs=true embeds the certificate contents into the generated
# kubeconfig file; without it, only the certificate file paths are written.
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
  --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Distribute kube-proxy.kubeconfig to every master node.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  scp kube-proxy.kubeconfig root@"${master_ip}":/opt/kubernetes/cfg/
done
# Verify the kubeconfig was distributed successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /opt/kubernetes/cfg/kube-proxy.kubeconfig"
done
# Generate the kube-proxy systemd unit template; ##NODE_IP## is replaced
# per node with sed before distribution.
# Fixed: the original passed both --logtostderr=true and
# --logtostderr=false; the duplicate "true" is removed so logs actually
# go to --log-dir as intended.
cat > kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \\
  --bind-address=##NODE_IP## \\
  --hostname-override=##NODE_IP## \\
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \\
  --masquerade-all \\
  --feature-gates=SupportIPVSProxyMode=true \\
  --proxy-mode=ipvs \\
  --ipvs-min-sync-period=5s \\
  --ipvs-sync-period=5s \\
  --ipvs-scheduler=rr \\
  --v=2 \\
  --logtostderr=false \\
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# --bind-address: the address kube-proxy listens on.
# --hostname-override: must match the kubelet's value; otherwise
# kube-proxy cannot find its Node after startup and will not create any
# ipvs rules.
# Render one unit file per node by substituting the node's IP for the
# ##NODE_IP## placeholder. Iterates over all of NODE_IPS instead of the
# previous hard-coded count of 3, so it works for any cluster size.
source /root/env.sh
for (( i = 0; i < ${#NODE_IPS[@]}; i++ ))
do
  sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-proxy.service > "kube-proxy-${NODE_IPS[i]}.service"
done
# Verify the per-node unit files were generated.
ls -ld kube-proxy-*.service
# Install each node's rendered unit file as kube-proxy.service.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  scp kube-proxy-"${node_ip}".service root@"${node_ip}":/usr/lib/systemd/system/kube-proxy.service
done
# Verify the unit files were installed successfully.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  ssh root@"${node_ip}" "ls -ld /usr/lib/systemd/system/kube-proxy.service"
done
# The working directory must exist before the service is started.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  # -p makes this idempotent: re-running the script no longer fails
  # when the directory already exists.
  ssh root@"${node_ip}" "mkdir -p /var/lib/kube-proxy"
  # daemon-reload picks up the freshly installed unit file before the
  # (re)start.
  ssh root@"${node_ip}" "systemctl daemon-reload && systemctl restart kube-proxy && systemctl enable kube-proxy"
done
# Check that kube-proxy is active on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  ssh root@"${node_ip}" "systemctl status kube-proxy | grep Active"
done
# Make sure the status is "active (running)"; otherwise inspect the
# logs to find the cause:
# Diagnostic: show the kube-proxy service log on the local node.
journalctl -u kube-proxy
# Check the ports kube-proxy is listening on, on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  ssh root@"${node_ip}" "netstat -nltp | grep kube-proxy"
done
# Dump the ipvs rules created by kube-proxy on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
  echo -e "\033[31m>>> ${node_ip} \033[0m"
  ssh root@"${node_ip}" "ipvsadm -ln"
done
# Expected output:
# >>> 172.27.128.11
# IP Virtual Server version 1.2.1 (size=4096)
# Prot LocalAddress:Port Scheduler Flags
# -> RemoteAddress:Port Forward Weight ActiveConn InActConn
# TCP 10.1.0.1:443 rr
# -> 172.27.128.11:6443 Masq 1 0 0
# -> 172.27.128.12:6443 Masq 1 0 0
# -> 172.27.128.13:6443 Masq 1 0 0
# >>> 172.27.128.12
# IP Virtual Server version 1.2.1 (size=4096)
# Prot LocalAddress:Port Scheduler Flags
# -> RemoteAddress:Port Forward Weight ActiveConn InActConn
# TCP 10.1.0.1:443 rr
# -> 172.27.128.11:6443 Masq 1 0 0
# -> 172.27.128.12:6443 Masq 1 0 0
# -> 172.27.128.13:6443 Masq 1 0 0
# >>> 172.27.128.13
# IP Virtual Server version 1.2.1 (size=4096)
# Prot LocalAddress:Port Scheduler Flags
# -> RemoteAddress:Port Forward Weight ActiveConn InActConn
# TCP 10.1.0.1:443 rr
# -> 172.27.128.11:6443 Masq 1 0 0
# -> 172.27.128.12:6443 Masq 1 0 0
# -> 172.27.128.13:6443 Masq 1 0 0
# This shows that all HTTPS requests to the K8S "kubernetes" Service are
# forwarded to the kube-apiserver nodes' port 6443.