Contents
1. Base environment
2. Base environment configuration
2.1 Configure hosts on all nodes
2.2 Disable the firewall, selinux, dnsmasq, and swap
2.3 Configure time synchronization
2.4 Raise resource limits on all nodes
2.5 Install base packages
2.6 Upgrade the system kernel
2.7 Tune kernel parameters
2.8 Load the ipvs modules
3. Package preparation
4. Install docker and cri-docker
4.1 Install docker-ce
4.2 Install cri-dockerd
4.3 Install containerd
4.4 Install the crictl client tool
4.5 Install the cfssl tools
5. Generate kubernetes cluster certificates
5.1 Generate the etcd CA certificate
5.2 Create certificates for the kubernetes components
5.3 Create the kube-apiserver certificate
5.4 Create the proxy-client certificate and CA
5.5 Create the kube-controller-manager certificate and kubeconfig
5.6 Generate the kube-scheduler certificate files
5.7 Generate the cluster administrator certificate
6. Deploy etcd
6.1 Install etcd
6.2 Configure the etcdctl client tool
7. Deploy kubernetes
7.1 Install kube-apiserver
7.2 Install kube-controller-manager
7.3 Install kube-scheduler
7.4 Deploy the kubectl tool on the master node
7.5 Deploy kubelet
7.6 Deploy kube-proxy
8. Install add-ons
8.1 Install the calico network plugin
8.2 Install the calicoctl client
8.3 Install the dashboard
8.4 Install metrics-server
1. Base environment

Hostname | IP address |
master1 | 10.66.6.2 |
node1 | 10.66.6.4 |
node2 | 10.66.6.5 |
Notes:
There are two master nodes, fronted by nginx as a proxy for high availability.
The operating system is Ubuntu 20.04.
Passwordless SSH between nodes is set up with public keys.
2. Base environment configuration
2.1 Configure hosts on all nodes
Append the following to /etc/hosts on every node:
10.66.6.2 master1
10.66.6.4 node1
10.66.6.5 node2
2.2 Disable the firewall, selinux, dnsmasq, and swap
# Disable the firewall (these units are CentOS-style; on Ubuntu 20.04 the default firewall is ufw,
# and the selinux steps apply only if those packages are actually installed)
systemctl disable --now firewalld
# Disable dnsmasq
systemctl disable --now dnsmasq
# Disable postfix
systemctl disable --now postfix
# Disable NetworkManager
systemctl disable --now NetworkManager
# Disable selinux
sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0
# Disable swap
sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a
2.3 Configure time synchronization
# Install ntpdate
apt-get install ntpdate -y
# Run an initial sync; use your own NTP server here if you have one
ntpdate ntp1.aliyun.com
# Add a cron job
crontab -e
0 */1 * * * ntpdate ntp1.aliyun.com
2.4 Raise resource limits on all nodes
Write the limits to /etc/security/limits.conf:
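A typical limits set for Kubernetes nodes (a sketch; these exact values are an assumption, tune them for your workload):

cat > /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF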
2.5 Install base packages
apt-get install ipvsadm ipset conntrack sysstat libseccomp2 psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl -y
2.6 Upgrade the system kernel
# Check the running kernel
uname -r
# List kernels available in the repositories
sudo apt list 2>/dev/null | grep linux-generic
# Install the HWE kernel
apt-get install linux-generic-hwe-20.04-edge/focal-updates
# Download the helper script
wget https://raw.githubusercontent.com/pimlie/ubuntu-mainline-kernel.sh/master/ubuntu-mainline-kernel.sh
# Put the script on the executable path
install ubuntu-mainline-kernel.sh /usr/local/bin/
# Check the latest available kernel version
ubuntu-mainline-kernel.sh -c
# After confirming that this is the version you want to install, run
ubuntu-mainline-kernel.sh -i
# Reboot the server, then verify
reboot
uname -rs
2.7 Tune kernel parameters
Write the kernel parameters to /etc/sysctl.conf:
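A commonly used parameter set for Kubernetes nodes (a sketch; this exact list is an assumption, extend it as needed):

# The net.bridge.* and nf_conntrack keys require these modules
modprobe br_netfilter
modprobe nf_conntrack
cat > /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
EOF
# Apply
sysctl -p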
2.8 Load the ipvs modules
Declare the modules in /etc/modules-load.d/ipvs.conf:
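A sketch of the module list (on 4.19+ kernels nf_conntrack replaces the older nf_conntrack_ipv4; the exact list is an assumption):

cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl enable --now systemd-modules-load.service
# Verify
lsmod | grep -e ip_vs -e nf_conntrack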
3. Package preparation
Download addresses:
kubernetes 1.25.6:
https://dl.k8s.io/v1.25.6/kubernetes-server-linux-amd64.tar.gz
etcd:
https://github.com/etcd-io/etcd/releases/download/v3.5.7/etcd-v3.5.7-linux-amd64.tar.gz
docker-ce (static binaries):
https://download.docker.com/linux/static/stable/x86_64/
cri-dockerd:
https://github.com/Mirantis/cri-dockerd/releases
containerd:
https://github.com/containerd/containerd/releases
cfssl:
https://github.com/cloudflare/cfssl/releases
4. Install docker and cri-docker
4.1 Install docker-ce
tar xf docker-23.0.1.tgz
cp docker/* /usr/bin
The containerd unit file:
cat > /usr/lib/systemd/system/containerd.service << EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
The docker unit file:
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
The docker socket file:
cat > /usr/lib/systemd/system/docker.socket << EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
Create the docker daemon config file:
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
Create the docker group, enable everything at boot, and start:
groupadd docker
systemctl enable --now containerd.service
systemctl enable --now docker.socket
systemctl enable --now docker.service
4.2 Install cri-dockerd
tar xf cri-dockerd-0.3.1.amd64.tgz
cp cri-dockerd/* /usr/bin
Create the unit file:
cat > /usr/lib/systemd/system/cri-docker.service << EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=kubernetes/pause:latest
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
Create the cri-docker socket file:
cat > /usr/lib/systemd/system/cri-docker.socket << EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
Enable at boot and start:
systemctl enable --now cri-docker.socket
systemctl enable --now cri-docker
4.3 Install containerd
tar xf containerd-1.6.19-linux-amd64.tar.gz -C /usr/local
Create the unit file, for example:
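The unit from section 4.1 can be reused with the binary path changed to /usr/local/bin (a sketch):

cat > /usr/lib/systemd/system/containerd.service << EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF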
systemctl enable --now containerd.service
Generate the default config and restart:
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
systemctl restart containerd
4.4 Install the crictl client tool
# Unpack
tar xf crictl-v1.22.0-linux-amd64.tar.gz -C /usr/bin/
# Generate the config file, for example:
4.5 Install the cfssl tools
# On the master node
tar xf cfssl-1.6.3.tar.gz -C /usr/bin
mkdir /opt/pki/{etcd,kubernetes} -p
5. Generate kubernetes cluster certificates
5.1 Generate the etcd CA certificate
On the master node:
mkdir /opt/pki/etcd/ -p
cd /opt/pki/etcd/
# Create the CA directory for the etcd certificates
mkdir ca
# Generate the etcd CA config and signing request files
cd ca/
Create ca-config.json and ca-csr.json, then initialize the CA, for example:
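A sketch of both files and the CA bootstrap; the subject fields are assumptions, but the profile name must match the -profile=etcd flag used below:

cat > ca-config.json << EOF
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "etcd": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "etcd-ca",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "Beijing", "L": "Beijing", "O": "etcd", "OU": "System" }
  ]
}
EOF
# Initialize the CA
cfssl gencert -initca ca-csr.json | cfssljson -bare ca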
Generate the etcd server certificate signing request:
cd /opt/pki/etcd/
cat > etcd-server-csr.json << EOF
{
  "CN": "etcd-server",
  "hosts": [
    "10.66.6.2",
    "10.66.6.3",
    "10.66.6.4",
    "10.66.6.5",
    "10.66.6.6",
    "127.0.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd-server",
      "OU": "System"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-server-csr.json | cfssljson -bare etcd-server
Generate the etcd client certificate
# Generate the signing request file
cd /opt/pki/etcd/
cat > etcd-client-csr.json << EOF
{
  "CN": "etcd-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd-client",
      "OU": "System"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-client-csr.json | cfssljson -bare etcd-client
Copy the certificates to the master and node nodes.
# Host lists used by the scp loops throughout this guide (assumed from the table in section 1; adjust to your environment)
master="master1"
node="node1 node2"
for i in $master $node;do
ssh $i "mkdir /etc/etcd/ssl -p"
scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
5.2 Create certificates for the kubernetes components
5.2.1 Create the kubernetes CA
mkdir /opt/pki/kubernetes/ -p
cd /opt/pki/kubernetes/
mkdir ca
cd ca
Create the CA config file ca-config.json, for example:
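A sketch; the expiry values are assumptions, but the profile name must match the -profile=kubernetes flag used throughout this section:

cat > ca-config.json << EOF
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF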
Generate the CA signing request file ca-csr.json, for example:
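A sketch with assumed subject fields:

cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "Beijing", "L": "Beijing", "O": "kubernetes", "OU": "System" }
  ]
}
EOF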
Generate the CA certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
5.3 Create the kube-apiserver certificate
mkdir /opt/pki/kubernetes/kube-apiserver -p
cd /opt/pki/kubernetes/kube-apiserver
Generate the signing request file:
cat > kube-apiserver-csr.json << EOF
{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "10.66.6.2",
    "10.66.6.3",
    "10.66.6.4",
    "10.66.6.5",
    "10.66.6.6",
    "10.66.6.7",
    "10.200.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kube-apiserver",
      "OU": "System"
    }
  ]
}
EOF
Generate the certificate:
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-apiserver-csr.json | cfssljson -bare kube-apiserver
# Copy the certificates to the master nodes
for i in $master;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/{ca.pem,ca-key.pem} /opt/pki/kubernetes/kube-apiserver/{kube-apiserver-key.pem,kube-apiserver.pem} $i:/etc/kubernetes/pki
done
# Copy the CA certificate to the node nodes
for i in $node;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/ca.pem $i:/etc/kubernetes/pki
done
5.4 Create the proxy-client certificate and CA
mkdir /opt/pki/proxy-client
cd /opt/pki/proxy-client
Generate the CA signing request file, for example:
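A sketch; the CN is an assumption:

cat > front-proxy-ca-csr.json << EOF
{
  "CN": "front-proxy-ca",
  "key": { "algo": "rsa", "size": 2048 }
}
EOF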
Generate the CA files:
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
Generate the client certificate signing request file, for example:
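A sketch; front-proxy-client is the conventional CN for this certificate:

cat > front-proxy-client-csr.json << EOF
{
  "CN": "front-proxy-client",
  "key": { "algo": "rsa", "size": 2048 }
}
EOF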
Generate the certificate:
cfssl gencert \
-ca=front-proxy-ca.pem \
-ca-key=front-proxy-ca-key.pem \
-config=../kubernetes/ca/ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
Copy the certificates to the nodes:
for i in $master;do
scp /opt/pki/proxy-client/{front-proxy-ca.pem,front-proxy-client.pem,front-proxy-client-key.pem} $i:/etc/kubernetes/pki
done
for i in $node;do
scp /opt/pki/proxy-client/front-proxy-ca.pem $i:/etc/kubernetes/pki
done
5.5 Create the kube-controller-manager certificate and kubeconfig
mkdir /opt/pki/kubernetes/kube-controller-manager
cd /opt/pki/kubernetes/kube-controller-manager
Generate the signing request file, for example:
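A sketch; the CN must be system:kube-controller-manager (the user the kubeconfig below authenticates as), the other subject fields are assumptions:

cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "Beijing", "L": "Beijing", "O": "system:kube-controller-manager", "OU": "System" }
  ]
}
EOF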
Generate the certificate files:
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Generate the kubeconfig file:
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
Copy the kubeconfig to the master nodes:
for i in $master;do
scp /opt/pki/kubernetes/kube-controller-manager/kube-controller-manager.kubeconfig $i:/etc/kubernetes
done
5.6 Generate the kube-scheduler certificate files
mkdir /opt/pki/kubernetes/kube-scheduler
cd /opt/pki/kubernetes/kube-scheduler
Generate the signing request file, for example:
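A sketch; the CN must be system:kube-scheduler, the other fields are assumptions:

cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "Beijing", "L": "Beijing", "O": "system:kube-scheduler", "OU": "System" }
  ]
}
EOF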
Generate the certificate:
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Generate the kubeconfig file:
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
Copy the kubeconfig to the master nodes:
for i in $master;do
scp /opt/pki/kubernetes/kube-scheduler/kube-scheduler.kubeconfig $i:/etc/kubernetes
done
5.7 Generate the cluster administrator certificate
mkdir /opt/pki/kubernetes/admin
cd /opt/pki/kubernetes/admin
Generate the certificate signing request file, for example:
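A sketch; O=system:masters is what grants cluster-admin rights through the built-in ClusterRoleBinding, the other fields are assumptions:

cat > admin-csr.json << EOF
{
  "CN": "admin",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "Beijing", "L": "Beijing", "O": "system:masters", "OU": "System" }
  ]
}
EOF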
Generate the certificate:
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
Generate the kubeconfig file:
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
6. Deploy etcd
6.1 Install etcd
tar xf etcd-v3.5.7-linux-amd64.tar.gz
cp etcd-v3.5.7-linux-amd64/etcd* /usr/bin/
rm -rf etcd-v3.5.7-linux-amd64
Create the config file, for example:
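A single-member sketch for master1 (10.66.6.2); the name, URLs, and initial-cluster list are assumptions and must be adjusted per node, while the certificate paths match section 5.1:

cat > /etc/etcd/etcd.config.yml << EOF
name: 'master1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
listen-peer-urls: 'https://10.66.6.2:2380'
listen-client-urls: 'https://10.66.6.2:2379,http://127.0.0.1:2379'
initial-advertise-peer-urls: 'https://10.66.6.2:2380'
advertise-client-urls: 'https://10.66.6.2:2379'
initial-cluster: 'master1=https://10.66.6.2:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
log-level: 'info'
EOF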
Create the unit file:
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
systemctl enable --now etcd
6.2 Configure the etcdctl client tool
# Set global variables for etcdctl, for example:
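A sketch using the client certificate from section 5.1 (the endpoint list is an assumption):

cat > /etc/profile.d/etcdctl.sh << EOF
export ETCDCTL_API=3
export ETCDCTL_ENDPOINTS=https://10.66.6.2:2379
export ETCDCTL_CACERT=/etc/etcd/ssl/ca.pem
export ETCDCTL_CERT=/etc/etcd/ssl/etcd-client.pem
export ETCDCTL_KEY=/etc/etcd/ssl/etcd-client-key.pem
EOF
source /etc/profile.d/etcdctl.sh
# Verify
etcdctl endpoint health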
7. Deploy kubernetes
Distribute the binaries:
tar xf kubernetes-server-linux-amd64.tar.gz
# Distribute the master components
for i in $master;do
scp kubernetes/server/bin/{kubeadm,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubelet,kubectl} $i:/usr/bin
done
# Distribute the node components
for i in $node;do
scp kubernetes/server/bin/{kube-proxy,kubelet} $i:/usr/bin
done
# Create the ServiceAccount key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
# Distribute to the master nodes
for i in $master;do
scp /etc/kubernetes/pki/{sa.pub,sa.key} $i:/etc/kubernetes/pki/
done
7.1 Install kube-apiserver
Create the service file, for example (the eth0 interface name is environment-specific):
a=`ifconfig eth0 | awk 'NR==2{print $2}'`
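A sketch of the unit; the flag values are assumptions consistent with this guide (service CIDR 10.200.0.0/16, a single etcd on 10.66.6.2, certificate paths from section 5):

cat > /etc/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-apiserver \\
      --v=2 \\
      --allow-privileged=true \\
      --bind-address=0.0.0.0 \\
      --secure-port=6443 \\
      --advertise-address=$a \\
      --service-cluster-ip-range=10.200.0.0/16 \\
      --service-node-port-range=30000-42767 \\
      --etcd-servers=https://10.66.6.2:2379 \\
      --etcd-cafile=/etc/etcd/ssl/ca.pem \\
      --etcd-certfile=/etc/etcd/ssl/etcd-client.pem \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-client-key.pem \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem \\
      --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \\
      --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \\
      --kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \\
      --kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --authorization-mode=Node,RBAC \\
      --enable-bootstrap-token-auth=true \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
      --requestheader-allowed-names=front-proxy-client \\
      --requestheader-group-headers=X-Remote-Group \\
      --requestheader-extra-headers-prefix=X-Remote-Extra- \\
      --requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF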
Start the service:
systemctl enable --now kube-apiserver.service
7.2 Install kube-controller-manager
# Generate the service file, for example:
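A sketch; the cluster CIDR 10.100.0.0/16 and service CIDR 10.200.0.0/16 match the values used elsewhere in this guide, the remaining flags are assumptions:

cat > /etc/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-controller-manager \\
      --v=2 \\
      --bind-address=127.0.0.1 \\
      --root-ca-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
      --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
      --leader-elect=true \\
      --use-service-account-credentials=true \\
      --controllers=*,bootstrapsigner,tokencleaner \\
      --allocate-node-cidrs=true \\
      --cluster-cidr=10.100.0.0/16 \\
      --service-cluster-ip-range=10.200.0.0/16 \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF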
# Start the service
systemctl enable --now kube-controller-manager.service
7.3 Install kube-scheduler
# Generate the service file, for example:
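A sketch of the unit, followed by the start step:

cat > /etc/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-scheduler \\
      --v=2 \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
# Start the service
systemctl enable --now kube-scheduler.service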
7.4 Deploy the kubectl tool on the master node
mkdir /root/.kube/ -p
cp /opt/pki/kubernetes/admin/admin.kubeconfig /root/.kube/config
Verify:
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
7.5 Deploy kubelet
7.5.1 Use TLS bootstrapping to automatically approve kubelets
Create the TLS bootstrapping auth files:
mkdir /opt/pki/kubernetes/kubelet -p
cd /opt/pki/kubernetes/kubelet
# Generate a random bootstrap token
a=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6`
b=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16`
Generate the token Secret and RBAC binding file, for example:
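A sketch of the token Secret plus the conventional ClusterRoleBindings that let the token bootstrap nodes and have kubelet certificates auto-approved (binding names are assumptions):

cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-$a
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  token-id: $a
  token-secret: $b
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
EOF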
Generate the bootstrap kubeconfig:
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://10.66.6.2:6443 \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=$a.$b \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=bootstrap-kubelet.kubeconfig
# Create the RBAC objects
kubectl apply -f bootstrap.secret.yaml
Distribute the auth file to the nodes:
for i in $node;do
ssh $i "mkdir /etc/kubernetes -p"
scp /opt/pki/kubernetes/kubelet/bootstrap-kubelet.kubeconfig $i:/etc/kubernetes
done
7.5.2 Deploy the kubelet component
Option A: docker + cri-dockerd as the runtime
mkdir /etc/systemd/system/kubelet.service.d/ -p
mkdir /etc/kubernetes/manifests/ -p
Generate the service file:
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
Generate the drop-in config file (hostname-override is per-node):
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << 'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=10.66.6.2"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RUNTIME
EOF
Option B: containerd as the runtime
a=`ifconfig eth0 | awk 'NR==2{print $2}'`
mkdir /etc/systemd/system/kubelet.service.d/ -p
mkdir /etc/kubernetes/manifests/ -p
# Generate the service file and its drop-in, for example:
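A sketch mirroring the docker variant, with the runtime endpoint switched to containerd and the node IP substituted from $a:

cat > /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=$a"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS \$KUBELET_RUNTIME
EOF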
Generate the kubelet config file:
a=`ifconfig eth0 | awk 'NR==2{print $2}'`
# Write /etc/kubernetes/kubelet-conf.yml, for example:
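A sketch; clusterDNS 10.200.0.2 assumes the 10.200.0.0/16 service CIDR used earlier, the rest are common defaults:

cat > /etc/kubernetes/kubelet-conf.yml << EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: $a
port: 10250
readOnlyPort: 0
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
clusterDNS:
- 10.200.0.2
clusterDomain: cluster.local
healthzBindAddress: 127.0.0.1
healthzPort: 10248
rotateCertificates: true
staticPodPath: /etc/kubernetes/manifests
EOF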
Start the service:
systemctl enable --now kubelet.service
7.6 Deploy kube-proxy
mkdir /opt/pki/kubernetes/kube-proxy/ -p
cd /opt/pki/kubernetes/kube-proxy/
Create the ServiceAccount and RBAC binding:
kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
Create the ServiceAccount token Secret (tokens are no longer auto-created on 1.24+), for example:
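A sketch of the Secret that materializes the token:

cat > kube-proxy-secret.yml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF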
kubectl apply -f kube-proxy-secret.yml
JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://10.66.6.2:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context kubernetes \
--kubeconfig=kube-proxy.kubeconfig
Copy the kubeconfig to the nodes:
for i in $node;do
scp /opt/pki/kubernetes/kube-proxy/kube-proxy.kubeconfig $i:/etc/kubernetes
done
Generate the service file, for example:
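A sketch of the unit:

cat > /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-proxy \\
      --config=/etc/kubernetes/kube-proxy.yaml \\
      --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF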
Generate the config file (bindAddress and hostnameOverride are per-node):
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 10.66.6.2
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.100.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "10.66.6.2"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
Start the service:
systemctl enable --now kube-proxy.service
Verify the working mode:
curl 127.0.0.1:10249/proxyMode
8. Install add-ons
8.1 Install the calico network plugin
Download the manifest:
https://raw.githubusercontent.com/projectcalico/calico/v3.24.5/manifests/calico-typha.yaml
Change the pod CIDR to match the cluster CIDR:
- name: CALICO_IPV4POOL_CIDR
  value: "10.100.0.0/16"
kubectl apply -f calico-typha.yaml
# Verify
kubectl get node
8.2 Install the calicoctl client
mkdir /etc/calico -p
Create the calicoctl config file, for example:
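A sketch using the kubernetes datastore and the admin kubeconfig from section 7.4:

cat > /etc/calico/calicoctl.cfg << EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"
EOF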
# Verify
calicoctl node status
8.3 Install the dashboard
Manifest address:
https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
Modify the yaml file to expose the Service via NodePort:
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort       # added
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001  # added
  selector:
    k8s-app: kubernetes-dashboard
# Deploy
kubectl apply -f dashboard.yaml
Create the admin user manifest, for example:
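A sketch; on 1.24+ the extra token Secret is required so that the describe command below actually returns a token:

cat > admin.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
EOF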
Apply it:
kubectl apply -f admin.yaml
# Get the user token
kubectl describe secrets -n kubernetes-dashboard admin-user
8.4 Install metrics-server
Download address:
https://github.com/kubernetes-sigs/metrics-server/
Copy the front-proxy CA certificate to the nodes:
for i in $node;do
scp /opt/pki/proxy-client/front-proxy-ca.pem $i:/etc/kubernetes/pki/
done
Modify the container args and volumes in components.yaml as follows:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - mountPath: /etc/kubernetes/pki
          name: ca-ssl
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
kubectl apply -f components.yaml
# Verify
kubectl top node