modprobe br_netfilter
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-ip6tables=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
swapoff -a
sed -i '/swap/d' /etc/fstab
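The module and sysctl settings above do not survive a reboot. A minimal sketch of the standard way to persist them (the file names under /etc/modules-load.d and /etc/sysctl.d are conventional, not from the original text):
cat <<'EOF' >/etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<'EOF' >/etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system   # reload settings from all sysctl configuration files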
wget https://github.com/containerd/containerd/releases/download/v1.5.7/containerd-1.5.7-linux-amd64.tar.gz
tar xvf containerd-1.5.7-linux-amd64.tar.gz
cp -r bin/* /usr/local/bin/
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
# Copy https://github.com/containerd/containerd/blob/main/containerd.service to /usr/lib/systemd/system/
systemctl daemon-reload
systemctl start containerd
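When kubelet runs with the systemd cgroup driver (the kubeadm default on systemd-based distros), containerd should use it as well. A commonly applied tweak to the generated config, shown here as a hedged example (if the SystemdCgroup line is absent, add it under the runc options section):
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd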
echo "[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
" >/etc/yum.repos.d/kubernetes.repo
# Either install the latest available packages:
yum -y install kubectl kubelet kubeadm
# or pin a specific version (set k8s_ver first, e.g. k8s_ver=1.22.3-0):
yum install -y libseccomp cri-tools kubeadm-$k8s_ver kubectl-$k8s_ver kubelet-$k8s_ver --disableexcludes=kubernetes
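After the packages are installed, enable kubelet so it starts on boot (a standard kubeadm setup step; kubelet will crash-loop until kubeadm init runs, which is expected):
systemctl enable --now kubelet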
Download the images from the Aliyun mirror. Running kubeadm config images list shows which images are required; then pull them into the k8s.io namespace:
K8S_VERSION="v1.22.3"
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:$K8S_VERSION
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:$K8S_VERSION
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:$K8S_VERSION
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:$K8S_VERSION
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
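If kubeadm will later run against its default image repository (k8s.gcr.io) instead of the mirror, the pulled images also need to be retagged to their upstream names. A hypothetical loop sketch (the flat coredns path on the mirror is an assumption):
for img in $(kubeadm config images list --kubernetes-version "$K8S_VERSION"); do
  mirror="registry.cn-hangzhou.aliyuncs.com/google_containers/${img##*/}"   # strip the k8s.gcr.io prefix
  ctr -n k8s.io images pull "$mirror"
  ctr -n k8s.io images tag "$mirror" "$img"   # retag to the name kubeadm expects
done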
kubeadm init --kubernetes-version v1.22.3 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr 10.244.0.0/16
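When init succeeds, kubeadm prints the follow-up steps itself; the standard ones for using kubectl as a regular user are:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config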
kubeadm init can also be executed phase by phase:
# Create the certificates
kubeadm init phase certs all
# Generate kubeconfig files for admin, controller-manager, kubelet, and scheduler
kubeadm init phase kubeconfig all
# Start the kubelet
kubeadm init phase kubelet-start
# Generate the static Pod manifests for apiserver, controller-manager, and scheduler
kubeadm init phase control-plane all
# Generate the static Pod manifest for a single-node etcd
kubeadm init phase etcd local
# Upload the kubeadm cluster configuration and the kubelet configuration as ConfigMaps
kubeadm init phase upload-config all
# Upload the certificates
kubeadm init phase upload-certs --upload-certs
# Mark the node as a control-plane node and taint it
kubeadm init phase mark-control-plane
# Generate a bootstrap token
kubeadm init phase bootstrap-token
# Update kubelet-related configuration after the TLS bootstrap
kubeadm init phase kubelet-finalize all
# Install the add-on components: coredns and kube-proxy
kubeadm init phase addon all
Flannel is the reason for the --pod-network-cidr parameter above (10.244.0.0/16 is its default network). Install it with:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
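A quick way to confirm the network plugin came up (sanity-check commands, not from the original text):
kubectl get nodes                  # nodes flip from NotReady to Ready once flannel is running
kubectl -n kube-system get pods    # coredns Pods leave Pending after the network is ready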
Worker nodes join the cluster with the command that the master prints once its deployment completes:
kubeadm join $APISERVER_ADDRESS --token $TOKEN --discovery-token-ca-cert-hash $CA_CERT_HASH
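If that output was lost or the token has expired, a fresh join command can be generated on the master:
kubeadm token create --print-join-command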
mkdir -p /etc/uec
pushd /etc/uec
# 1 Generate the root certificate
# Generate the root certificate's private key
openssl genrsa -out cakey.pem 2048
# Generating a self-signed certificate requires the -x509 flag
openssl req -new -x509 -key cakey.pem -out cacert.pem -subj '/CN=Mirror CA/O=xxx/ST=xxx/L=xxx/C=CN' -days 3650
cat cacert.pem >>/etc/pki/tls/certs/ca-bundle.crt
# 2 Generate a private key and a certificate signing request
openssl genrsa -out k8s.io.key 2048
openssl req -new -key k8s.io.key -out k8s.io.csr -subj '/CN=*.k8s.io/O=xxx/ST=xxx/L=xxx/C=CN'
# 3 Issue the certificate
openssl x509 -req -in k8s.io.csr -CA cacert.pem -CAkey cakey.pem -CAcreateserial -out k8s.io.crt -days 3650 -extfile ./openssl.cfg -extensions k8s.io
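The referenced ./openssl.cfg is not shown above; a minimal sketch of what its k8s.io extensions section could look like (the SAN entries are assumptions for a wildcard *.k8s.io certificate):
[k8s.io]
subjectAltName = @k8s_io_alt
[k8s_io_alt]
DNS.1 = k8s.io
DNS.2 = *.k8s.io
The issued certificate can then be checked against the CA with openssl verify -CAfile cacert.pem k8s.io.crt.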
High availability falls into two implementations depending on the type of workload. For programs running inside Kubernetes, the first approach is to deploy multiple replicas through a ReplicaSet/Deployment; the second relies on additional components to provide it. For Kubernetes' own high availability, each component running on the master has to be handled separately.
kube-scheduler and kube-controller-manager provide the --leader-elect option, which defaults to true. The other parameters related to this option are:
--leader-elect-lease-duration: how long non-leaders wait before trying to acquire leadership of an expired lease
--leader-elect-renew-deadline: how long the acting leader has to renew the lease before it is considered lost
--leader-elect-retry-period: the interval between acquire and renew attempts
--leader-elect-resource-lock: the type of resource object used as the lock (e.g. leases, endpoints, configmaps)
These options reveal the concrete leader-election scheme: all instances compete to acquire, and then periodically renew, a lock on a shared resource object; only the instance holding the lock does real work, and if it fails to renew within the lease duration, another instance takes over.
kube-apiserver, by contrast, is stateless: multiple instances can be deployed and run simultaneously. However, a Node can only point at one IP when connecting, so the usual setup is to run several kube-apiserver instances and expose a single IP through nginx + keepalived + a VIP; Nodes then join the cluster using the VIP.
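A hypothetical nginx stream configuration for that setup (all IP addresses and the listen port are placeholders):
stream {
    upstream kube_apiserver {
        # the kube-apiserver instances
        server 192.168.0.11:6443;
        server 192.168.0.12:6443;
        server 192.168.0.13:6443;
    }
    server {
        listen 16443;               # the port exposed on the VIP
        proxy_pass kube_apiserver;
    }
}
keepalived then floats the VIP between the nginx hosts, and kubeadm join targets $VIP:16443.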