The base operating system is CentOS 7.9.
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache
yum update -y
yum install -y vim wget
yum install -y yum-utils device-mapper-persistent-data lvm2 nfs-utils
systemctl stop firewalld
systemctl disable firewalld
reboot
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
swapoff -a
sed -i 's/.* swap/#&/' /etc/fstab
cat /etc/fstab
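A quick sanity check that swap and SELinux are really off before continuing (swapon should print nothing; getenforce should report Permissive now and Disabled after the next reboot):
# list active swap devices; no output means swap is fully off
swapon -s
free -m
# confirm the current SELinux mode
getenforce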
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
vm.swappiness=0
EOF
sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
First download the following software versions, then install and configure them with the script below:
containerd 1.7.1 => https://github.com/containerd/containerd/releases/download/v1.7.1/containerd-1.7.1-linux-amd64.tar.gz
runc 1.1.7 => https://github.com/opencontainers/runc/releases/download/v1.1.7/runc.amd64
cni-plugins 1.3.0 => https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
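A minimal download sketch, assuming the host can reach github.com directly (otherwise fetch the files elsewhere and copy them over):
# download the three release artifacts into the current directory
wget -c https://github.com/containerd/containerd/releases/download/v1.7.1/containerd-1.7.1-linux-amd64.tar.gz
wget -c https://github.com/opencontainers/runc/releases/download/v1.1.7/runc.amd64
wget -c https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz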
Contents of containerd.service:
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration
#Environment="ENABLE_CRI_SANDBOXES=sandboxed"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
mkdir -p /etc/containerd
tar Cxzvf /usr/local containerd-1.7.1-linux-amd64.tar.gz
mkdir -p /usr/local/lib/systemd/system/
cp containerd.service /usr/local/lib/systemd/system/containerd.service
install -m 755 runc.amd64 /usr/local/sbin/runc
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.3.0.tgz
containerd config default > /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
# containerd 1.7.1's default config uses registry.k8s.io/pause:3.8; point it at the Aliyun mirror of pause:3.9, which Kubernetes 1.26 expects
sed -i 's#registry.k8s.io/pause:3.8#registry.aliyuncs.com/google_containers/pause:3.9#g' /etc/containerd/config.toml
systemctl daemon-reload
systemctl enable containerd --now
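A quick verification that containerd is running and the config edits took effect (the version strings should match the downloads above):
systemctl status containerd --no-pager
containerd --version
runc --version
# both lines should reflect the edits made above
grep -E 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml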
yum remove -y docker-ce docker-ce-cli containerd.io
yum autoremove -y
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# Set the required sysctl parameters; these persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters without rebooting
sudo sysctl --system
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
cat << EOF > /etc/yum.repos.d/kubernetes.repo
# choose a baseurl for the mirror that works best in your region
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache -y
# set the hostname on each master respectively
hostnamectl set-hostname master1   # on master1
hostnamectl set-hostname master2   # on master2
hostnamectl set-hostname master3   # on master3
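The control-plane endpoint name kapiserver used by kubeadm later has to resolve to the Nginx load balancer on every node. The master IPs below come from the Nginx upstream configuration further down; the Nginx host's own IP is not given in this guide, so 192.168.3.86 is only a placeholder; replace it with your real load-balancer IP. A sketch of the /etc/hosts entries to append on every node:
cat << EOF >> /etc/hosts
192.168.3.83 master1
192.168.3.84 master2
192.168.3.85 master3
192.168.3.86 kapiserver
EOF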
yum install -y kubeadm-1.26.5-0 kubelet-1.26.5-0 kubectl-1.26.5-0 --disableexcludes=kubernetes
yum install -y yum-plugin-versionlock
yum versionlock add kubeadm kubectl kubelet
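Optionally confirm that the three packages are now locked:
yum versionlock list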
systemctl enable kubelet.service --now
crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock
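With the runtime endpoint configured, crictl should be able to talk to containerd; a quick check:
# prints both the crictl client version and containerd's RuntimeName/RuntimeVersion
crictl version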
CentOS 7.9 is also used as the OS for the Nginx host.
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache
yum update -y
yum install -y vim wget
yum install -y yum-utils device-mapper-persistent-data lvm2
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
swapoff -a
sed -i 's/.* swap/#&/' /etc/fstab
cat /etc/fstab
cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
vm.swappiness=0
EOF
sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
yum install -y epel-release
yum install -y nginx nginx-mod-stream
mv /etc/nginx/nginx.conf /etc/nginx/nginx.conf.orig
# note the quoted 'EOF': it stops the shell from expanding the $variables used in log_format
cat << 'EOF' > /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 192.168.3.83:6443 max_fails=1 fail_timeout=10s;
        server 192.168.3.84:6443 max_fails=1 fail_timeout=10s;
        server 192.168.3.85:6443 max_fails=1 fail_timeout=10s;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }

    upstream k8s-ingress {
        server 192.168.3.87:443 max_fails=1 fail_timeout=10s;
    }
    server {
        listen 443;
        proxy_pass k8s-ingress;
    }

    upstream k8s-ingress-80 {
        server 192.168.3.87:80 max_fails=1 fail_timeout=10s;
    }
    server {
        listen 80;
        proxy_pass k8s-ingress-80;
    }
}

http {
}
EOF
nginx -t
# nginx has not been started yet on this host, so start and enable it instead of reloading
systemctl enable nginx --now
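A quick check that the stream proxies are up and listening on 6443, 443 and 80:
ss -lntp | grep nginx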
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
kubeadm init --control-plane-endpoint kapiserver:6443 --upload-certs --kubernetes-version=v1.26.5 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16
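kubeadm init prints the follow-up steps on success; the usual kubectl configuration (everything in this guide runs as root) is:
# as root you can instead simply: export KUBECONFIG=/etc/kubernetes/admin.conf
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config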
wget -c https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
On the other two masters, run the control-plane join command printed by kubeadm init (the token, discovery hash and certificate key below are placeholders; use the values from your own init output):
kubeadm join kapiserver:6443 --token asdfasdf --discovery-token-ca-cert-hash sha256:12312 --control-plane --certificate-key fasdf
kubectl -n kube-system rollout restart deployment coredns
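The flannel and CoreDNS pods should settle into the Running state shortly afterwards; a quick check:
kubectl get pods -A -o wide | grep -E 'flannel|coredns'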
If the original token has expired, or when joining additional worker nodes later, create a new token and recompute the CA certificate hash:
kubeadm token create
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
kubeadm join --token <token> kapiserver:6443 --discovery-token-ca-cert-hash sha256:<hash>
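Alternatively, kubeadm can print a ready-to-use worker join command with a fresh token, which avoids computing the hash by hand (for joining another control plane node, additionally generate a new certificate key with kubeadm init phase upload-certs --upload-certs):
kubeadm token create --print-join-command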
kubectl get nodes
At this point, the basic 3-master Kubernetes cluster starts up successfully. Since this is a local environment, no failover was configured for the Nginx load balancer, so if Nginx goes down the cluster endpoint becomes unavailable; keep that in mind. If you need high availability, set up Keepalived for Nginx.