三台centos7虚拟机(内存每个节点4G)
[root@master ~]# cat /etc/redhat-release
CentOS Linux release 7.9.2009 (Core)
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.50.162 master
192.168.50.163 node1
192.168.50.164 node2
vim /etc/resolv.conf
nameserver 114.114.114.114
kubelet要求必须禁用交换分区,所以kubeadm初始化时会检测swap是否关闭,如果没有关闭会报错
#查看当前启用的交换分区
swapon --show
#临时关闭
swapoff -a
#永久关闭:注释掉 /etc/fstab 中的 swap 挂载行(否则重启后 swap 会再次启用),并将 swappiness 设为 0
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo vm.swappiness = 0 >> /etc/sysctl.conf
systemctl disable --now firewalld
#加载 br_netfilter 内核模块(bridge-nf-call-* 参数依赖它),并配置以下内核参数,最后执行 sysctl --system 使其生效
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
[root@master ~]# cat /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.overcommit_memory=1 # 不检查物理内存是否够用
vm.panic_on_oom=0 # 开启 OOM
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
#每个节点安装chrony,用于集群节点间时间同步
yum install -y chrony
#修改配置文件,同步阿里时间服务器
[root@master ~]# vim /etc/chrony.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server ntp1.aliyun.com iburst
#重启 chronyd 使配置生效并设置开机自启
systemctl enable --now chronyd
vim /etc/yum.repos.d/kubernetes.repo
[k8s]
name=k8s
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-20.10.9-3.el7 docker-ce-cli-20.10.9-3.el7 docker-compose-plugin containerd.io
vim /etc/docker/daemon.json
{
"registry-mirrors": [ "https://dockerpull.com",
"https://dockerhub.icu",
"https://arq04nd6.mirror.aliyuncs.com"]
}
systemctl daemon-reload
systemctl restart docker
# 验证
docker pull nginx
# 注意:CentOS 7 应使用 el7 包(el8 包是为 RHEL/CentOS 8 构建的,依赖可能不满足)
wget -c https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm
yum install cri-dockerd-0.3.4-3.el7.x86_64.rpm
若以上下载地址失效,请到 GitHub 的 Mirantis/cri-dockerd releases 页面获取对应版本的 rpm 包
systemctl enable cri-docker
$ vi /usr/lib/systemd/system/cri-docker.service # 找到第10行ExecStart=
# 修改为ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
# 重启Docker组件
$ systemctl daemon-reload && systemctl restart docker cri-docker.socket cri-docker
# 检查Docker组件状态
$ systemctl status docker cri-docker.socket cri-docker
yum install -y kubelet-1.23.17 kubeadm-1.23.17 kubectl-1.23.17
# 设置开机自启
systemctl enable kubelet
# 只在master节点上操作
创建初始化文件 kubeadm-init.yaml
[root@master ~]# kubeadm config print init-defaults > kubeadm-init.yaml
修改如下配置:
- advertiseAddress:为控制平面地址( Master 主机 IP,本例为 192.168.50.162 )
advertiseAddress: 1.2.3.4
修改为 advertiseAddress: 192.168.50.162
- criSocket:为 containerd 的 socket 文件地址
criSocket: unix:///var/run/containerd/containerd.sock
修改为 criSocket: unix:///var/run/cri-dockerd.sock
- name: node 修改为本机主机名(需与 hostnamectl 设置的主机名一致,本例为 master)
name: node
修改为 name: master
(可选,仅高可用多 master 集群需要)在 clusterName 下面添加负载均衡 VIP 和端口;单 master 集群无需配置此项
controlPlaneEndpoint: 172.16.100.20:16443
- imageRepository:阿里云镜像代理地址,否则拉取镜像会失败
imageRepository: registry.k8s.io
修改为:imageRepository: registry.aliyuncs.com/google_containers
- kubernetesVersion:为 k8s 版本
kubernetesVersion: 1.28.0
修改为:kubernetesVersion: 1.23.17
注意:一定要配置镜像代理,否则会由于防火墙问题导致集群安装失败
文件尾部添加:
文件末尾增加启用ipvs功能
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# 根据配置文件启动 kubeadm 初始化 k8s
# 注意:kubeadm 不允许 --config 与 --image-repository、--service-cidr、--pod-network-cidr 等参数混用
# (会报错 "can not mix '--config' with arguments")。镜像仓库已在配置文件 imageRepository 中指定,
# 网段请写入配置文件的 networking 段:
#   networking:
#     serviceSubnet: 10.96.0.0/12
#     podSubnet: 172.20.0.0/16
$ kubeadm init --config=kubeadm-init.yaml --upload-certs --v=6
成功界面:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.50.162:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f25f34c2cbe66eff44cc75b83965125ecc9ef10ca1a0ffa84c5ac02875b6a91d
加入node节点
所有的工作节点加入集群
注意:加入集群时需要添加 --cri-socket unix:///var/run/cri-dockerd.sock
kubeadm join 192.168.50.162:6443 --token 69bzwi.wf2ctlqht0r77ook \
--discovery-token-ca-cert-hash sha256:26bd1dea437d362fb677a0847db5de5f6a464fad888454d960be932bdad97e4c \
--cri-socket unix:///var/run/cri-dockerd.sock
查看集群:
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 84m v1.23.17
node1 NotReady 76m v1.23.17
node2 NotReady 79m v1.23.17
此时node都是NotReady状态,还需要安装网络插件,就能解决
获取yaml文件:
curl -O https://calico-v3-25.netlify.app/archive/v3.25/manifests/calico.yaml
更改里面镜像前缀,方便下载
sed -i 's#docker.io/##g' calico.yaml
修改内容
[root@k8s-master ~]# vim calico.yaml
以下两行默认没有开启,开启后修改第二行为kubeadm初始化使用指定的pod network即可。
3680 # The default IPv4 pool to create on startup if none exists. Pod IPs will be
3681 # chosen from this range. Changing this value after installation will have
3682 # no effect. This should fall within `--cluster-cidr`.
3683 - name: CALICO_IPV4POOL_CIDR
3684 value: "172.20.0.0/16"
3685 # Disable file logging so `kubectl logs` works.
应用yaml
kubectl apply -f calico.yaml