systemctl list-unit-files
# Add to /etc/hosts on every node:
192.168.50.208 master1.k8s.org
192.168.50.117 master2.k8s.org
192.168.50.126 master3.k8s.org
192.168.50.119 node1.k8s.org
192.168.50.120 node2.k8s.org
192.168.50.250 api.k8s.org-vip
192.168.50.121 keepalived1.nginx.org
192.168.50.122 keepalived2.nginx.org
# Time sync on every node (certificates and etcd need consistent clocks).
apt install chrony -y
systemctl enable chrony
systemctl restart chrony
# Verify "System clock synchronized: yes" in the output.
timedatectl
# kubelet refuses to start with swap enabled.
free -m
swapoff -a
# NOTE(review): swapoff -a is not persistent — also disable the swap entry
# in /etc/fstab so it stays off after reboot.
# Download containerd 1.6.10 from the internal mirror...
wget http://192.168.50.101/containerd-1.6.10-linux-amd64.tar.gz
# ...or from the upstream GitHub releases page:
或者到github下载
https://github.com/containerd/containerd/releases
# Unpack into /usr/local (binaries land in /usr/local/bin).
tar Cxzvf /usr/local containerd-1.6.10-linux-amd64.tar.gz
# Directory for the systemd unit installed below.
mkdir -p /usr/local/lib/systemd/system/
# Install the upstream systemd unit for containerd.
# FIX: the original "cat </usr/local/..." READ a nonexistent file; it must
# WRITE the here-doc to that path. Quoted delimiter: content is literal.
cat <<'EOF' > /usr/local/lib/systemd/system/containerd.service
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration
#Environment="ENABLE_CRI_SANDBOXES=sandboxed"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
# Load the new unit file and start containerd at boot.
systemctl daemon-reload
systemctl enable --now containerd
# Install the runc binary (containerd's low-level runtime)...
wget http://192.168.50.101/runc.amd64
# ...or from the upstream GitHub releases page:
或者到github下载
https://github.com/opencontainers/runc/releases
install -m 755 runc.amd64 /usr/local/sbin/runc
# Generate the default containerd configuration (edited later for
# SystemdCgroup / sandbox_image), then restart to pick it up.
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml
systemctl restart containerd
# Add the Aliyun Kubernetes apt repository and install the k8s tools.
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
# FIX: the original "cat </etc/apt/..." read a nonexistent file; it must
# write the here-doc into the sources list.
cat <<'EOF' > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
# On both nginx LB hosts:
#   192.168.50.121 keepalived1.nginx.org
#   192.168.50.122 keepalived2.nginx.org
# append a TCP (stream) proxy for the three apiservers.
# FIX: the original "cat <" had no target file and no here-doc; the quoted
# 'EOF' delimiter keeps $remote_addr literal for nginx.
# NOTE(review): the stream{} block must sit at the TOP level of nginx.conf
# (outside http{}) and requires nginx built with the stream module — confirm.
cat <<'EOF' >> /etc/nginx/nginx.conf
stream {
    upstream backend {
        # Consistent source-IP hashing keeps a client on one apiserver.
        hash $remote_addr consistent;
        server 192.168.50.208:6443 max_fails=3 fail_timeout=30s;
        server 192.168.50.117:6443 max_fails=3 fail_timeout=30s;
        server 192.168.50.126:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF
#!/bin/bash
# Keepalived health-check: if the local nginx master process is gone,
# stop keepalived so the VIP fails over to the peer node.

# Count running "nginx: master" processes (0 when nginx is down).
# pgrep replaces the fragile `ps -ef | grep | grep -v grep` chain and
# already excludes itself from the match.
nginx_master_count() {
  pgrep -f 'nginx: master' | wc -l
}

if [ "$(nginx_master_count)" -eq 0 ]; then
  echo 'nginx is down'
  # Releasing keepalived moves the VIP to the standby host.
  /etc/init.d/keepalived stop
else
  echo 'nginx is running'
fi
! Configuration File for keepalived (keepalived1, 192.168.50.121)
global_defs {
   notification_email {
     [email protected]
     [email protected]
     [email protected]
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict        # must stay commented for unicast mode
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
! FIX: the script block was named "check_web" but track_script below refers
! to "check_k8s", so the health check was never tracked. Names now match.
vrrp_script check_k8s {
    script "/tmp/check_k8s.sh"   # nginx health-check script
    interval 2                   # run every 2 seconds
    # weight 2                   # alternative: lower priority instead of exiting
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0          # LAN interface name (use ens33 if that is the NIC)
    virtual_router_id 51
    priority 100
    advert_int 1
    nopreempt               # non-preempting
    ! NOTE(review): keepalived docs recommend state BACKUP (with differing
    ! priorities) on both nodes when nopreempt is used — confirm.
    unicast_src_ip 192.168.50.121   # this host's address
    unicast_peer {
        192.168.50.122      # peer address(es); list all other cluster members
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.50.250
    }
    track_script {
        check_k8s
    }
}
! Configuration File for keepalived (keepalived2, 192.168.50.122)
global_defs {
   notification_email {
     [email protected]
     [email protected]
     [email protected]
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict        # must stay commented for unicast mode
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
! FIX: the script block was named "check_web" but track_script below refers
! to "check_k8s", so the health check was never tracked. Names now match.
vrrp_script check_k8s {
    script "/tmp/check_k8s.sh"   # nginx health-check script
    interval 2                   # run every 2 seconds
    # weight 2                   # alternative: lower priority instead of exiting
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0          # LAN interface name (use ens33 if that is the NIC)
    virtual_router_id 51
    priority 100
    advert_int 1
    nopreempt               # non-preempting
    ! NOTE(review): keepalived docs recommend state BACKUP (with differing
    ! priorities) on both nodes when nopreempt is used — confirm.
    unicast_src_ip 192.168.50.122   # this host's address
    unicast_peer {
        192.168.50.121      # peer address(es); list all other cluster members
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.50.250
    }
    track_script {
        check_k8s
    }
}
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
# Edit /etc/containerd/config.toml and set:
SystemdCgroup = true
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"
# Restart containerd after editing:
systemctl restart containerd
# Bootstrap the first control-plane node; the endpoint is the keepalived VIP
# in front of all three apiservers, the pod CIDR matches flannel's default.
kubeadm init --control-plane-endpoint "192.168.50.250:6443" --pod-network-cidr="10.244.0.0/16" --image-repository registry.aliyuncs.com/google_containers
# Set up kubectl for the current user (commands printed by kubeadm init).
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Join an additional control-plane node (run on master2/master3 AFTER the
# certificates below have been copied over).
kubeadm join 192.168.50.250:6443 --token gky9nd.g3xzh2fzrpmjxg3s --discovery-token-ca-cert-hash sha256:0b34d796a7821b1710ede59d589e98add2a3e0d0241c8a79f73676d2b06a13e1 --control-plane
# Copy the shared CA, service-account, and front-proxy certs from master1
# to master2 (repeat for master3) so they can join as control-plane nodes.
scp -rp /etc/kubernetes/pki/ca.* master2.k8s.org:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/sa.* master2.k8s.org:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/front-proxy-ca.* master2.k8s.org:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/etcd/ca.* master2.k8s.org:/etc/kubernetes/pki/etcd
scp -rp /etc/kubernetes/admin.conf master2.k8s.org:/etc/kubernetes
# Join a worker node (run on node1/node2).
kubeadm join 192.168.50.250:6443 --token gky9nd.g3xzh2fzrpmjxg3s --discovery-token-ca-cert-hash sha256:0b34d796a7821b1710ede59d589e98add2a3e0d0241c8a79f73676d2b06a13e1
kubectl apply -f http://192.168.50.101/kube-flannel.yml
可以去官网下载:
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
⚡ root@master1 ~/oeosProject kubectl get nodes,svc,pods -A
NAME STATUS ROLES AGE VERSION
node/master1.k8s.org Ready control-plane 3h38m v1.25.4
node/master2.k8s.org Ready control-plane 3h13m v1.25.4
node/master3.k8s.org Ready control-plane 3h11m v1.25.4
node/node1.k8s.org Ready 3h37m v1.25.4
node/node2.k8s.org Ready 3h36m v1.25.4
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.96.0.1 443/TCP 3h38m
kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 3h38m
kubernetes-dashboard service/dashboard-metrics-scraper ClusterIP 10.100.235.68 8000/TCP 3h1m
kubernetes-dashboard service/kubernetes-dashboard ClusterIP 10.102.26.11 443/TCP 3h1m
oeos service/uwsgi-deploy NodePort 10.96.128.65 80:32479/TCP 3h34m
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel pod/kube-flannel-ds-57bhl 1/1 Running 0 3h8m
kube-flannel pod/kube-flannel-ds-fkdqd 1/1 Running 0 3h8m
kube-flannel pod/kube-flannel-ds-jpxvn 1/1 Running 0 3h8m
kube-flannel pod/kube-flannel-ds-l6jjz 1/1 Running 0 3h8m
kube-flannel pod/kube-flannel-ds-sjjrh 1/1 Running 0 3h8m
kube-system pod/coredns-c676cc86f-ptfnt 1/1 Running 0 3h38m
kube-system pod/coredns-c676cc86f-x9pmh 1/1 Running 0 3h38m
kube-system pod/etcd-master1.k8s.org 1/1 Running 4 3h38m
kube-system pod/etcd-master2.k8s.org 1/1 Running 0 3h13m
kube-system pod/etcd-master3.k8s.org 1/1 Running 0 3h11m
kube-system pod/kube-apiserver-master1.k8s.org 1/1 Running 4 3h38m
kube-system pod/kube-apiserver-master2.k8s.org 1/1 Running 0 3h12m
kube-system pod/kube-apiserver-master3.k8s.org 1/1 Running 2 3h10m
kube-system pod/kube-controller-manager-master1.k8s.org 1/1 Running 5 3h38m
kube-system pod/kube-controller-manager-master2.k8s.org 1/1 Running 0 3h12m
kube-system pod/kube-controller-manager-master3.k8s.org 1/1 Running 2 3h9m
kube-system pod/kube-proxy-2nw42 1/1 Running 0 3h37m
kube-system pod/kube-proxy-5m4dk 1/1 Running 0 3h36m
kube-system pod/kube-proxy-c5prx 1/1 Running 0 3h38m
kube-system pod/kube-proxy-h79pn 1/1 Running 0 3h11m
kube-system pod/kube-proxy-hzd7c 1/1 Running 0 3h13m
kube-system pod/kube-scheduler-master1.k8s.org 1/1 Running 5 3h38m
kube-system pod/kube-scheduler-master2.k8s.org 1/1 Running 0 3h13m
kube-system pod/kube-scheduler-master3.k8s.org 1/1 Running 2 3h11m
kubernetes-dashboard pod/dashboard-metrics-scraper-64bcc67c9c-mkpdd 1/1 Running 0 3h1m
kubernetes-dashboard pod/kubernetes-dashboard-66c887f759-kgpzl 1/1 Running 0 3h1m
oeos pod/uwsgi-deploy-869846f746-kw7ld 1/1 Running 0 22m
oeos pod/uwsgi-deploy-869846f746-rdcjg 1/1 Running 0 22m
# Re-upload control-plane certificates and print the --certificate-key
# needed to join further masters.
kubeadm init phase upload-certs --upload-certs
# Print a fresh worker join command (new token + CA cert hash).
kubeadm token create --print-join-command
# Drain and remove a node.
# FIX: node names in this cluster are node1.k8s.org (not "k8s-node1"), and
# --delete-local-data was deprecated and renamed --delete-emptydir-data in
# kubectl >= 1.20 (this cluster runs v1.25.4).
kubectl drain node1.k8s.org --delete-emptydir-data --force --ignore-daemonsets
kubectl delete node node1.k8s.org
# Run on the removed node itself to wipe its kubeadm state.
kubeadm reset