root@master-node:/home/hjw# kubeadm token create --print-join-command
kubeadm join 192.168.1.105:6443 --token beyohu.mc68nls3y8wibd1k --discovery-token-ca-cert-hash sha256:1bed0c23843d9251fd8bdf6a72d89b0db0d3df24f02e611dfd69bd502e5daa56
root@Node1:/home/hjw# kubeadm join 192.168.1.105:6443 --token beyohu.mc68nls3y8wibd1k --discovery-token-ca-cert-hash sha256:1bed0c23843d9251fd8bdf6a72d89b0db0d3df24f02e611dfd69bd502e5daa56
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
[ERROR Port-10250]: Port 10250 is in use
[ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
root@Node1:/home/hjw# cd /etc/kubernetes/
root@Node1:/etc/kubernetes# ls
admin.conf kubelet.conf manifests pki
root@Node1:/etc/kubernetes# cd pki
root@Node1:/etc/kubernetes/pki# ls
ca.crt
root@Node1:/etc/kubernetes/pki# cd ..
root@Node1:/etc/kubernetes# rm -f kubelet.conf
root@Node1:/etc/kubernetes# cd pki
root@Node1:/etc/kubernetes/pki# rm -f ca.crt
root@Node1:/etc/kubernetes/pki# ls
root@Node1:/etc/kubernetes/pki# cd ..
root@Node1:/etc/kubernetes# ls
admin.conf manifests pki
root@Node1:/etc/kubernetes# lsof -i:10250
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
kubelet 1466350 root 23u IPv6 43933420 0t0 TCP *:10250 (LISTEN)
root@Node1:/etc/kubernetes# systemctl restart kubelet
root@Node1:/etc/kubernetes# kubeadm join 192.168.1.105:6443 --token beyohu.mc68nls3y8wibd1k --discovery-token-ca-cert-hash sha256:1bed0c23843d9251fd8bdf6a72d89b0db0d3df24f02e611dfd69bd502e5daa56
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
root@Node1:/etc/kubernetes# kubectl get nodes
E0703 03:26:41.997157 3565893 memcache.go:238] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
E0703 03:27:14.001171 3565893 memcache.go:238] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
E0703 03:27:46.002221 3565893 memcache.go:238] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
E0703 03:28:18.004198 3565893 memcache.go:238] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
E0703 03:28:50.005086 3565893 memcache.go:238] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
Unable to connect to the server: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
root@Node1:/etc/kubernetes#
root@Node1:/etc/kubernetes# echo "export KUBECONFIG=/etc/kubernetes/kubelet.conf" >> /etc/profile
root@Node1:/etc/kubernetes# source /etc/profile
root@Node1:/etc/kubernetes# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-node Ready control-plane 130d v1.26.0
node1 Ready <none> 7m15s v1.26.2
忘记 k8s 集群的 join 命令的话可以用 `kubeadm token create --print-join-command` 来查看;加上 `--ttl 0`(注意是小写的 --ttl,不是 -TTL)表示该 token 永不过期,默认有效期为 24 小时。
修改静态ip是要修改/etc/netplan/00开头的那个文件
# This is the network config written by 'subiquity'
network:
  version: 2
  renderer: NetworkManager
  ethernets:
    ens160:
      dhcp4: false
      optional: true
      addresses: [192.168.1.105/24]  # ip和掩码
      gateway4: 192.168.1.1  # 网关(注意:新版 netplan 已弃用 gateway4,建议改用 routes: [{to: default, via: 192.168.1.1}])
      nameservers:
        addresses: [192.168.1.1, 144.144.144.144]  # DNS服务器
改成这样,然后应用
sudo netplan apply
这是修改之后的ip关系
master-node ip 192.168.1.105
Node1 ip 192.168.1.100
Node2 ip 192.168.1.101
Node3 ip 192.168.1.102
Node4 ip 192.168.1.103
接下来在node节点中首先修改主机名称,分两种情况,如果是已经加入了节点的主机,需要在master中把node删除
kubectl delete node node_name
然后在node中进入root,reset工作节点
sudo su
kubeadm reset
第三步是在node节点中设置主机名称
sudo hostnamectl set-hostname 新名字
第四步是修改host文件
sudo vim /etc/hosts
127.0.0.1 localhost
127.0.1.1 新名字(改这里,改成新名字)
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
systemctl stop apparmor
systemctl disable apparmor
sudo apt install selinux-utils
setenforce 0
#临时关闭 SELinux 强制模式(注意:SELinux 不是防火墙;Ubuntu 默认使用 AppArmor 而非 SELinux,这一步通常可以省略)
swapoff -a
#临时关闭内存交换
sudo apt update
sudo apt upgrade
sudo apt install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
https://developer.aliyun.com/mirror/kubernetes?spm=a2c6h.13651102.0.0.44e51b11Kc4gu3
在这个链接里面可以配置阿里源
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
我这里先安装指定版本的 Docker,之后(下面几步)再安装指定版本的 K8S 三件套 kubelet/kubeadm/kubectl
apt install docker-ce=5:20.10.17~3-0~ubuntu-focal docker-ce-cli=5:20.10.17~3-0~ubuntu-focal containerd.io
固定 Docker 相关包的版本,防止 apt upgrade 意外升级(注意:包名是 docker-ce,并不存在名为 docker 的包;containerd.io 也应一并固定)
apt-mark hold docker-ce docker-ce-cli containerd.io
安装完 K8S 三件套后同样建议固定其版本:
apt-mark hold kubelet kubeadm kubectl
apt install kubelet=1.26.0-00 kubeadm=1.26.0-00 kubectl=1.26.0-00
配置containerd配置文件,生成默认配置文件
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
更改配置文件中/etc/containerd/config.toml的sandbox 镜像地址
修改前:
vim /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri"]
.........
sandbox_image = "k8s.gcr.io/pause:3.2"
........
修改后:
[plugins."io.containerd.grpc.v1.cri"]
  ...
  sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.2"
  ...
注意:K8S v1.26 的 kubeadm 期望的 pause 镜像版本是 3.9,建议改为 registry.aliyuncs.com/google_containers/pause:3.9,否则 kubeadm 可能警告 sandbox image 版本与期望不一致。
重启containerd
systemctl daemon-reload
systemctl enable containerd
systemctl restart containerd
下面是一个常见问题:在这个版本中需要检查 /etc/kubernetes 下有没有 manifests 这个文件夹,如果没有的话就创建一个:
mkdir -p /etc/kubernetes/manifests
(不要用 chmod +777 /etc/kubernetes —— chmod 不会创建缺失的目录,而且把 /etc/kubernetes 设为所有人可写存在安全风险;另外 "+777" 这种写法本身也不规范,八进制模式应直接写 777。)
在master-node中执行下列命令
scp /etc/kubernetes/admin.conf [email protected]:/etc/kubernetes/admin.conf
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
systemctl stop apparmor
systemctl disable apparmor
systemctl restart containerd.service