k8s集群搭建-kubeadm

k8s架构图

1636466481738.png

安装准备

关闭防火墙
[root@k8smaster ~]# systemctl stop firewalld   
[root@k8smaster ~]# systemctl disable firewalld  

关闭selinux:
[root@k8smaster ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config 
[root@k8smaster ~]# setenforce 0

[root@k8smaster ~]# getenforce
Disabled

关闭swap:
#临时
[root@k8smaster ~]# swapoff -a
#永久
[root@k8smaster ~]# vi /etc/fstab 
#UUID=ff5b128d-f66c-40a5-86d7-ce69d2c0da61 swap  swap    defaults        0 0

设置主机名:
[root@k8smaster ~]# hostnamectl set-hostname k8smaster
[root@k8smaster ~]# bash


在master添加hosts:
[root@k8smaster ~]# cat >> /etc/hosts << EOF
192.168.153.21 k8smaster
192.168.153.22 k8snode1
EOF

将桥接的IPv4流量传递到iptables的链:
[root@k8smaster ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#执行生效
[root@k8smaster ~]# sysctl --system

--遇到问题
sysctl --system并未出现相关配置
使用sysctl -p /etc/sysctl.d/k8s.conf,报错:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: 没有那个文件或目录
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: 没有那个文件或目录
--解决问题
[root@k8smaster ~]# modprobe br_netfilter
--然后重新执行
[root@k8smaster ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1


#时间同步:
[root@k8smaster ~]# yum install ntpdate -y
[root@k8smaster ~]# ntpdate time.windows.com

安装Docker【所有节点】

sftp>  cd /etc/yum.repos.d  
sftp> lcd d:\
sftp> put docker-ce.repo

[root@k8smaster ~]# yum -y install docker-ce
[root@k8smaster ~]# systemctl enable docker && systemctl start docker

[root@k8smaster ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF

[root@k8smaster ~]# systemctl restart docker
[root@k8smaster ~]# docker info

安装kubeadm/kubelet

#添加阿里云YUM软件源
[root@k8smaster ~]#  cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

 --------------------------------------------------------------
[root@k8smaster ~]# yum install -y kubelet-1.19.0 kubeadm-1.19.0 kubectl-1.19.0
[root@k8smaster ~]# systemctl enable kubelet

初始化master

在192.168.153.21(Master)执行。

#在192.168.153.21(Master)执行

kubeadm init \
  --apiserver-advertise-address=192.168.153.21 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.19.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16 \
  --ignore-preflight-errors=all

成功后的标志

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.153.21:6443 --token 44nvzl.38d7gx7ad8i301ti \
    --discovery-token-ca-cert-hash sha256:87e7c712c24552f4043aea29a0d99bd954bd1c005e93e67cd27c11a3d0cbbf0e 
    

master初始化成功后相关操作

[root@k8smaster ~]# mkdir -p $HOME/.kube
[root@k8smaster ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8smaster ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8smaster ~]#  kubectl get nodes
NAME        STATUS     ROLES    AGE    VERSION
k8smaster   NotReady   master   2m7s   v1.19.0

加入Kubernetes Node

[root@k8snode1 ~] kubeadm join 192.168.153.21:6443 --token 44nvzl.38d7gx7ad8i301ti \
    --discovery-token-ca-cert-hash sha256:87e7c712c24552f4043aea29a0d99bd954bd1c005e93e67cd27c11a3d0cbbf0e 
    
    
[root@k8smaster ~]#  kubectl get nodes
NAME        STATUS     ROLES    AGE    VERSION
k8smaster   NotReady   master   4m3s   v1.19.0
k8snode1    NotReady   &lt;none&gt;   16s    v1.19.0

#默认token有效期为24小时,当过期之后,该token就不可用了

token过期后重新生成

#master节点执行
[root@k8smaster ~]# kubeadm token create --print-join-command

部署容器网络(CNI)

#calico.yaml
#CALICO_IPV4POOL_CIDR的要和init的pod-network-cidr的值一致

            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
[root@k8smaster k8s]# kubectl apply -f calico.yaml   
[root@k8smaster k8s]# kubectl get pods -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-97769f7c7-6rtbt   1/1     Running   0          7m50s
calico-node-sxfkf                         1/1     Running   0          7m50s
calico-node-tskfs                         1/1     Running   0          7m50s
coredns-6d56c8448f-lhzmh                  1/1     Running   0          11h
coredns-6d56c8448f-z6t9r                  1/1     Running   0          11h

[root@k8smaster k8s]# kubectl get node
NAME        STATUS   ROLES    AGE   VERSION
k8smaster   Ready    master   11h   v1.19.0
k8snode1    Ready    &lt;none&gt;   11h   v1.19.0

为从节点增加node角色

[root@k8smaster k8s]# kubectl label node k8snode1 node-role.kubernetes.io/node=

[root@k8smaster ~]# kubectl get nodes
NAME        STATUS   ROLES    AGE     VERSION
k8smaster   Ready    master   2d23h   v1.19.0
k8snode1    Ready    node     2d23h   v1.19.0
------------------------------------------------------------------------------
#删除node角色
kubectl label node k8snode1 node-role.kubernetes.io/node-

测试kubernetes集群

- 验证Pod工作
- 验证Pod网络通信
- 验证DNS解析
-------------------------------------------------------------
[root@k8s-master ~]# kubectl create deployment nginx --image=nginx
[root@k8s-master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
[root@k8s-master ~]# kubectl get pod,svc

------------------------------------------------------------
[root@k8s-master ~]# kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-6799fc88d8-xg8b6   1/1     Running   0          56s

NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1       &lt;none&gt;        443/TCP        84m
service/nginx        NodePort    10.98.247.164   &lt;none&gt;        80:32187/TCP   31s

#内部访问(任意一个节点内部都可以访问)
curl 10.98.247.164
#外部访问(任意一个节点的ip都可以访问)
http://192.168.153.21:32187/
http://192.168.153.22:32187/

部署 Dashboard

#下载kubernetes-dashboard.yaml(下载得到的是recommended.yaml,重命名为kubernetes-dashboard.yaml)
[root@k8smaster k8s]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
[root@k8smaster k8s]# mv recommended.yaml kubernetes-dashboard.yaml
------------------------------------------------------------------------------------
[root@k8smaster k8s]# kubectl apply -f kubernetes-dashboard.yaml 
$ kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-6b4884c9d5-gl8nr   1/1     Running   0          13m
kubernetes-dashboard-7f99b75bf4-89cds        1/1     Running   0          13m

----------------------------------------------------------------
[root@k8s-master ~]# kubectl get pods,svc -n kubernetes-dashboard
NAME                                             READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-7b59f7d4df-n2j8m   1/1     Running   0          6m57s
pod/kubernetes-dashboard-5dbf55bd9d-9vxd5        1/1     Running   0          6m57s

NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   10.103.56.190   &lt;none&gt;        8000/TCP        6m57s
service/kubernetes-dashboard        NodePort    10.102.61.181   &lt;none&gt;        443:30001/TCP   6m57s
访问:
https://192.168.153.21:30001/
#创建service account并绑定默认cluster-admin管理员集群角色:
# 创建用户
$ kubectl create serviceaccount dashboard-admin -n kube-system
# 用户授权
$ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# 获取用户Token
$ kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')

-----------------------------
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IlR5Y2U1TXJnU21UamtpM19lR0RveThtTnRJUm55dF9xbmUzZ2JvMlB3Z00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tOTVocWQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiOThlNTEzMTgtOWI0Mi00NmEzLTk1YzMtNmZmZTU4MjMyOGZjIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.NLhZE-_5XAJ3VI-Y7_NmhFIwnyt_qg44xh90RJcxVUW5RXtEcoYrONlREkE7SL9XJoZDqG1hScIZOlVWjJT2NamEA5O5hrxc4p9MaXWvo4ILrisgpJtgZOKz4T2b28f7zAh_oFsAQxW-kWyOgL6FsSCDwAarpp0sA-JA-x6aeuqWStOHwb4Ua3kOxKvthulm8R0MLfOI_oo7N8V1_AG4dkE0Lz5uqmfX4-WA4kBcpMLxXVKYRWWdZQ6OpEMqilXsrcu58n1xSClJOrfXGS4ZC2IuU7ZUjFbUD67iy0H7VVGoHd4JY6xRzwwKcSbQox1zmZ_Xi68NgcPBV_InHkcdCg



eyJhbGciOiJSUzI1NiIsImtpZCI6IkdScDZ1aWt4QkViWlZobFNlRmY4SVg0aVl6ajlHTGh0TVFoRE5ncHR5VU0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tcHNjbmciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMzA0MWE1OWYtMDM5MC00ODcwLTg1ZmItYmMwMzhkZTEwOWI1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.J9nI28YN92kMuQlrGg4D606OVDoivEfUcFwoiJ2U_ygSw_8i4a4yk27uiSw6NPOi6ocPjitD3pXPS0XjdWyqAyYCZQDhc4NeoAXn6TtZm0A0S_EBHTRoo8WuGKhGwA6nJIaUfEy_kOWyHc464VS5DeC0JfHEsR-myKxJM447oHVymSUIf2Q71xxGQophcdwtzdpW1RjNDoJg_G4gk9NLUqL8XTsE98GgR_CE5cdzKYtebDPdc3xdAGOAsfSrzz7ilp0PdXtJsofHtQ3hVwV2znJoWELAUzMJ90-0KpaNRGtZJO-2AIZnaT7Exjwcrxzf46Wceu7EbHeiuItlQYpm_Q
1635644993314.png

恢复出厂设置

[root@k8snode1 ~]# kubeadm reset

#此命令一夜回到解放前

你可能感兴趣的:(k8s集群搭建-kubeadm)