[root@k8s-master01 ~]# yum list kubectl --showduplicates | sort -r
kubectl.x86_64 1.20.0-0 kubernetes
kubectl.x86_64 1.20.0-0 @kubernetes
kubectl.x86_64 1.19.9-0 kubernetes
kubectl.x86_64 1.19.8-0 kubernetes
kubectl.x86_64 1.19.7-0 kubernetes
kubectl.x86_64 1.19.6-0 kubernetes
kubectl.x86_64 1.19.5-0 kubernetes
kubectl.x86_64 1.19.4-0 kubernetes
kubectl.x86_64 1.19.3-0 kubernetes
kubectl.x86_64 1.19.2-0 kubernetes
kubectl.x86_64 1.19.16-0 kubernetes
kubectl.x86_64 1.19.15-0 kubernetes
kubectl.x86_64 1.19.14-0 kubernetes
kubectl.x86_64 1.19.13-0 kubernetes
kubectl.x86_64 1.19.12-0 kubernetes
kubectl.x86_64 1.19.11-0 kubernetes
kubectl.x86_64 1.19.1-0 kubernetes
kubectl.x86_64 1.19.10-0 kubernetes
kubectl.x86_64 1.19.0-0 kubernetes
[root@k8s-master01 ~]#
192.168.186.128 k8s-master01
192.168.186.129 k8s-node01
192.168.186.130 k8s-node02
192.168.186.131 ansible
# Permanently set each machine's hostname; '&& bash' starts a fresh shell
# so the prompt reflects the new name immediately.
hostnamectl set-hostname k8s-master01 && bash # run on the master
hostnamectl set-hostname k8s-node01 && bash # run on node1
hostnamectl set-hostname k8s-node02 && bash # run on node2
hostnamectl set-hostname ansible && bash # run on the ansible host
# Append the cluster's name-to-IP mappings to /etc/hosts (run on all machines).
# NOTE(review): '>>' appends — re-running this adds duplicate entries.
cat >> /etc/hosts << EOF
192.168.186.128 k8s-master01
192.168.186.129 k8s-node01
192.168.186.130 k8s-node02
192.168.186.131 ansible
EOF
# Run on every machine: generate an SSH key pair and push the public key to
# each host so Ansible can log in as root without a password.
# -N '' -f makes this non-interactive (empty passphrase, default path),
# matching the original intent of "press Enter through all prompts".
ssh-keygen -t rsa -N '' -f "$HOME/.ssh/id_rsa"
# Install the local public key into each remote host's authorized_keys.
# Use an absolute key path (the original relative '.ssh/id_rsa.pub' only
# worked when run from $HOME) and quote the host name.
for i in k8s-master01 k8s-node01 k8s-node02 ansible; do
  ssh-copy-id -i "$HOME/.ssh/id_rsa.pub" "$i"
done
# Install the Ansible control-node packages.
# epel-release must be installed first: the 'ansible' package lives in EPEL.
yum install -y epel-release git
yum install -y ansible
# Clone the kubeadm deployment playbooks (git was already installed above;
# the original notes installed git a second time here, which is redundant).
git clone https://gitee.com/qq759035366/ansible-kubeadm-install.git
# Run on the ansible machine: write the Ansible inventory file.
# NOTE(review): everything inside the heredoc (including its comment lines)
# is written to the 'hosts' file verbatim; the unquoted EOF delimiter would
# expand $variables, but none are present here.
cd ansible-kubeadm-install/
cat > hosts << EOF
[master]
# 如果部署单Master或多master的主master配置
192.168.186.128 node_name=k8s-master01
[masternode]
# 用于存储集群部署多节点master,单独存放一个组,对部署编写方便
192.168.0.182 node_name=k8s-master02
[node]
192.168.186.129 node_name=k8s-node01
192.168.186.130 node_name=k8s-node02
[etcd]
192.168.186.128 etcd_name=etcd-1
192.168.186.129 etcd_name=etcd-2
192.168.186.130 etcd_name=etcd-3
[lb]
# 如果部署单Master,该项忽略
192.168.0.185 node_name=lb-master
192.168.0.186 node_name=lb-backup
[k8s:children]
master
masternode
node
[newnode]
192.168.0.187 ansible_ssh_port=22 node_name=k8s-node03
EOF
# Run on the ansible machine: write the playbooks' group variables
# (install dirs, cluster CIDRs, k8s version, HA VIP/NIC, cert SANs).
# The heredoc body is written to group_vars/all.yml verbatim.
cat > group_vars/all.yml << EOF
# 安装目录
etcd_work_dir: '/etc/etcd'
tmp_dir: '/tmp/k8s'
# k8s执行配置临时目录
tmp_kubernetes_dir: '/root/kubernetes'
# 集群网络
service_cidr: '10.96.0.0/12'
pod_cidr: '10.244.0.0/16' # 与roles/addons/files/calico.yaml中网段一致
# 集群版本
k8s_version: 1.20.0 # 版本测试安装了,v1.19.0-v1.20.0版本,按照需要版本修改
# 高可用,如果部署单Master,该项忽略
vip: '192.168.0.188'
nic: 'ens33' # **注意这里一定要修改,修改为实际内网网卡名
# 自签证书可信任IP列表,为方便扩展,可添加多个预留IP
cert_hosts:
# 包含所有etcd节点IP
etcd:
- 192.168.186.128
- 192.168.186.129
- 192.168.186.130
EOF
# Single-master deployment: run the playbook, then verify the cluster.
cd /root/ansible-kubeadm-install
ansible-playbook -i hosts -uroot single-master-deploy.yml
# Verify: all nodes Ready and all kube-system pods Running.
kubectl get nodes
kubectl get pods -n kube-system
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 3h48m v1.20.0
k8s-node01 Ready <none> 3h45m v1.20.0
k8s-node02 Ready <none> 3h45m v1.20.0
[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-7985fc4dd6-8t56s 1/1 Running 1 3h48m
calico-node-b4qvv 1/1 Running 3 3h37m
calico-node-hp29x 1/1 Running 1 3h48m
calico-node-rpjbf 1/1 Running 8 3h37m
coredns-7f89b7bc75-69654 1/1 Running 1 3h48m
coredns-7f89b7bc75-scf2l 1/1 Running 1 3h48m
etcd-k8s-master01 1/1 Running 2 3h48m
kube-apiserver-k8s-master01 1/1 Running 2 3h48m
kube-controller-manager-k8s-master01 1/1 Running 9 3h48m
kube-proxy-4wgdq 1/1 Running 1 3h48m
kube-proxy-drsrh 1/1 Running 1 3h37m
kube-proxy-t7kss 1/1 Running 1 3h37m
kube-scheduler-k8s-master01 1/1 Running 9 3h48m
[root@k8s-master01 ~]#
kubectl get pods,svc -n kubernetes-dashboard #查看dashboard对外端口
[root@k8s-master01 yml]# kubectl get pods,svc -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
pod/dashboard-metrics-scraper-7b59f7d4df-8q55k 1/1 Running 1 3h39m
pod/kubernetes-dashboard-5dbf55bd9d-qmd7r 1/1 Running 1 3h39m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/dashboard-metrics-scraper ClusterIP 10.110.47.196 <none> 8000/TCP 3h45m
service/kubernetes-dashboard NodePort 10.103.87.75 <none> 443:30001/TCP 3h45m
[root@k8s-master01 yml]#
# Fetch the dashboard-admin service-account token.
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
# Log in to the Dashboard using the token printed above.
# Access URL: https://<NodeIP>:30001/  (note: must be https://)
# Smoke test: deploy nginx and expose it through a NodePort service.
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
[root@k8s-master01 ~]# kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-6799fc88d8-qnf95 1/1 Running 1 3h12m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h57m
service/nginx NodePort 10.104.134.62 <none> 80:32216/TCP 3h12m
[root@k8s-master01 ~]#
# To add a node later, edit the inventory (vi hosts) and make sure the
# [newnode] group lists it, e.g.:
#   [newnode]
#   192.168.0.187 ansible_ssh_port=22 node_name=k8s-node03
# Run the playbook to join the hosts in [newnode] to the cluster.
ansible-playbook -i hosts -uroot add-node.yml
# If one stage fails, re-run just that stage with tags, e.g. only the common role:
# ansible-playbook -i hosts -uroot single-master-deploy.yml --tags common