ansible自动部署k8s(集群环境全为ubuntu18.04)
root@k8s-master1:~# vim /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      dhcp4: no
      addresses: [192.168.30.10/24]
      gateway4: 192.168.30.2
      nameservers:
        addresses: [192.168.30.2]
root@k8s-master1:~# netplan apply
root@k8s-master1:~# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:83:c3:2e brd ff:ff:ff:ff:ff:ff
inet 192.168.30.10/24 brd 192.168.30.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe83:c32e/64 scope link
valid_lft forever preferred_lft forever
root@k8s-master1:~# ping www.baidu.com
PING www.a.shifen.com (61.135.169.125) 56(84) bytes of data.
64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=1 ttl=128 time=7.14 ms
64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=2 ttl=128 time=7.39 ms
64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=3 ttl=128 time=7.11 ms
……
root@k8s-master1:~# vim /etc/apt/sources.list
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
root@k8s-master1:~# apt-get update
root@k8s-master1:~# apt-get upgrade
root@k8s-master1:~# apt-get install python2.7 python-pip -y
root@k8s-master1:~#ln -s /usr/bin/python2.7 /usr/bin/python #创建一个软连接
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun #在各节点执行此命令安装docker
10)所有需要在harbor仓库拉取镜像的节点,需要编辑名字解析hosts文件或指定配置好的DNS服务
root@k8s-master1:~#vim /etc/hosts
……
192.168.30.40 www.harbor.com
……
[root@harbor1 src]# pwd
/usr/local/src
[root@harbor1 src]# ll
-rw-r--r-- 1 root root 580059210 Jul 5 16:47 harbor-offline-installer-v1.7.5.tgz
[root@harbor1 src]# tar xvf harbor-offline-installer-v1.7.5.tgz
[root@harbor1 src]# cd harbor/
[root@harbor1 harbor]# ll
total 572840
drwxr-xr-x 3 root root 23 Aug 14 19:08 common
-rw-r--r-- 1 root root 939 Apr 1 12:07 docker-compose.chartmuseum.yml
-rw-r--r-- 1 root root 975 Apr 1 12:07 docker-compose.clair.yml
-rw-r--r-- 1 root root 1434 Apr 1 12:07 docker-compose.notary.yml
-rw-r--r-- 1 root root 5608 Apr 1 12:07 docker-compose.yml
-rw-r--r-- 1 root root 8033 Apr 1 12:07 harbor.cfg
-rw-r--r-- 1 root root 585234819 Apr 1 12:08 harbor.v1.7.5.tar.gz
-rwxr-xr-x 1 root root 5739 Apr 1 12:07 install.sh
-rw-r--r-- 1 root root 11347 Apr 1 12:07 LICENSE
-rw-r--r-- 1 root root 1263409 Apr 1 12:07 open_source_license
-rwxr-xr-x 1 root root 36337 Apr 1 12:07 prepare
[root@harbor1 harbor]# mkdir certs
[root@harbor1 harbor]# cd certs #进入证书目录
[root@harbor1 certs]# openssl genrsa -out harbor_ca.key #生成证书私钥
Generating RSA private key, 2048 bit long modulus
...................+++
............................................................................................................................+++
e is 65537 (0x10001)
[root@harbor1 certs]# ll #查看生成的证书私钥
total 4
-rw-r--r-- 1 root root 1679 Aug 14 19:20 harbor_ca.key
[root@harbor1 harbor]# openssl req -x509 -new -nodes -key harbor_ca.key -subj /CN=www.harbor.com -days 3650 -out harbor_ca.crt #生成证书文件
[root@harbor1 harbor]# vim harbor.cfg
hostname = www.harbor.com #访问harbor服务器的域名
ui_url_protocol = https #强制指定为https加密
ssl_cert = /usr/local/src/harbor/certs/harbor_ca.crt #指定公钥文件(即自签名证书)路径
ssl_cert_key = /usr/local/src/harbor/certs/harbor_ca.key #指定私钥文件路径
harbor_admin_password = 123456 #登录harbor仓库的密码
……
[root@harbor1 harbor]# vim /etc/hosts
……
192.168.30.40 www.harbor.com
[root@harbor1 harbor]#curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun #安装docker
[root@harbor1 harbor]#apt-get install docker-compose -y #安装docker-compose
[root@harbor1 ~]# systemctl start docker
[root@harbor1 ~]# systemctl enable docker
[root@harbor1 harbor]# pwd
/usr/local/src/harbor
[root@harbor1 harbor]# ./install.sh
[root@k8s-master1 ~]#mkdir -p /etc/docker/certs.d/www.harbor.com/ #先在k8s-master1节点创建docker下的存放harbor服务的证书目录
[root@harbor1 ~]# scp /usr/local/src/harbor/certs/harbor_ca.crt 192.168.30.10:/etc/docker/certs.d/www.harbor.com/ #在harbor节点复制证书至master节点
[root@k8s-master1 ~]# docker login www.harbor.com
Username: admin
Password:
Login Succeeded
root@k8s-master1:~# cat /root/.docker/config.json #以上登录成功后会自动生成此harbor服务的认证文件,下次登录就无需密码
{
"auths": {
"www.harbor.com": {
"auth": "YWRtaW46MTIzNDU2"
}
},
"HttpHeaders": {
"User-Agent": "Docker-Client/19.03.1 (linux)"
}
[root@k8s-master1 ~]# docker pull alpine
Using default tag: latest
Trying to pull repository docker.io/library/alpine ...
latest: Pulling from docker.io/library/alpine
050382585609: Pull complete
Digest: sha256:6a92cd1fcdc8d8cdec60f33dda4db2cb1fcdcacf3410a8e05b3741f44a9b5998
Status: Downloaded newer image for docker.io/alpine:latest
[root@k8s-master1 ~]# docker tag alpine:latest www.harbor.com/base-images/alpine:v1 #修改tag版本号
[root@k8s-master1 ~]# docker push www.harbor.com/base-images/alpine:v1 #推送至harbor仓库
The push refers to a repository [www.harbor.com/base-images/alpine]
1bfeebd65323: Pushed
v1: digest: sha256:57334c50959f26ce1ee025d08f136c2292c128f84e7b229d1b0da5dac89e9866 size: 528
[root@k8s-master1 ~]# apt-get install sshpass -y
[root@k8s-master1 ~]# ssh-keygen
The key's randomart image is:
+---[RSA 2048]----+
| +oo.=X=o. |
|. o ==oBo. |
|.. +.o*.= |
| .o..ooBooo |
| . ..=.SE |
| o =++.. |
| . o.o |
| |
| |
+----[SHA256]-----+
[root@harbor1 ~]# scp /usr/local/src/harbor/certs/harbor_ca.crt 192.168.30.10:/etc/docker/certs.d/www.harbor.com
[root@k8s-master1 data]# vim scp_pass_crt.sh
#!/bin/bash
# Distribute the SSH public key, the Harbor CA certificate and the docker
# registry login credentials to every cluster node, so each node can pull
# images from the private harbor registry without further interaction.

# Target host list (master / harbor / etcd / node IPs)
IP="192.168.30.10 \
192.168.30.11 \
192.168.30.40 \
192.168.30.50 \
192.168.30.51 \
192.168.30.52 \
192.168.30.60 \
192.168.30.61 \
192.168.30.62"

# ${IP} is deliberately unquoted: word-splitting yields one host per loop turn.
for node in ${IP}; do
  # Push our public key first so the ssh/scp calls below are password-less;
  # test the command's own exit status instead of the '[ $? -eq 0 ]' anti-pattern.
  if sshpass -p 123456 ssh-copy-id "${node}" -o StrictHostKeyChecking=no; then
    echo "${node} 秘钥copy完成,准备环境初始化....."
    # Create the per-registry certificate directory docker expects.
    if ssh "${node}" "mkdir -p /etc/docker/certs.d/www.harbor.com"; then
      echo "Harbor 证书目录创建成功!"
    fi
    scp /etc/docker/certs.d/www.harbor.com/harbor_ca.crt \
      "${node}":/etc/docker/certs.d/www.harbor.com/harbor_ca.crt
    # Copy the docker auth file so nodes can log in to harbor without a password.
    scp -r /root/.docker "${node}":/root/
    echo "Harbor 证书拷贝成功!"
  else
    echo "${node} 秘钥copy失败"
  fi
done
[root@k8s-master1 data]# bash scp_pass_crt.sh
[root@keepalive_haproxy ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
[email protected]
[email protected]
}
notification_email_from [email protected]
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id keep1
vrrp_skip_check_adv_addr
# vrrp_strict
vrrp_iptables
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.30.24 dev eth0 label eth0:0
}
unicast_src_ip 192.168.30.20
unicast_peer {
192.168.30.21
}
}
……
listen stats
mode http
bind 0.0.0.0:9999
stats enable
log global
stats uri /haproxy-status
stats auth admin:123456
listen web_port
bind 192.168.30.24:6443
mode tcp
log global
server web1 192.168.30.10:6443 check inter 3000 fall 2 rise 5 #先暂时调度一个master节点
#server web2 192.168.30.11:6443 check inter 3000 fall 2 rise 5
[root@k8s-master1 ~]# apt-get install ansible -y #安装ansible(集群环境全为ubuntu18.04)
[root@k8s-master1 ~]# git clone -b 0.6.1 https://github.com/easzlab/kubeasz
[root@k8s-master1 ~]# mv /etc/ansible/* /temp_file/
[root@k8s-master1 ~]# mv kubeasz/* /etc/ansible/
[root@k8s-master1 ansible]# cp example/hosts.m-masters.example ./hosts #
[root@k8s-master1 ansible]# vim hosts
# 集群部署节点:一般为运行ansible 脚本的节点
# 变量 NTP_ENABLED (=yes/no) 设置集群是否安装 chrony 时间同步
[deploy]
192.168.30.10 NTP_ENABLED=yes #集群安装chrony 时间同步
# etcd集群请提供如下NODE_NAME,注意etcd集群必须是1,3,5,7...奇数个节点
[etcd]
192.168.30.50 NODE_NAME=etcd1
192.168.30.51 NODE_NAME=etcd2
192.168.30.52 NODE_NAME=etcd3
[new-etcd] # 预留组,后续添加etcd节点使用
#192.168.1.x NODE_NAME=etcdx
[kube-master]
192.168.30.10
[new-master] # 预留组,后续添加master节点使用
#192.168.1.5
[kube-node]
192.168.30.60
192.168.30.61
[new-node] # 预留组,后续添加node节点使用
#192.168.30.xx
# 参数 NEW_INSTALL:yes表示新建,no表示使用已有harbor服务器
# 如果不使用域名,可以设置 HARBOR_DOMAIN=""
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# 负载均衡(目前已支持多于2节点,一般2节点就够了) 安装 haproxy+keepalived
#[lb]
#192.168.1.1 LB_ROLE=backup
#192.168.1.2 LB_ROLE=master
#【可选】外部负载均衡,用于自有环境负载转发 NodePort 暴露的服务等
#[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars]
# ---------集群主要参数---------------
#集群部署模式:allinone, single-master, multi-master
DEPLOY_MODE=multi-master
#集群主版本号,目前支持: v1.8, v1.9, v1.10,v1.11, v1.12, v1.13
K8S_VER="v1.13"
# 集群 MASTER IP即 LB节点VIP地址,为区别与默认apiserver端口,设置VIP监听的服务端口8443
# 公有云上请使用云负载均衡内网地址和监听端口
MASTER_IP="192.168.30.24"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# 集群网络插件,目前支持calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"
# 服务网段 (Service CIDR),注意不要与内网已有网段冲突
SERVICE_CIDR="10.20.0.0/16"
# POD 网段 (Cluster CIDR),注意不要与内网已有网段冲突
CLUSTER_CIDR="172.20.0.0/16"
# 服务端口范围 (NodePort Range)
NODE_PORT_RANGE="30000-60000"
# kubernetes 服务 IP (预分配,一般是 SERVICE_CIDR 中第一个IP)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
# 集群 DNS 服务 IP (从 SERVICE_CIDR 中预分配)
CLUSTER_DNS_SVC_IP="10.20.0.2"
# 集群 DNS 域名
CLUSTER_DNS_DOMAIN="jie.local."
# 集群basic auth 使用的用户名和密码
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
# ---------附加参数--------------------
#默认二进制文件目录
bin_dir="/usr/bin"
#证书目录
ca_dir="/etc/kubernetes/ssl"
#部署目录,即 ansible 工作目录,建议不要修改
base_dir="/etc/ansible"
[root@k8s-master1 ~]# ll /data
-rw-r--r-- 1 root root 219910183 Jul 6 09:07 k8s.1-13-5.tar.gz
[root@k8s-master1 ~]# tar xvf k8s.1-13-5.tar.gz
[root@k8s-master1 bin]# mv /data/bin/* /etc/ansible/bin/
[root@k8s-master1 bin]# ./kubelet --version
Kubernetes v1.13.5
[root@k8s-master1 ansible]# ansible-playbook 01.prepare.yml
changed: [192.168.30.10] => (item=cfssljson)
changed: [192.168.30.10] => (item=kubectl)
TASK [deploy : 读取ca证书stat信息] ********************************************************************************************
ok: [192.168.30.10]
TASK [deploy : 准备CA配置文件] ************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 准备CA签名请求] ************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 生成 CA 证书和私钥] *********************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 准备kubectl使用的admin 证书签名请求] ********************************************************************************
changed: [192.168.30.10]
TASK [deploy : 创建 admin证书与私钥] *******************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置集群参数] **************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置客户端认证参数] ***********************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置上下文参数] *************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 选择默认上下文] *************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 准备kube-proxy 证书签名请求] *************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 创建 kube-proxy证书与私钥] **************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置集群参数] **************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置客户端认证参数] ***********************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 设置上下文参数] *************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 选择默认上下文] *************************************************************************************************
changed: [192.168.30.10]
TASK [deploy : 移动 kube-proxy.kubeconfig] ********************************************************************************
changed: [192.168.30.10]
TASK [deploy : 安装 rsync] ************************************************************************************************
ok: [192.168.30.10]
[WARNING]: Could not match supplied host pattern, ignoring: lb
PLAY [kube-master,kube-node,deploy,etcd,lb] *****************************************************************************
TASK [prepare : 删除centos/redhat默认安装] ************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.10]
changed: [192.168.30.51]
changed: [192.168.30.50]
changed: [192.168.30.52]
TASK [prepare : 添加EPEL仓库] ***********************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.51]
changed: [192.168.30.50]
changed: [192.168.30.10]
changed: [192.168.30.52]
TASK [prepare : 安装基础软件包] ************************************************************************************************
changed: [192.168.30.51]
changed: [192.168.30.61]
changed: [192.168.30.10]
changed: [192.168.30.50]
changed: [192.168.30.60]
changed: [192.168.30.52]
TASK [prepare : 临时关闭 selinux] *******************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.50]
changed: [192.168.30.10]
changed: [192.168.30.51]
changed: [192.168.30.52]
TASK [prepare : 永久关闭 selinux] *******************************************************************************************
ok: [192.168.30.51]
ok: [192.168.30.50]
ok: [192.168.30.60]
ok: [192.168.30.61]
ok: [192.168.30.10]
ok: [192.168.30.52]
TASK [prepare : 禁用系统 swap] **********************************************************************************************
changed: [192.168.30.10]
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.52]
changed: [192.168.30.51]
changed: [192.168.30.50]
TASK [prepare : 删除fstab swap 相关配置] **************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
ok: [192.168.30.10]
ok: [192.168.30.50]
ok: [192.168.30.51]
ok: [192.168.30.52]
TASK [prepare : 加载内核模块] *************************************************************************************************
ok: [192.168.30.10] => (item=br_netfilter)
changed: [192.168.30.51] => (item=br_netfilter)
changed: [192.168.30.61] => (item=br_netfilter)
changed: [192.168.30.60] => (item=br_netfilter)
changed: [192.168.30.50] => (item=br_netfilter)
changed: [192.168.30.51] => (item=ip_vs)
changed: [192.168.30.61] => (item=ip_vs)
changed: [192.168.30.51] => (item=ip_vs_rr)
changed: [192.168.30.50] => (item=ip_vs)
changed: [192.168.30.60] => (item=ip_vs)
changed: [192.168.30.10] => (item=ip_vs)
changed: [192.168.30.61] => (item=ip_vs_rr)
changed: [192.168.30.51] => (item=ip_vs_wrr)
changed: [192.168.30.50] => (item=ip_vs_rr)
changed: [192.168.30.60] => (item=ip_vs_rr)
changed: [192.168.30.61] => (item=ip_vs_wrr)
changed: [192.168.30.51] => (item=ip_vs_sh)
changed: [192.168.30.10] => (item=ip_vs_rr)
changed: [192.168.30.50] => (item=ip_vs_wrr)
changed: [192.168.30.60] => (item=ip_vs_wrr)
changed: [192.168.30.61] => (item=ip_vs_sh)
changed: [192.168.30.51] => (item=nf_conntrack_ipv4)
changed: [192.168.30.60] => (item=ip_vs_sh)
changed: [192.168.30.50] => (item=ip_vs_sh)
changed: [192.168.30.61] => (item=nf_conntrack_ipv4)
changed: [192.168.30.10] => (item=ip_vs_wrr)
ok: [192.168.30.51] => (item=nf_conntrack)
changed: [192.168.30.60] => (item=nf_conntrack_ipv4)
ok: [192.168.30.61] => (item=nf_conntrack)
changed: [192.168.30.50] => (item=nf_conntrack_ipv4)
ok: [192.168.30.60] => (item=nf_conntrack)
ok: [192.168.30.50] => (item=nf_conntrack)
changed: [192.168.30.10] => (item=ip_vs_sh)
changed: [192.168.30.52] => (item=br_netfilter)
ok: [192.168.30.10] => (item=nf_conntrack_ipv4)
changed: [192.168.30.52] => (item=ip_vs)
changed: [192.168.30.52] => (item=ip_vs_rr)
ok: [192.168.30.10] => (item=nf_conntrack)
changed: [192.168.30.52] => (item=ip_vs_wrr)
changed: [192.168.30.52] => (item=ip_vs_sh)
changed: [192.168.30.52] => (item=nf_conntrack_ipv4)
ok: [192.168.30.52] => (item=nf_conntrack)
TASK [prepare : 启用systemd自动加载模块服务] **************************************************************************************
ok: [192.168.30.61]
ok: [192.168.30.51]
ok: [192.168.30.50]
ok: [192.168.30.60]
ok: [192.168.30.52]
ok: [192.168.30.10]
TASK [prepare : 增加内核模块开机加载配置] *******************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
changed: [192.168.30.10]
TASK [prepare : 设置系统参数] *************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
changed: [192.168.30.10]
TASK [prepare : 生效系统参数] *************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.10]
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
TASK [prepare : 设置系统 ulimits] *******************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
changed: [192.168.30.10]
TASK [prepare : prepare some dirs] **************************************************************************************
ok: [192.168.30.60] => (item=/usr/bin)
ok: [192.168.30.61] => (item=/usr/bin)
ok: [192.168.30.10] => (item=/usr/bin)
ok: [192.168.30.51] => (item=/usr/bin)
ok: [192.168.30.50] => (item=/usr/bin)
changed: [192.168.30.60] => (item=/etc/kubernetes/ssl)
changed: [192.168.30.61] => (item=/etc/kubernetes/ssl)
changed: [192.168.30.50] => (item=/etc/kubernetes/ssl)
changed: [192.168.30.51] => (item=/etc/kubernetes/ssl)
ok: [192.168.30.10] => (item=/etc/kubernetes/ssl)
ok: [192.168.30.52] => (item=/usr/bin)
changed: [192.168.30.52] => (item=/etc/kubernetes/ssl)
TASK [prepare : 分发证书工具 CFSSL] *******************************************************************************************
changed: [192.168.30.60] => (item=cfssl)
changed: [192.168.30.61] => (item=cfssl)
changed: [192.168.30.51] => (item=cfssl)
changed: [192.168.30.50] => (item=cfssl)
ok: [192.168.30.10] => (item=cfssl)
changed: [192.168.30.60] => (item=cfssl-certinfo)
changed: [192.168.30.61] => (item=cfssl-certinfo)
changed: [192.168.30.51] => (item=cfssl-certinfo)
changed: [192.168.30.50] => (item=cfssl-certinfo)
changed: [192.168.30.60] => (item=cfssljson)
changed: [192.168.30.51] => (item=cfssljson)
changed: [192.168.30.61] => (item=cfssljson)
changed: [192.168.30.50] => (item=cfssljson)
ok: [192.168.30.10] => (item=cfssl-certinfo)
changed: [192.168.30.52] => (item=cfssl)
ok: [192.168.30.10] => (item=cfssljson)
changed: [192.168.30.52] => (item=cfssl-certinfo)
changed: [192.168.30.52] => (item=cfssljson)
TASK [prepare : 设置本地 bin 目录权限] ******************************************************************************************
ok: [192.168.30.10]
TASK [prepare : 写入环境变量$PATH] ********************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
changed: [192.168.30.50]
changed: [192.168.30.10]
changed: [192.168.30.51]
changed: [192.168.30.52]
PLAY [lb] ***************************************************************************************************************
skipping: no hosts matched
PLAY RECAP **************************************************************************************************************
192.168.30.10 : ok=38 changed=29 unreachable=0 failed=0 skipped=16 rescued=0 ignored=0
192.168.30.50 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
192.168.30.51 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
192.168.30.52 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
192.168.30.60 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
192.168.30.61 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
[root@k8s-master1 ansible]# bin/etcdctl -v
etcdctl version: 3.3.10
API version: 2
[root@k8s-master1 templates]# tail -30 /var/log/messages
……
Aug 20 22:45:24 k8s-master1 kube-apiserver: E0820 22:45:24.933021 25576 watcher.go:208] watch chan error: etcdserver: mvcc: required revision has been compacted
Aug 20 22:45:24 k8s-master1 kube-controller-manager: W0820 22:45:24.933345 25595 reflector.go:256] k8s.io/client-go/informers/factory.go:132: watch of *v1beta1.Event ended with: The resourceVersion for the provided watch is too old.
……
github官网搜索etcd并点击进入
找到etcd-3.2.24这个版本
点击Assets资产下载etcd-v3.2.24-linux-amd64.tar.gz压缩文件
[root@k8s-master1 etcd]# pwd
/data/etcd
[root@k8s-master1 etcd]# tar xvf etcd-v3.2.24-linux-amd64.tar.gz
[root@k8s-master1 etcd]# cp etcd-v3.2.24-linux-amd64/etcd* /etc/ansible/bin/
[root@k8s-master1 ansible]# pwd
/etc/ansible
[root@k8s-master1 ansible]# bin/etcdctl -v #执行命令检查etcd版本
etcdctl version: 3.2.24 #版本已更改
API version: 2
[root@k8s-master1 ansible]# ansible-playbook 02.etcd.yml
ok: [192.168.30.51] => (item=/etc/kubernetes/ssl)
changed: [192.168.30.52] => (item=/etc/etcd/ssl)
changed: [192.168.30.50] => (item=/etc/etcd/ssl)
changed: [192.168.30.51] => (item=/etc/etcd/ssl)
changed: [192.168.30.52] => (item=/var/lib/etcd)
changed: [192.168.30.50] => (item=/var/lib/etcd)
changed: [192.168.30.51] => (item=/var/lib/etcd)
TASK [etcd : 下载etcd二进制文件] ******************************************************
changed: [192.168.30.51] => (item=etcd)
changed: [192.168.30.50] => (item=etcd)
changed: [192.168.30.52] => (item=etcd)
changed: [192.168.30.50] => (item=etcdctl)
changed: [192.168.30.52] => (item=etcdctl)
changed: [192.168.30.51] => (item=etcdctl)
TASK [etcd : 分发证书相关] ***********************************************************
changed: [192.168.30.50 -> 192.168.30.10] => (item=ca.pem)
changed: [192.168.30.52 -> 192.168.30.10] => (item=ca.pem)
changed: [192.168.30.51 -> 192.168.30.10] => (item=ca.pem)
changed: [192.168.30.52 -> 192.168.30.10] => (item=ca-key.pem)
changed: [192.168.30.51 -> 192.168.30.10] => (item=ca-key.pem)
changed: [192.168.30.50 -> 192.168.30.10] => (item=ca-key.pem)
changed: [192.168.30.52 -> 192.168.30.10] => (item=ca.csr)
changed: [192.168.30.50 -> 192.168.30.10] => (item=ca.csr)
changed: [192.168.30.51 -> 192.168.30.10] => (item=ca.csr)
changed: [192.168.30.52 -> 192.168.30.10] => (item=ca-config.json)
changed: [192.168.30.50 -> 192.168.30.10] => (item=ca-config.json)
changed: [192.168.30.51 -> 192.168.30.10] => (item=ca-config.json)
TASK [etcd : 读取etcd证书stat信息] ***************************************************
ok: [192.168.30.50]
ok: [192.168.30.51]
ok: [192.168.30.52]
TASK [etcd : 创建etcd证书请求] *******************************************************
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
TASK [etcd : 创建 etcd证书和私钥] *****************************************************
changed: [192.168.30.52]
changed: [192.168.30.51]
changed: [192.168.30.50]
TASK [etcd : 创建etcd的systemd unit文件] ********************************************
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
TASK [etcd : 开机启用etcd服务] *******************************************************
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
TASK [etcd : 开启etcd服务] *********************************************************
changed: [192.168.30.52]
changed: [192.168.30.51]
changed: [192.168.30.50]
TASK [etcd : 以轮询的方式等待服务同步完成] ***************************************************
changed: [192.168.30.50]
changed: [192.168.30.51]
changed: [192.168.30.52]
PLAY RECAP *********************************************************************
192.168.30.50 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
192.168.30.51 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
192.168.30.52 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
[root@k8s-etcd-1 ~]# systemctl status etcd
[root@k8s-etcd-1 ~]# export NODE_IPS="192.168.30.50 192.168.30.51 192.168.30.52" #声明变量
[root@k8s-etcd-1 ~]# for ip in ${NODE_IPS};do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done
https://192.168.30.50:2379 is healthy: successfully committed proposal: took = 1.867785ms #必须都看到 successfully健康成功检测信息
https://192.168.30.51:2379 is healthy: successfully committed proposal: took = 3.900401ms #必须都看到 successfully健康成功检测信息
https://192.168.30.52:2379 is healthy: successfully committed proposal: took = 3.015816ms #必须都看到 successfully健康成功检测信息
[root@k8s-master1 ansible]# ansible-playbook 03.docker.yml
[root@k8s-master1 ansible]# ansible-playbook 04.kube-master.yml
TASK [kube-master : 分发证书相关] *****
ok: [192.168.30.10 -> 192.168.30.10] => (item=admin.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=admin-key.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-key.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.csr)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-config.json)
TASK [kube-master : 创建 kubernetes 证书签名请求] *******************************************************************************************************
ok: [192.168.30.10]
TASK [kube-master : 创建 kubernetes 证书和私钥] ********************************************************************************************************
changed: [192.168.30.10]
TASK [kube-master : 创建 aggregator proxy证书签名请求] **************************************************************************************************
ok: [192.168.30.10]
TASK [kube-master : 创建 aggregator-proxy证书和私钥] ***************************************************************************************************
changed: [192.168.30.10]
TASK [kube-master : 创建 basic-auth.csv] **********************************************************************************************************
ok: [192.168.30.10]
TASK [kube-master : 创建kube-apiserver的systemd unit文件] ********************************************************************************************
ok: [192.168.30.10]
TASK [kube-master : 创建kube-controller-manager的systemd unit文件] ***********************************************************************************
ok: [192.168.30.10]
TASK [kube-master : 创建kube-scheduler的systemd unit文件] ********************************************************************************************
ok: [192.168.30.10]
TASK [kube-master : enable master 服务] ***********************************************************************************************************
changed: [192.168.30.10]
TASK [kube-master : 启动 master 服务] ***************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-master : 以轮询的方式等待master服务启动完成] *******************************************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [kube-node : 创建kube-node 相关目录] *************************************************************************************************************
ok: [192.168.30.10] => (item=/var/lib/kubelet)
ok: [192.168.30.10] => (item=/var/lib/kube-proxy)
ok: [192.168.30.10] => (item=/etc/cni/net.d)
ok: [192.168.30.10] => (item=/root/.kube)
TASK [kube-node : 下载 kubelet,kube-proxy 二进制和基础 cni plugins] *************************************************************************************
ok: [192.168.30.10] => (item=kubectl)
ok: [192.168.30.10] => (item=kubelet)
ok: [192.168.30.10] => (item=kube-proxy)
ok: [192.168.30.10] => (item=bridge)
ok: [192.168.30.10] => (item=host-local)
ok: [192.168.30.10] => (item=loopback)
TASK [kube-node : 分发 kubeconfig配置文件] ************************************************************************************************************
ok: [192.168.30.10 -> 192.168.30.10]
TASK [kube-node : 添加 kubectl 命令自动补全] ************************************************************************************************************
ok: [192.168.30.10]
TASK [kube-node : 分发证书相关] ***********************************************************************************************************************
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-key.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.csr)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-config.json)
TASK [kube-node : 准备kubelet 证书签名请求] *************************************************************************************************************
ok: [192.168.30.10]
TASK [kube-node : 创建 kubelet 证书与私钥] *************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 设置集群参数] ***********************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 设置客户端认证参数] ********************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 设置上下文参数] **********************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 选择默认上下文] **********************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 移动 kubelet.kubeconfig] ********************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 准备 cni配置文件] *******************************************************************************************************************
ok: [192.168.30.10]
TASK [kube-node : 创建kubelet的systemd unit文件] *****************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 开机启用kubelet 服务] ***************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 开启kubelet 服务] *****************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 安装kube-proxy.kubeconfig配置文件] **************************************************************************************************
ok: [192.168.30.10 -> 192.168.30.10]
TASK [kube-node : 创建kube-proxy 服务文件] ************************************************************************************************************
ok: [192.168.30.10]
TASK [kube-node : 开机启用kube-proxy 服务] ************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 开启kube-proxy 服务] **************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 轮询等待kubelet启动] ****************************************************************************************************************
changed: [192.168.30.10]
TASK [kube-node : 轮询等待node达到Ready状态] ************************************************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [kube-node : 设置node节点role] *****************************************************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [Making master nodes SchedulingDisabled] ***************************************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [Setting master role name] *****************************************************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
PLAY RECAP **************************************************************************************************************************************
192.168.30.10 : ok=39 changed=21 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
[root@k8s-master1 ansible]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.30.10 Ready,SchedulingDisabled master 86m v1.13.5
[root@k8s-master1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 #先在阿里云官网将镜像拉取下来
[root@k8s-master1 ~]# docker login www.harbor.com #登录harbor服务
Username (admin): admin
Password:
Login Succeeded
[root@k8s-master1 ~]# docker images #查看拉取的镜像
REPOSITORY TAG IMAGE ID CREATED SIZE
docker.io/alpine latest b7b28af77ffe 5 weeks ago 5.58 MB
www.harbor.com/base-images/alpine v1 b7b28af77ffe 5 weeks ago 5.58 MB
docker.io/mirrorgooglecontainers/pause-amd64 3.1 da86e6ba6ca1 20 months ago 742 kB
[root@k8s-master1 ~]# docker tag docker.io/mirrorgooglecontainers/pause-amd64:3.1 www.harbor.com/base-images/pause-amd64:3.1 #打上tag号(与下方push及SANDBOX_IMAGE路径保持一致)
[root@k8s-master1 ~]# docker push www.harbor.com/base-images/pause-amd64 #推送至harbor仓库
[root@k8s-master1 defaults]# pwd
/etc/ansible/roles/kube-node/defaults
[root@k8s-master1 defaults]# vim main.yml
# 默认使用kube-proxy的 'iptables' 模式,可选 'ipvs' 模式(experimental)
PROXY_MODE: "iptables"
# 基础容器镜像
SANDBOX_IMAGE: "www.harbor.com/base-images/pause-amd64:3.1" #本地的harbor镜像地址
#SANDBOX_IMAGE: "registry.access.redhat.com/rhel7/pod-infrastructure:latest"
# Kubelet 根目录
KUBELET_ROOT_DIR: "/var/lib/kubelet"
# node节点最大pod 数
MAX_PODS: 110
[root@k8s-master1 ansible]# ansible-playbook 05.kube-node.yml
TASK [kube-node : 下载 kubelet,kube-proxy 二进制和基础 cni plugins] *****
ok: [192.168.30.61] => (item=kubectl)
ok: [192.168.30.60] => (item=kubectl)
ok: [192.168.30.61] => (item=kubelet)
ok: [192.168.30.60] => (item=kubelet)
ok: [192.168.30.61] => (item=kube-proxy)
ok: [192.168.30.60] => (item=kube-proxy)
ok: [192.168.30.61] => (item=bridge)
ok: [192.168.30.60] => (item=bridge)
ok: [192.168.30.61] => (item=host-local)
ok: [192.168.30.60] => (item=host-local)
ok: [192.168.30.61] => (item=loopback)
ok: [192.168.30.60] => (item=loopback)
TASK [kube-node : 分发 kubeconfig配置文件] ************************************************************************************************************
ok: [192.168.30.60 -> 192.168.30.10]
ok: [192.168.30.61 -> 192.168.30.10]
TASK [kube-node : 添加 kubectl 命令自动补全] ************************************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
TASK [kube-node : 分发证书相关] ***********************************************************************************************************************
ok: [192.168.30.60 -> 192.168.30.10] => (item=ca.pem)
ok: [192.168.30.61 -> 192.168.30.10] => (item=ca.pem)
ok: [192.168.30.60 -> 192.168.30.10] => (item=ca-key.pem)
ok: [192.168.30.61 -> 192.168.30.10] => (item=ca-key.pem)
ok: [192.168.30.60 -> 192.168.30.10] => (item=ca.csr)
ok: [192.168.30.61 -> 192.168.30.10] => (item=ca.csr)
ok: [192.168.30.60 -> 192.168.30.10] => (item=ca-config.json)
ok: [192.168.30.61 -> 192.168.30.10] => (item=ca-config.json)
TASK [kube-node : 准备kubelet 证书签名请求] *************************************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
TASK [kube-node : 创建 kubelet 证书与私钥] *************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 设置集群参数] ***********************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 设置客户端认证参数] ********************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 设置上下文参数] **********************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 选择默认上下文] **********************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 移动 kubelet.kubeconfig] ********************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 准备 cni配置文件] *******************************************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
TASK [kube-node : 创建kubelet的systemd unit文件] *****************************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
TASK [kube-node : 开机启用kubelet 服务] ***************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 开启kubelet 服务] *****************************************************************************************************************
changed: [192.168.30.61]
changed: [192.168.30.60]
TASK [kube-node : 安装kube-proxy.kubeconfig配置文件] **************************************************************************************************
ok: [192.168.30.60 -> 192.168.30.10]
ok: [192.168.30.61 -> 192.168.30.10]
TASK [kube-node : 创建kube-proxy 服务文件] ************************************************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
TASK [kube-node : 开机启用kube-proxy 服务] ************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 开启kube-proxy 服务] **************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 轮询等待kubelet启动] ****************************************************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.61]
TASK [kube-node : 轮询等待node达到Ready状态] ************************************************************************************************************
changed: [192.168.30.60 -> 192.168.30.10]
changed: [192.168.30.61 -> 192.168.30.10]
TASK [kube-node : 设置node节点role] *****************************************************************************************************************
changed: [192.168.30.61 -> 192.168.30.10]
changed: [192.168.30.60 -> 192.168.30.10]
PLAY RECAP **************************************************************************************************************************************
192.168.30.60 : ok=24 changed=13 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
192.168.30.61 : ok=24 changed=13 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
[root@k8s-master1 ansible]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.30.10 Ready,SchedulingDisabled master 4h29m v1.13.5
192.168.30.60 Ready node 4h28m v1.13.5
192.168.30.61 Ready node 4h28m v1.13.5
[root@k8s-master1 calico]# pwd
/data/calico
[root@k8s-master1 calico]# ll
total 95948
-rw-r--r-- 1 root root 98247376 Jul 10 18:44 release-v3.3.2.tgz
[root@k8s-master1 release-v3.3.2]# ll
[root@k8s-master1 release-v3.3.2]# ll images/
[root@k8s-master1 release-v3.3.2]# ll bin
[root@k8s-master1 release-v3.3.2]# cp bin/calicoctl /etc/ansible/bin
cp: overwrite ‘/etc/ansible/bin/calicoctl’? y
[root@k8s-master1 release-v3.3.2]# /etc/ansible/bin/calicoctl version
[root@k8s-master1 release-v3.3.2]# docker load -i images/calico-node.tar #calico-node加载到本地
[root@k8s-master1 release-v3.3.2]# docker tag calico/node:v3.3.2 www.harbor.com/base-images/calico-node:v3.3.2 #打上tag号
[root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-node:v3.3.2 #上传calico-node镜像
[root@k8s-master1 release-v3.3.2]# docker load -i images/calico-cni.tar #calico-cni加载到本地
[root@k8s-master1 release-v3.3.2]# docker tag calico/cni:v3.3.2 www.harbor.com/base-images/calico-cni:v3.3.2 #打上tag号
[root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-cni:v3.3.2 #上传calico-cni镜像
[root@k8s-master1 release-v3.3.2]# docker load -i images/calico-kube-controllers.tar #calico-kube-controllers加载到本地
[root@k8s-master1 release-v3.3.2]# docker tag calico/kube-controllers:v3.3.2 www.harbor.com/base-images/calico-kube-controllers:v3.3.2 #打上tag号
[root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-kube-controllers:v3.3.2 #上传calico-kube-controllers镜像
[root@k8s-master1 defaults]# pwd
/etc/ansible/roles/calico/defaults
[root@k8s-master1 defaults]# vim main.yml
[root@k8s-master1 templates]# pwd
/etc/ansible/roles/calico/templates
[root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 ansible]# ansible-playbook 06.network.yml
TASK [calico : 创建calico 证书请求] **
ok: [192.168.30.10 -> 192.168.30.10]
TASK [calico : 创建 calico证书和私钥] *****************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [calico : get calico-etcd-secrets info] ***************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [calico : 配置 calico DaemonSet yaml文件] *****************************************************************
ok: [192.168.30.10 -> 192.168.30.10]
TASK [calico : 检查是否已下载离线calico镜像] **************************************************************************
changed: [192.168.30.10]
TASK [calico : node 节点创建calico 相关目录] ***********************************************************************
ok: [192.168.30.60] => (item=/etc/calico/ssl)
ok: [192.168.30.10] => (item=/etc/calico/ssl)
ok: [192.168.30.61] => (item=/etc/calico/ssl)
ok: [192.168.30.10] => (item=/etc/cni/net.d)
ok: [192.168.30.60] => (item=/etc/cni/net.d)
ok: [192.168.30.60] => (item=/opt/kube/images)
ok: [192.168.30.61] => (item=/etc/cni/net.d)
ok: [192.168.30.10] => (item=/opt/kube/images)
ok: [192.168.30.61] => (item=/opt/kube/images)
TASK [calico : 获取calico离线镜像推送情况] ***************************************************************************
changed: [192.168.30.60]
changed: [192.168.30.10]
changed: [192.168.30.61]
TASK [calico : 运行 calico网络] ********************************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
TASK [calico : 删除默认cni配置] **********************************************************************************
ok: [192.168.30.60]
ok: [192.168.30.61]
ok: [192.168.30.10]
TASK [calico : 下载calicoctl 客户端] ****************************************************************************
ok: [192.168.30.61] => (item=calicoctl)
ok: [192.168.30.60] => (item=calicoctl)
ok: [192.168.30.10] => (item=calicoctl)
TASK [calico : 分发 calico 证书] *******************************************************************************
ok: [192.168.30.10 -> 192.168.30.10] => (item=calico.pem)
changed: [192.168.30.60 -> 192.168.30.10] => (item=calico.pem)
changed: [192.168.30.61 -> 192.168.30.10] => (item=calico.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=calico-key.pem)
changed: [192.168.30.61 -> 192.168.30.10] => (item=calico-key.pem)
changed: [192.168.30.60 -> 192.168.30.10] => (item=calico-key.pem)
TASK [calico : 准备 calicoctl配置文件] ***************************************************************************
ok: [192.168.30.61]
ok: [192.168.30.60]
ok: [192.168.30.10]
TASK [calico : 轮询等待calico-node 运行,视下载镜像速度而定] ***************************************************************
changed: [192.168.30.10 -> 192.168.30.10]
changed: [192.168.30.61 -> 192.168.30.10]
changed: [192.168.30.60 -> 192.168.30.10]
PLAY RECAP *************************************************************************************************
192.168.30.10 : ok=15 changed=6 unreachable=0 failed=0 skipped=43 rescued=0 ignored=0
192.168.30.60 : ok=8 changed=3 unreachable=0 failed=0 skipped=25 rescued=0 ignored=0
192.168.30.61 : ok=8 changed=3 unreachable=0 failed=0 skipped=25 rescued=0 ignored=0
[root@k8s-node-1 ~]# calicoctl version
Client Version: v3.3.2
Build date: 2018-12-03T15:10:51+0000
Git commit: 594fd84e
Cluster Version: v3.3.2
Cluster Type: k8s,bgp
[root@k8s-node-1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
www.harbor.com/base-images/calico-node v3.3.2 4e9be81e3a59 8 months ago 75.3MB
www.harbor.com/base-images/calico-cni v3.3.2 490d921fa49c 8 months ago 75.4MB
www.harbor.com/base-images/pause-amd64 3.1 da86e6ba6ca1 20 months ago 742kB
[root@k8s-node-1 ~]# calicoctl node status
Calico process is running.
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+---------------+-------------------+-------+----------+-------------+
| 192.168.30.10 | node-to-node mesh | up | 22:25:47 | Established |
| 192.168.30.61 | node-to-node mesh | up | 22:33:37 | Established |
+---------------+-------------------+-------+----------+-------------+
[root@k8s-master1 ansible]# pwd
/etc/ansible
[root@k8s-master1 ansible]# vim hosts
……
[kube-node]
192.168.30.60
192.168.30.61
[new-node] # 预留组,后续添加node节点使用
192.168.30.62 #添加的新node节点
……
root@k8s-master1:/etc/ansible# vim 20.addnode.yml
[root@k8s-master1 ansible]# ansible-playbook 20.addnode.yml
……
PLAY [deploy] **
TASK [Gathering Facts] *************************************************************************************
ok: [192.168.30.10]
TASK [rm new-node in ansible hosts] ************************************************************************
changed: [192.168.30.10]
PLAY RECAP *************************************************************************************************
192.168.30.10 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
192.168.30.62 : ok=70 changed=60 unreachable=0 failed=0 skipped=58 rescued=0 ignored=0
[root@k8s-master1 ansible]# vim hosts
…..
[kube-master]
192.168.30.10
[new-master] # 预留组,后续添加master节点使用
192.168.30.11 #添加的新master节点
……
root@k8s-master1:/etc/ansible# vim 21.addmaster.yml
[root@k8s-master1 ansible]# ansible-playbook 21.addmaster.yml
……
PLAY [deploy] ****
TASK [Gathering Facts] ***************************************************************************
ok: [192.168.30.10]
TASK [rm new-master in ansible hosts] ************************************************************
changed: [192.168.30.10]
PLAY RECAP ***************************************************************************************
192.168.30.10 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
192.168.30.11 : ok=89 changed=46 unreachable=0 failed=0 skipped=55 rescued=0 ignored=0
[root@k8s-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.30.10 Ready,SchedulingDisabled master 56m v1.13.5
192.168.30.11 Ready,SchedulingDisabled master 19m v1.13.5 #新添加的master节点
192.168.30.60 Ready node 52m v1.13.5
192.168.30.61 Ready node 52m v1.13.5
192.168.30.62 Ready node 30m v1.13.5 #新添加的node节点
[root@k8s-master1 ~]# kubectl run test-pod --image=alpine --replicas=4 sleep 360000
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
[root@k8s-master1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-pod-9569f557-kq54m 1/1 Running 0 2m 172.20.76.158 192.168.30.62
test-pod-9569f557-sh4pp 1/1 Running 0 2m 172.20.140.82 192.168.30.61
test-pod-9569f557-sqr77 1/1 Running 0 2m 172.20.109.81 192.168.30.60
test-pod-9569f557-wlmgf 1/1 Running 0 2m 172.20.76.159 192.168.30.62
[root@k8s-master1 ~]# kubectl exec -it test-pod-9569f557-kq54m sh
/ #
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 36:25:BB:AD:A1:A3
inet addr:172.20.76.158 Bcast:0.0.0.0 Mask:255.255.255.255 #当前pod的ip
……
5)在此容器ping另外两个node节点中的pod进行测试,可以ping通,实现跨节点间的pod容器通信
/ # ping 172.20.140.82
PING 172.20.140.82 (172.20.140.82): 56 data bytes
64 bytes from 172.20.140.82: seq=0 ttl=62 time=0.509 ms
64 bytes from 172.20.140.82: seq=1 ttl=62 time=0.614 ms
64 bytes from 172.20.140.82: seq=2 ttl=62 time=0.386 ms
……
/ # ping 172.20.109.81
PING 172.20.109.81 (172.20.109.81): 56 data bytes
64 bytes from 172.20.109.81: seq=0 ttl=62 time=0.584 ms
64 bytes from 172.20.109.81: seq=1 ttl=62 time=0.678 ms
64 bytes from 172.20.109.81: seq=2 ttl=62 time=0.704 ms
[root@k8s-master1 dashboard]# mkdir 1.10.1
[root@k8s-master1 dashboard]# cd 1.10.1
[root@k8s-master1 1.10.1]# pwd
/etc/ansible/manifests/dashboard/1.10.1
[root@k8s-master1 1.10.1]# cp ../*.yaml ./
[root@k8s-master1 1.10.1]# ll
total 24
-rw-r--r-- 1 root root 357 Aug 21 11:13 admin-user-sa-rbac.yaml
-rw-r--r-- 1 root root 4761 Aug 21 11:13 kubernetes-dashboard.yaml
-rw-r--r-- 1 root root 2223 Aug 21 11:13 read-user-sa-rbac.yaml
-rw-r--r-- 1 root root 458 Aug 21 11:13 ui-admin-rbac.yaml
-rw-r--r-- 1 root root 477 Aug 21 11:13 ui-read-rbac.yaml
https://github.com/kubernetes/dashboard/releases/tag/v1.10.1
https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
[root@k8s-master1 1.10.1]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
[root@k8s-master1 1.10.1]# docker images
[root@k8s-master1 1.10.1]# docker tag f9aed6605b81 www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1
docker push www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1 #推送至本地harbor仓库
[root@k8s-master1 1.10.1]# vim kubernetes-dashboard.yaml
……
containers:
- name: kubernetes-dashboard
#image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
image: www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
……
[root@k8s-master1 1.10.1]# kubectl create -f ./
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created
[root@k8s-master1 1.10.1]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-854875cc8-bvvsv 1/1 Running 0 10m
calico-node-2cb59 2/2 Running 0 10m
calico-node-2ptr5 2/2 Running 0 10m
calico-node-7fh5s 2/2 Running 0 10m
calico-node-cxzxp 2/2 Running 0 10m
calico-node-fc8z8 2/2 Running 0 10m
kubernetes-dashboard-fbfff599f-gfrgd 1/1 Running 0 26s #kubernetes-dashboard容器已经运行
root@k8s-master1:/etc/ansible# kubectl cluster-info
Kubernetes master is running at https://192.168.30.24:6443
kubernetes-dashboard is running at https://192.168.30.24:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
root@k8s-master1:/etc/ansible# kubectl cluster-info
root@k8s-master1:/etc/ansible# kubectl get secret -n kube-system | grep admin-user
admin-user-token-6qcq8 kubernetes.io/service-account-token 3 47m
root@k8s-master1:/etc/ansible# kubectl describe secret admin-user-token-6qcq8 -n kube-system
……
ca.crt: 1346 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZxY3E4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkNzkzODViZS1jNjI2LTExZTktOTNkZC0wMDBjMjkyMDFkYzciLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.F7-fD5vNoQtXA22R6dIPFww9LPQpfBBkMqJMys6mnCcjDyqCoy3kcO9gIwm94aNkhOX7ZSABzbfTmk7RKo67I08NtbzpGNUPKWDqAz1uBkOk7grbjfqwl7SUpbu7PhuNvQbsE7MTnX1tsjSjWxwb_6lALwiiPIRArIY_kMVVStD2lGknK5je1mxw2A-GzgtXTB6BMIuKZ3EbjGcpeIIEOmItrsyfG0pOBpyo8qkJmzVPM9IfEKP8ZoEajANgYmzfgMu9fD5nidjz3MKg9tlhdWeIC_YgLlN9jVJRuA7RdDu6qanRlpUC-XGIMGyWdQxjBh0xDi4jtdgSHplziYJYUA
root@k8s-master1:~# vim /etc/kubernetes/kubelet.kubeconfig
root@k8s-master1:/etc/ansible/manifests# mkdir DNS/{kube-dns,core-dns} -p
root@k8s-master1:/etc/ansible/manifests/DNS# ls
core-dns kube-dns
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# pwd
/etc/ansible/manifests/DNS/kube-dns
root@k8s-master1:/data/src# tar xvf kubernetes.tar.gz
root@k8s-master1:/data/src# ls
kubernetes kubernetes.tar.gz
root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# pwd
/data/src/kubernetes/cluster/addons/dns/kube-dns
root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# ll
total 40
drwxr-xr-x 2 root root 167 Mar 25 16:06 ./
drwxr-xr-x 5 root root 71 Mar 25 16:06 ../
-rw-r--r-- 1 root root 6284 Mar 25 16:06 kube-dns.yaml.base
-rw-r--r-- 1 root root 6362 Mar 25 16:06 kube-dns.yaml.in
-rw-r--r-- 1 root root 6290 Mar 25 16:06 kube-dns.yaml.sed
-rw-r--r-- 1 root root 1077 Mar 25 16:06 Makefile
-rw-r--r-- 1 root root 1954 Mar 25 16:06 README.md
-rw-r--r-- 1 root root 308 Mar 25 16:06 transforms2salt.sed
-rw-r--r-- 1 root root 266 Mar 25 16:06 transforms2sed.sed
root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# cp kube-dns.yaml.base /etc/ansible/manifests/DNS/kube-dns/
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# ls
kube-dns.yaml.base
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:1.14.13 #拉取k8s-dns-kube-dns镜像
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 55a3c5209c5e www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13 #打上tag号
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13 #上传至harbor仓库
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 #拉取k8s-dns-dnsmasq-nanny镜像
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 6dc8ef8287d3 www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13 #打上tag号
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13 #上传至harbor仓库
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:1.14.13 #拉取k8s-dns-sidecar镜像
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 4b2e93f0133d www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13 #打上tag号
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13 #上传至harbor仓库
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# vim kube-dns.yaml
……
containers:
- name: kubedns
image: www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13
resources:
……
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 4Gi
requests:
cpu: 2
memory: 2Gi
……
args:
- --domain=jie.local. #服务域名后缀
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
……
- name: dnsmasq
image: www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
livenessProbe:
……
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/jie.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
……
- name: sidecar
image: www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13
livenessProbe:
……
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.jie.local,5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.jie.local,5,SRV
……
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl create -f kube-dns.yaml
service/kube-dns created
serviceaccount/kube-dns created
configmap/kube-dns created
deployment.extensions/kube-dns created
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-854875cc8-bvvsv 1/1 Running 1 10h
calico-node-2cb59 1/2 Running 2 10h
calico-node-2ptr5 1/2 Running 2 10h
calico-node-7fh5s 1/2 Running 2 10h
calico-node-cxzxp 2/2 Running 0 10h
calico-node-fc8z8 1/2 Running 2 10h
kube-dns-69dcdbc668-xbfws 3/3 Running 0 69s #kube-dns服务一个pod中包含三个服务
kubernetes-dashboard-fbfff599f-gfrgd 1/1 Running 1 10h
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# ls
busybox.yaml kube-dns.yaml
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull busybox
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag db8ee88ad75f www.harbor.com/base-images/busybox:latest
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl create -f busybox.yaml
pod/busybox created
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 24m
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl exec busybox nslookup kubernetes
root@k8s-master1:/etc/ansible/manifests/heapster# ls
grafana.yaml heapster.yaml influxdb-v1.1.1 influxdb-with-pv influxdb.yaml
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v4.4.3 #拉取heapster-grafana镜像
root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 8cb3de219af7 www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3 #将此镜像打上tag号,便于区分
root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3 #将此镜像推送至本地仓库
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.4 #拉取heapster镜像
root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 72d68eecf40c www.harbor.com/base-images/heapster-amd64:v1.5.4 #将此镜像打上tag号,便于区分
root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-amd64:v1.5.4 #将此镜像推送至本地仓库
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-influxdb-amd64:v1.3.3 #拉取heapster-influxdb镜像
root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 577260d221db www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3 #将此镜像打上tag号,便于区分
root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3 #将此镜像推送至本地仓库
root@k8s-master1:/etc/ansible/manifests/heapster# cat grafana.yaml heapster.yaml influxdb.yaml
……
spec:
containers:
- name: grafana
#image: gcr.io/google_containers/heapster-grafana-amd64:v4.2.0
image: www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3
ports:
- containerPort: 3000
protocol: TCP
……
……
containers:
- name: heapster
#image: gcr.io/google_containers/heapster-amd64:v1.5.4
image: www.harbor.com/base-images/heapster-amd64:v1.5.4
imagePullPolicy: IfNotPresent
command:
……
containers:
- name: influxdb
#image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3
image: www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3
volumeMounts:
- mountPath: /data
name: influxdb-storage
……
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl apply -f .
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl get pod -n kube-system
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl cluster-info