Deploying k8s with ansible: a detailed walkthrough

Server IP assignments:
master1 192.168.50.171
master2 192.168.50.190
node1 192.168.50.191
node2 192.168.50.192
etcd1 192.168.50.193
etcd2 192.168.50.194
etcd3 192.168.50.195
harbor 192.168.50.196
keepalived+haproxy 192.168.50.197 vip 192.168.50.201
keepalived+haproxy 192.168.50.198

Keep the clocks on all servers as close to in sync as possible and disable SELinux (clock skew can interfere with certificate signing).
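For reference, a minimal sketch of syncing the clock and disabling SELinux on a node (the NTP source is only an example, use your own):

yum install ntpdate -y
ntpdate ntp1.aliyun.com        # example time source
setenforce 0                   # disable SELinux immediately
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # and on boot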

Harbor server:
(run the Harbor steps in order, otherwise errors may occur)

yum install python-pip -y
Install docker-ce (installing it with yum is fine)
Upload the Harbor package and extract it into /usr/local/src/
cd /usr/local/src/harbor
pip install docker-compose
Edit the configuration file (harbor.cfg in the extracted directory):
hostname = 192.168.50.196 (this host's IP)
harbor_admin_password = 123456 (the Harbor login password; the account is admin)
Start docker
./install.sh
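To confirm Harbor came up (assuming the package was extracted to /usr/local/src/harbor as above):

cd /usr/local/src/harbor
docker-compose ps        # every container should show "Up"
# then open http://192.168.50.196 in a browser and log in as admin / 123456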

keepalived + haproxy servers:

Install the haproxy and keepalived packages on both machines and edit the configuration files (the sysctl settings further below are also required, otherwise haproxy will not be able to start on both machines).
keepalived configuration file:

[root@k8s-ha1 keepalived-1.4.2]# cat /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state MASTER  (set this to BACKUP on the other node)
    interface eth0
    virtual_router_id 1
    priority 100
    advert_int 3
    unicast_src_ip 192.168.50.197
    unicast_peer {
        192.168.50.198
    }

    authentication {
        auth_type PASS
        auth_pass 123abc
    }
    virtual_ipaddress {
        192.168.50.201 dev eth0 label eth0:1
    }
}
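After keepalived is started on both nodes, the VIP should be held only by the MASTER; a quick check (interface name matches the config above):

systemctl restart keepalived
ip addr show eth0 | grep 192.168.50.201    # the eth0:1 label should appear on the MASTER node only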

haproxy configuration file:

global
maxconn 100000
#chroot /usr/local/haproxy
#stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
user        haproxy
group       haproxy
daemon
nbproc 2
cpu-map 1 0
cpu-map 2 1
#pidfile /usr/local/haproxy/run/haproxy.pid
log 127.0.0.1 local3 info

defaults
option http-keep-alive
option  forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client  300000ms
timeout server  300000ms

listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri     /haproxy-status
 stats auth    admin:123456

#K8S-API-Server
frontend K8S_API
    bind 192.168.50.201:6443
    mode tcp
    default_backend k8s_api_nodes_6443

backend k8s_api_nodes_6443
    mode tcp
    balance leastconn
    server 192.168.50.171  192.168.50.171:6443  check inter 2000 fall 3 rise 5
    server 192.168.50.190  192.168.50.190:6443  check inter 2000 fall 3 rise 5
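Before starting haproxy it is worth validating the configuration; a sketch assuming the file is /etc/haproxy/haproxy.cfg:

haproxy -c -f /etc/haproxy/haproxy.cfg     # expect "Configuration file is valid"
systemctl restart haproxy
# the stats page defined in the listen block above: http://192.168.50.197:9999/haproxy-status (admin / 123456)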




[root@bogon keepalived]# sysctl -a| grep nonlocal
net.ipv4.ip_nonlocal_bind = 0
[root@bogon keepalived]# vim /etc/sysctl.conf 
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
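Apply the change without rebooting, and repeat it on the second LB node (192.168.50.198):

sysctl -p                        # reload /etc/sysctl.conf
sysctl -a | grep nonlocal        # should now show net.ipv4.ip_nonlocal_bind = 1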

master1 server:

Install the ansible and git packages
rm -rf /etc/ansible/*
Clone the repo: git clone https://github.com/gjmzj/kubeasz.git (if the clone fails, run yum update nss curl and clone again)
cp kubeasz/* /etc/ansible/ (copy the cloned repo into /etc/ansible)
cd /etc/ansible
cp example/hosts.m-masters.example ./hosts

Passwordless SSH key distribution script:
[root@bogon ~]# cat dd.sh
#!/bin/bash
# target host list
IP="
192.168.50.171
192.168.50.192
192.168.50.193
192.168.50.194
192.168.50.195
192.168.50.196
192.168.50.197
192.168.50.198
192.168.50.190
192.168.50.191
"
for node in ${IP};do
  sshpass -p zzhabq3 ssh-copy-id -o StrictHostKeyChecking=no ${node}
  if [ $? -eq 0 ];then
    echo "${node} key copied successfully"
  else
    echo "${node} key copy failed"
  fi
done
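The script depends on sshpass and an existing key pair; a minimal usage sketch (zzhabq3 above is assumed to be the root password of all target hosts):

yum install sshpass -y
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa    # generate a key pair first if one does not exist yet
bash dd.sh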

Then edit the hosts file:

# cluster deploy node: usually the node that runs the ansible playbooks
# the NTP_ENABLED variable (=yes/no) controls whether chrony time sync is installed on the cluster
[deploy]
192.168.50.171 NTP_ENABLED=no

# provide NODE_NAME for the etcd cluster as below; note the etcd cluster must have an odd number of nodes (1, 3, 5, 7, ...)
[etcd]
192.168.50.193 NODE_NAME=etcd1
192.168.50.194 NODE_NAME=etcd2
192.168.50.195 NODE_NAME=etcd3

[kube-master]
192.168.50.171
192.168.50.190

# load balancers (more than 2 nodes are now supported, but 2 is usually enough); haproxy+keepalived run here
[lb]
192.168.50.198 LB_ROLE=backup
192.168.50.197 LB_ROLE=master

[kube-node]
192.168.50.191

# NEW_INSTALL parameter: yes means install a new harbor, no means use an existing harbor server
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no

# reserved group, used later when adding master nodes
[new-master]
#192.168.1.5

# reserved group, used later when adding worker nodes
[new-node]
#192.168.1.xx

[all:vars]
# --------- main cluster parameters ---------------
# cluster deploy mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master

# cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.11"

# cluster MASTER IP, i.e. the VIP on the LB nodes; to distinguish it from the default apiserver port, the VIP can listen on 8443 (here it listens on 6443 because the LB nodes are separate machines)
# on public clouds, use the cloud load balancer's internal address and listening port instead
MASTER_IP="192.168.50.201"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"

# cluster network plugin; currently supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="flannel"

# service CIDR; make sure it does not conflict with existing internal networks
SERVICE_CIDR="10.68.0.0/16"

# pod CIDR (Cluster CIDR); make sure it does not conflict with existing internal networks
CLUSTER_CIDR="172.20.0.0/16"

# service port range (NodePort range)
NODE_PORT_RANGE="20000-40000"

# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"

# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.68.0.2"

# cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."

# username and password for cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"

# --------- additional parameters --------------------
# default binary directory
bin_dir="/usr/sbin"

# certificate directory
ca_dir="/etc/kubernetes/ssl"

# deploy directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"
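With the hosts file in place, a quick connectivity check before running any playbook (assumes the key distribution script above has already been run):

cd /etc/ansible
ansible all -m ping        # every host should answer "pong"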

[root@bogon ansible]# vim /etc/ansible/roles/deploy/templates/ca-csr.json.j2 (edit the SSL CA CSR template; this step is optional)
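For reference, the template is a standard cfssl CA CSR; the values shown here are only illustrative, not necessarily the template's actual contents:

{
  "CN": "kubernetes",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "System" }
  ]
}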

yum install audit-libs-python checkpolicy container-selinux libcgroup libseccomp libsemanage-python libtool-ltdl policycoreutils-python python-IPy setools-libs -y (docker dependency packages, needed both when upgrading docker and when starting it; install on the masters and the nodes)

Upgrade docker:
[root@bogon bin]# cd /etc/ansible/bin
Upload the package k8s.–11-3.tar.gz
Extract it; the archive contains a bin directory
mv bin/* .
cp docker* /opt/
[root@bogon harbor]# scp /usr/bin/docker* 192.168.50.171:/etc/ansible/bin (run on the server where docker was installed via yum; /usr/bin/docker* was located with whereis docker; 192.168.50.171 is the master server's IP)
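A quick sanity check that the binaries shipped to the cluster match the version running on harbor, as recommended further below:

/etc/ansible/bin/docker -v          # version that will be deployed to the cluster
ssh 192.168.50.196 docker -v        # version running on the harbor server; keep the two consistent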

[root@bogon ansible]# vim 01.prepare.yml 

# [optional] to synchronize time of nodes with 'chrony' 
- hosts: all
  roles:
  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }

# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
- hosts: deploy
  roles:
  - deploy

# prepare tasks for all nodes
- hosts:
  - kube-master
  - kube-node
  - deploy
  - etcd
#  - lb  (this is the line to modify: comment lb out here, since haproxy/keepalived were configured manually)
  roles:
  - prepare

#  [optional] to install loadbalance service, only needed by multi-master cluster 
- hosts: lb
  roles:
  - lb

[root@bogon ansible]# ansible-playbook 01.prepare.yml 

[root@bogon ansible]# vim /etc/kubernetes/ssl/ca-csr.json (check the certificate material generated after signing)

[root@bogon ansible]# ansible-playbook 02.etcd.yml (also install the docker dependency packages on the nodes, otherwise docker will fail to start there)
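A health check after the etcd playbook finishes. This is only a sketch: the etcdctl location and certificate file names are assumptions, adjust them to wherever kubeasz placed them on your etcd nodes.

# run on one of the etcd nodes
for ip in 192.168.50.193 192.168.50.194 192.168.50.195; do
  ETCDCTL_API=3 etcdctl --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/etcd/ssl/etcd.pem \
    --key=/etc/etcd/ssl/etcd-key.pem \
    endpoint health
done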

[root@bogon ansible]# cat roles/docker/templates/docker.service.j2 
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io

[Service]
Environment="PATH={{ bin_dir }}:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStart={{ bin_dir }}/dockerd  --insecure-registry 192.168.50.196   (this IP is the harbor IP; the --insecure-registry flag was found via [root@bogon ansible]# ./bin/dockerd -h | grep ins)
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

[root@bogon ansible]# ansible-playbook 03.docker.yml (if docker fails to start it may be a version mismatch; keep the masters' docker version consistent with harbor's. Another cause is a missing daemon/unit file, which can simply be added by hand; see the sketch below)
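The notes do not spell out the manual fix; one plausible reading is that the systemd unit file for docker is missing on the node. A sketch under that assumption (paths follow the bin_dir and template shown in this document):

# on the node where docker will not start, copy the rendered unit file from the deploy node
scp 192.168.50.171:/etc/ansible/roles/docker/templates/docker.service.j2 /etc/systemd/system/docker.service
# fill in the {{ bin_dir }} template variable by hand (bin_dir is /usr/sbin in this hosts file)
sed -i 's#{{ bin_dir }}#/usr/sbin#g' /etc/systemd/system/docker.service
systemctl daemon-reload && systemctl start docker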

[root@bogon ansible]# ansible-playbook 04.kube-master.yml (before running this, make sure haproxy is up and load balancing correctly)
After running it, check:
master1
[root@bogon ansible]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.50.171 Ready,SchedulingDisabled master 2m v1.11.3 (the SchedulingDisabled status is used during maintenance and keeps workloads off the masters)
192.168.50.190 Ready,SchedulingDisabled master 2m v1.11.3

master2
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.50.171 Ready,SchedulingDisabled master 2m v1.11.3
192.168.50.190 Ready,SchedulingDisabled master 2m v1.11.3
If ansible-playbook 04.kube-master.yml fails, check whether docker is running; if it is not, start it first with systemctl daemon-reload; systemctl start docker

[root@bogon ansible]# cat /etc/ansible/roles/kube-node/templates/kube-proxy.service.j2


In that template, locate the PROXY_MODE parameter.

Then grep to see which files define it:

cd /etc/ansible

[root@bogon ansible]# grep PROXY_MODE ./* -R
./docs/guide/ipvs.md:- v1.11, v1.10 版本启用 kube-proxy的 ipvs 模式:只需要在 roles/kube-node/defaults/main.yml 配置 PROXY_MODE: "ipvs"
./roles/kube-node/defaults/main.yml:#PROXY_MODE: "iptables"
./roles/kube-node/defaults/main.yml:PROXY_MODE: "ipvs"
./roles/kube-node/templates/kube-proxy.service.j2: --proxy-mode={{ PROXY_MODE }}

Enable ipvs:
vim /etc/ansible/roles/kube-node/defaults/main.yml
Copy the image path from this file.

Change PROXY_MODE: "iptables" to PROXY_MODE: "ipvs"
[root@bogon ansible]# docker pull mirrorgooglecontainers/pause-amd64:3.1 (this is the image path copied in the previous step)
[root@bogon ansible]# docker tag mirrorgooglecontainers/pause-amd64:3.1 192.168.50.196/baseimages/pause-amd64:3.1

[root@bogon ansible]# docker push 192.168.50.196/baseimages/pause-amd64:3.1
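Pushing to harbor only works after docker is logged in and the target project exists; a quick sketch using the credentials from the harbor section above:

docker login 192.168.50.196 -u admin -p 123456
# create the "baseimages" project in the harbor web UI before the first push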

[root@bogon ansible]# cat roles/kube-node/templates/kubelet.service.j2
--pod-infra-container-image=192.168.50.196/baseimages/pause-amd64:3.1 (change only this flag so it points at the image pushed to harbor)

[root@bogon ansible]# ansible-playbook 05.kube-node.yml
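After the node playbook finishes, a quick check that the node joined and ipvs is active (ipvsadm may need to be installed on the node first):

kubectl get nodes        # 192.168.50.191 should now show Ready
# on the node itself, confirm that ipvs virtual servers were created
ipvsadm -Ln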

Note: the network components will be covered in the next update.
