2020-09-14 Architect Track, Week 10 Assignment

▲ For students on the standalone architecture track: (assignments for days 19-20 of Jie's video course)

1. Use docker-compose to set up nginx+php-fpm, mysql, and wordpress images, manage their startup, and bring up a working WordPress site

I. Environment preparation:

CentOS 7.8.2003, with the Docker service already installed and running

II. Install docker-compose

[root@docker~]# yum install docker-compose

[root@docker~]# docker-compose version

docker-compose version 1.18.0, build 8dd22a9

docker-py version: 2.6.1

CPython version: 3.6.8

OpenSSL version: OpenSSL 1.0.2k-fips  26 Jan 2017

III. Write the docker-compose.yml file

[root@docker~]# mkdir /wordpress

[root@docker~]# cd /wordpress

[root@docker wordpress]# vim docker-compose.yml

version: '3.3'

services:

  db:

    image: mysql:5.7

    volumes:

      - db_data:/var/lib/mysql

    restart: always

    environment:

      MYSQL_ROOT_PASSWORD: somewordpress

      MYSQL_DATABASE: wordpress

      MYSQL_USER: wordpress

      MYSQL_PASSWORD: wordpress

  wordpress:

    depends_on:

      - db

    image: wordpress:latest

    ports:

      - "8000:80"

    restart: always

    environment:

      WORDPRESS_DB_HOST: db:3306

      WORDPRESS_DB_USER: wordpress

      WORDPRESS_DB_PASSWORD: wordpress

      WORDPRESS_DB_NAME: wordpress

volumes:

    db_data: {}

IV. Run docker-compose

[root@docker wordpress]# docker-compose up -d
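
To confirm the stack is healthy, a quick check (a minimal sketch; the container names are generated by compose from the project directory, so exact output may differ):

[root@docker wordpress]# docker-compose ps                 # both the db and wordpress services should show State "Up"

[root@docker wordpress]# curl -I http://127.0.0.1:8000     # expect an HTTP response; a 302 to the WordPress installer on first run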


2. Use ansible to perform the initial installation and configuration of K8s.

I. Environment preparation

1. Ubuntu 18.04

(1) Base packages

# apt purge ufw lxd lxd-client lxcfs lxc-common

# apt install -y iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip

(2) System resource limit tuning

# vim /etc/security/limits.conf

# soft and hard resource limits for the root account

root soft core unlimited

root hard core unlimited

root soft nproc 1000000

root hard nproc 1000000

root soft nofile 1000000

root hard nofile 1000000

root soft memlock 32000

root hard memlock 32000

root soft msgqueue 8192000

root hard msgqueue 8192000

# soft and hard resource limits for all other accounts

* soft core unlimited

* hard core unlimited

* soft nproc 1000000

* hard nproc 1000000

* soft nofile 1000000

* hard nofile 1000000

* soft memlock 32000

* hard memlock 32000

* soft msgqueue 8192000

* hard msgqueue 8192000
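
limits.conf is only read at login, so the new values apply to fresh sessions; a quick sanity check after re-logging in (a minimal sketch):

# ulimit -n          # nofile, expect 1000000

# ulimit -u          # nproc, expect 1000000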

(3) Kernel parameter tuning

# vim /etc/sysctl.conf

# Controls source route verification

net.ipv4.conf.default.rp_filter = 1

net.ipv4.ip_nonlocal_bind = 1

net.ipv4.ip_forward = 1

# Do not accept source routing

net.ipv4.conf.default.accept_source_route = 0

# Controls the System Request debugging functionality of the kernel

kernel.sysrq = 0

# Controls whether core dumps will append the PID to the core filename.

# Useful for debugging multi-threaded applications.

kernel.core_uses_pid = 1

# Controls the use of TCP syncookies

net.ipv4.tcp_syncookies = 1

# Disable netfilter on bridges.

net.bridge.bridge-nf-call-ip6tables = 0

net.bridge.bridge-nf-call-iptables = 0

net.bridge.bridge-nf-call-arptables = 0

# Controls the default maximum size of a message queue

kernel.msgmnb = 65536

# Controls the maximum size of a message, in bytes

kernel.msgmax = 65536

# Controls the maximum shared segment size, in bytes

kernel.shmmax = 68719476736

# Controls the maximum number of shared memory segments, in pages

kernel.shmall = 4294967296

# TCP kernel paramater

net.ipv4.tcp_mem = 786432 1048576 1572864

net.ipv4.tcp_rmem = 4096 87380 4194304

net.ipv4.tcp_wmem = 4096 16384 4194304

net.ipv4.tcp_window_scaling = 1

net.ipv4.tcp_sack = 1

# socket buffer

net.core.wmem_default = 8388608

net.core.rmem_default = 8388608

net.core.rmem_max = 16777216

net.core.wmem_max = 16777216

net.core.netdev_max_backlog = 262144

net.core.somaxconn = 20480

net.core.optmem_max = 81920

# TCP conn

net.ipv4.tcp_max_syn_backlog = 262144

net.ipv4.tcp_syn_retries = 3

net.ipv4.tcp_retries1 = 3

net.ipv4.tcp_retries2 = 15

# tcp conn reuse

net.ipv4.tcp_timestamps = 0

net.ipv4.tcp_tw_reuse = 0

net.ipv4.tcp_tw_recycle = 0

net.ipv4.tcp_fin_timeout = 1

net.ipv4.tcp_max_tw_buckets = 20000

net.ipv4.tcp_max_orphans = 3276800

net.ipv4.tcp_synack_retries = 1

net.ipv4.tcp_syncookies = 1

# keepalive conn

net.ipv4.tcp_keepalive_time = 300

net.ipv4.tcp_keepalive_intvl = 30

net.ipv4.tcp_keepalive_probes = 3

net.ipv4.ip_local_port_range = 10001 65000

# swap

vm.overcommit_memory = 0

vm.swappiness = 10

#net.ipv4.conf.eth1.rp_filter = 0

#net.ipv4.conf.lo.arp_ignore = 1

#net.ipv4.conf.lo.arp_announce = 2

#net.ipv4.conf.all.arp_ignore = 1

#net.ipv4.conf.all.arp_announce = 2
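
These settings are written to the file but not yet applied; a minimal sketch to load them (on stock Ubuntu 18.04 kernels the net.bridge.* keys require the br_netfilter module, otherwise sysctl -p fails on those lines):

# modprobe br_netfilter                                        # needed for the net.bridge.* keys

# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # load the module on boot

# sysctl -p                                                    # apply /etc/sysctl.conf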

2. Host IP plan

(1) Master nodes

192.168.7.101  master1.magedu.net  vip:192.168.7.248

192.168.7.102  master2.magedu.net  vip:192.168.7.248

(2) Harbor node

192.168.7.103  harbor.magedu.net

(3) etcd nodes

192.168.7.105  etcd1.magedu.net

192.168.7.106  etcd2.magedu.net

192.168.7.107  etcd3.magedu.net

(4) HAProxy + Keepalived node

192.168.7.108  ha.magedu.net

(5) Worker (node) nodes

192.168.7.110  node1.magedu.net

192.168.7.111  node2.magedu.net

3. High-availability load balancing (performed on host 192.168.7.108)

(1) keepalived

# apt-get install -y libssl-dev openssl libpopt-dev keepalived

# vim /etc/keepalived/keepalived.conf

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 1
    priority 100
    advert_int 3
    unicast_src_ip 192.168.7.108
    unicast_peer {
        192.168.7.109
    }
    authentication {
        auth_type PASS
        auth_pass 123abc
    }
    virtual_ipaddress {
        192.168.7.248 dev eth0 label eth0:1
    }
}

# systemctl start keepalived

# systemctl enable keepalived

# systemctl status keepalived
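
A quick check that the VIP landed on this node (a minimal sketch):

# ip addr show eth0 | grep 192.168.7.248       # the VIP should be bound as eth0:1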

(2) haproxy

# vim /etc/haproxy/haproxy.cfg

listen k8s_api_nodes_6443
    bind 192.168.7.248:6443
    mode tcp
    #balance leastconn
    server 192.168.7.101 192.168.7.101:6443 check inter 2000 fall 3 rise 5
    server 192.168.7.102 192.168.7.102:6443 check inter 2000 fall 3 rise 5
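
The config above is edited but haproxy itself is never shown being installed or restarted; a hedged sketch assuming the stock Ubuntu package (binding to the VIP on the standby node relies on the net.ipv4.ip_nonlocal_bind=1 sysctl set earlier):

# apt-get install -y haproxy

# systemctl restart haproxy && systemctl enable haproxy

# ss -tnlp | grep 6443         # haproxy should be listening on 192.168.7.248:6443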

4. Required on every node (master/etcd/node)

(1) The docker-ce service; alternatively, install it with the official automated script (see https://developer.aliyun.com/article/110806; suitable only for hosts with Internet access):

# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun


II. Harbor installation and configuration

1. Upload harbor-offline-installer-v1.7.5 to /usr/local/src

[root@docker-server1 src]# tar xvf harbor-offline-installer-v1.7.5.tgz

[root@docker-server1 src]# ln -sv /usr/local/src/harbor /usr/local/

[root@docker-server1 harbor]# cd /usr/local/harbor/

2. HTTPS configuration for Harbor

root@k8s-harbor1:/usr/local/src/harbor# pwd

/usr/local/src/harbor

root@k8s-harbor1:/usr/local/src/harbor# mkdir certs/

# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key    # generate the private key

# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt    # issue the self-signed certificate

# vim harbor.cfg

hostname = harbor.magedu.net

ui_url_protocol = https

ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt

ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key

harbor_admin_password = 123456

# apt-get install docker-compose

# ./install.sh

# docker-compose start
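
To confirm Harbor came up, a minimal sketch, run from /usr/local/harbor where install.sh left its docker-compose.yml (assumes harbor.magedu.net resolves locally, e.g. via /etc/hosts):

# docker-compose ps                               # every harbor container should show "Up"

# curl -sk https://harbor.magedu.net | head -n 5  # -k because the certificate is self-signed; expect the Harbor UI HTML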

3. Sync the .crt certificate to the client nodes

# mkdir /etc/docker/certs.d/harbor.magedu.net -p          # on all nodes

harbor1:~# scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.7.101:/etc/docker/certs.d/harbor.magedu.net

# systemctl restart docker                                # restart docker on all nodes
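
Repeating the copy for every node by hand is error-prone; a hedged loop sketch (the IP list is assumed from the host plan above, and key-based ssh from the harbor host is assumed):

harbor1:~# for ip in 192.168.7.101 192.168.7.102 192.168.7.110 192.168.7.111; do \
    ssh root@${ip} "mkdir -p /etc/docker/certs.d/harbor.magedu.net"; \
    scp /usr/local/src/harbor/certs/harbor-ca.crt root@${ip}:/etc/docker/certs.d/harbor.magedu.net/; \
    ssh root@${ip} "systemctl restart docker"; \
  done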

4. Log in to Harbor

root@k8s-master1:~# docker login harbor.magedu.net

Username: admin

Password:

WARNING! Your password will be stored unencrypted in /root/.docker/config.json. Configure a credential helper to remove this warning. See https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

5. Test pushing an image to Harbor

master1:~# docker pull alpine

root@k8s-master1:~# docker tag alpine harbor.magedu.net/library/alpine:linux36

root@k8s-master1:~# docker push harbor.magedu.net/library/alpine:linux36

The push refers to repository [harbor.magedu.net/library/alpine]

256a7af3acb1: Pushed

linux36: digest: sha256:97a042bf09f1bf78c8cf3dcebef94614f2b95fa2f988a5c07314031bc2570c7a size: 528

III. Groundwork for deploying K8s with ansible

(1) Base environment

# apt-get install python2.7

# ln -s /usr/bin/python2.7 /usr/bin/python

# apt-get install git ansible -y

# ssh-keygen                    # generate a key pair

# ssh-copy-id root@<node-ip>     # repeat for each k8s server

# apt-get install sshpass        # lets ssh-copy-id push the public key to each k8s server non-interactively
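
With sshpass the key can be pushed to every host in one loop; a hedged sketch (the IP list is taken from the host plan above, and 'MyRootPass' is a placeholder password):

# for ip in 192.168.7.101 192.168.7.102 192.168.7.105 192.168.7.106 192.168.7.107 192.168.7.110 192.168.7.111; do \
    sshpass -p 'MyRootPass' ssh-copy-id -o StrictHostKeyChecking=no root@${ip}; \
  done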

(2) Clone the project

# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git

root@k8s-master1:~# mv /etc/ansible/* /opt/        # move the stock ansible config out of the way

root@k8s-master1:~# mv kubeasz/* /etc/ansible/

root@k8s-master1:~# cd /etc/ansible/


(3) Prepare the hosts file

root@k8s-master1:/etc/ansible# pwd

/etc/ansible

root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts    # copy the hosts template

root@k8s-master1:/etc/ansible# cat hosts

# Cluster deploy node: normally the node that runs the ansible playbooks

# The variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed on the cluster

[deploy]

192.168.7.101 NTP_ENABLED=no

# For the etcd cluster, provide NODE_NAME as below; note that an etcd cluster must have an odd number of nodes (1, 3, 5, 7...)

[etcd]

192.168.7.105 NODE_NAME=etcd1

192.168.7.106 NODE_NAME=etcd2

192.168.7.107 NODE_NAME=etcd3

[new-etcd] # reserved group, for adding etcd nodes later

#192.168.7.x NODE_NAME=etcdx

[kube-master]

192.168.7.101

[new-master] # reserved group, for adding master nodes later

#192.168.7.5

[kube-node]

192.168.7.110

[new-node] # reserved group, for adding worker nodes later

#192.168.7.xx

# Parameter NEW_INSTALL: yes means install a new harbor, no means use an existing harbor server

# If no domain name is used, HARBOR_DOMAIN="" may be set

[harbor]

#192.168.7.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no

# Load balancing (more than 2 nodes are now supported, but 2 is usually enough); installs haproxy+keepalived

[lb]

192.168.7.1 LB_ROLE=backup

192.168.7.2 LB_ROLE=master

# [Optional] external load balancer, for forwarding traffic to NodePort-exposed services etc. in self-hosted environments

[ex-lb]

#192.168.7.6 LB_ROLE=backup EX_VIP=192.168.7.250

#192.168.7.7 LB_ROLE=master EX_VIP=192.168.7.250

[all:vars]

# ---------Main cluster parameters---------------

# Cluster deployment mode: allinone, single-master, multi-master

DEPLOY_MODE=multi-master

# Cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13

K8S_VER="v1.13"

# Cluster MASTER IP, i.e. the VIP on the LB nodes; the template suggests port 8443 to distinguish the VIP service port from the default apiserver port (this deployment keeps 6443, matching the haproxy listener above)

# On public clouds, use the cloud load balancer's internal address and listening port

MASTER_IP="192.168.7.248"

KUBE_APISERVER="https://{{ MASTER_IP }}:6443"

# Cluster network plugin; currently supported: calico, flannel, kube-router, cilium

CLUSTER_NETWORK="calico"

# Service CIDR; make sure it does not overlap any existing internal subnet

SERVICE_CIDR="10.20.0.0/16"

# Pod CIDR (Cluster CIDR); make sure it does not overlap any existing internal subnet

CLUSTER_CIDR="172.31.0.0/16"

# Service port range (NodePort Range)

NODE_PORT_RANGE="20000-60000"

# kubernetes service IP (pre-allocated; normally the first IP in SERVICE_CIDR)

CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"

# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)

CLUSTER_DNS_SVC_IP="10.20.254.254"

# Cluster DNS domain

CLUSTER_DNS_DOMAIN="linux36.local."

# Username and password for cluster basic auth

BASIC_AUTH_USER="admin"

BASIC_AUTH_PASS="123456"

# ---------Additional parameters--------------------

# Default binary directory

bin_dir="/usr/bin"

# Certificate directory

ca_dir="/etc/kubernetes/ssl"

# Deployment directory, i.e. the ansible working directory; changing it is not recommended

base_dir="/etc/ansible"

(4) Prepare the binaries

k8s-master1:/etc/ansible/bin# pwd

/etc/ansible/bin

k8s-master1:/etc/ansible/bin# tar xvf k8s.1-13-5.tar.gz

k8s-master1:/etc/ansible/bin# mv bin/* .

IV. Deployment (initialize the environment and deploy the HA k8s cluster with the ansible playbooks)

1: Environment initialization

root@k8s-master1:/etc/ansible# pwd

/etc/ansible

root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml

2: Deploy the etcd cluster

root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml
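
Before continuing, it is worth verifying etcd health; a hedged sketch, run on one of the etcd nodes (the certificate paths follow the kubeasz 0.6.x layout and may differ in other setups):

# for ip in 192.168.7.105 192.168.7.106 192.168.7.107; do \
    ETCDCTL_API=3 etcdctl --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem \
    --key=/etc/etcd/ssl/etcd-key.pem endpoint health; \
  done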

3: Deploy docker

The playbook can optionally change the service startup script path, but since docker was installed in advance on all nodes, this step does not need to be run again.

4: Deploy the masters

root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
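
A quick sanity check on the new control plane (a minimal sketch; componentstatuses still works on v1.13):

root@k8s-master1:/etc/ansible# kubectl get componentstatus     # scheduler, controller-manager and etcd members should be Healthy

root@k8s-master1:/etc/ansible# kubectl get node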

5: Deploy the nodes

root@k8s-master1:/etc/ansible# vim roles/kube-node/defaults/main.yml

# base (pause) container image

SANDBOX_IMAGE: "harbor.magedu.net/baseimages/pause-amd64:3.1"
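
Since SANDBOX_IMAGE points at the local Harbor, the pause image has to be pushed there before the playbook runs; a hedged sketch (the upstream source here is an assumption, any reachable mirror of pause-amd64:3.1 will do):

# docker pull mirrorgooglecontainers/pause-amd64:3.1      # assumed mirror of k8s.gcr.io/pause-amd64:3.1

# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor.magedu.net/baseimages/pause-amd64:3.1

# docker push harbor.magedu.net/baseimages/pause-amd64:3.1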

root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml

6: Deploy the calico network service

# docker load -i calico-cni.tar

# docker tag calico/cni:v3.4.4 harbor.magedu.net/baseimages/cni:v3.4.4

# docker push harbor.magedu.net/baseimages/cni:v3.4.4

# docker load -i calico-node.tar

# docker tag calico/node:v3.4.4 harbor.magedu.net/baseimages/node:v3.4.4

# docker push harbor.magedu.net/baseimages/node:v3.4.4

# docker load -i calico-kube-controllers.tar

# docker tag calico/kube-controllers:v3.4.4 harbor.magedu.net/baseimages/kube-controllers:v3.4.4

# docker push harbor.magedu.net/baseimages/kube-controllers:v3.4.4

Run the network deployment:

root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml

7: Verify the current state

root@k8s-master1:/etc/ansible# calicoctl node status
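
Beyond calico's BGP peer status, two cluster-level checks (a minimal sketch):

root@k8s-master1:/etc/ansible# kubectl get node -o wide                # all masters and nodes should be Ready

root@k8s-master1:/etc/ansible# kubectl get pod -n kube-system -o wide  # calico pods should be Running on every node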
