Kubernetes (k8s) binary high-availability cluster setup

I. Planning

1. Cluster plan

  • Kernel 3.10 or later, CentOS 7

| IP | Hostname | Components | Notes |
| --- | --- | --- | --- |
| 10.4.7.200 | k8s-manage | harbor + nginx + nfs + cfssl | management node, 1C2G |
| 10.4.7.11 | keepalived01 | keepalived + nginx + bind9 | layer-4/7 proxy node, 1C1G |
| 10.4.7.12 | keepalived02 | keepalived + nginx | layer-4/7 proxy node, 1C1G |
| 10.4.7.21 | k8s-master01 | scheduler + controller-manager + apiserver + etcd + kubelet | these three are both control-plane and compute nodes, 2C2G |
| 10.4.7.22 | k8s-master02 | scheduler + controller-manager + apiserver + etcd + kubelet | |
| 10.4.7.23 | k8s-master03 | scheduler + controller-manager + apiserver + etcd + kubelet | |

2. Base requirements

  • The Kubernetes version is v1.19.10
# On all servers
systemctl stop firewalld;systemctl disable firewalld
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Time zone: if /etc/localtime is already a symlink that does not point to Shanghai, delete it first and re-create the symlink
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate ntp2.aliyun.com			# sync time from the Aliyun NTP server
/sbin/hwclock --systohc			# write the system time to the hardware clock (BIOS)
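# Optional (an assumption, not in the original): keep the clock in sync with a cron job
# instead of a one-off ntpdate; /usr/sbin/ntpdate is provided by the ntpdate package installed below.
(crontab -l 2>/dev/null; echo '*/5 * * * * /usr/sbin/ntpdate ntp2.aliyun.com >/dev/null 2>&1') | crontab -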

# System
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config

# Tools
yum -y install tree vim wget bash-completion bash-completion-extras lrzsz net-tools sysstat iotop iftop htop unzip nc nmap telnet bc  psmisc httpd-tools ntpdate

3. SSH key

  • On k8s-manage, generate a key pair and push the public key to the other servers
ssh-keygen
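A fuller sketch of generating and distributing the key, assuming root SSH logins are allowed (ssh-copy-id prompts once per host for the root password; the IPs are the nodes from the plan above):

ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa
for host in 10.4.7.11 10.4.7.12 10.4.7.21 10.4.7.22 10.4.7.23; do
    ssh-copy-id root@$host
done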

II. DNS

  • On keepalived01
  • Skip this section if you already have a DNS service

1. Main configuration file

yum -y install bind

# Edit the main configuration file
vim /etc/named.conf

options {
    listen-on port 53 { 10.4.7.11; };	# change to the DNS server's own address; port 53 is the default
    listen-on-v6 port 53 { ::1; };		# remove this line, IPv6 is not used
    directory   "/var/named";
    dump-file   "/var/named/data/cache_dump.db";
    statistics-file "/var/named/data/named_stats.txt";
    memstatistics-file "/var/named/data/named_mem_stats.txt";
    recursing-file  "/var/named/data/named.recursing";
    secroots-file   "/var/named/data/named.secroots";
    allow-query     { any; };			# change to any, so any server that can reach this host may query it
    forwarders      { 10.4.7.254; };	# upstream DNS to forward unknown queries to, here the gateway address
    recursion yes;						# use recursive queries; must be yes (queries are either recursive or iterative)
    dnssec-enable no;					# disabled for now
    dnssec-validation no;    			# disabled for now; keep both enabled in production
    # (remaining default options and sections unchanged)

2. Zone configuration file

# Zone configuration file; append at the end. Two zones are created: the host zone host.com and the business zone xg.com
vim /etc/named.rfc1912.zones

zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 10.4.7.11; };
};

zone "xg.com" IN {
        type master;
        file "xg.com.zone";
        allow-update { 10.4.7.11; };
};

3. Zone data files

# Create the zone data file
# $TTL 600: default record TTL, entries expire after 10 minutes
# @ IN SOA: the SOA record, the first authoritative record of the zone; its parameters follow
# NS: the NS record; the servers' A records come after it
vim /var/named/host.com.zone

$ORIGIN host.com.
$TTL 600        ; 10 minutes
@       IN SOA  dns.host.com. dnsadmin.host.com. (
                               2021091301 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                        NS  dns.host.com.
$TTL 60 ; 1 minute
dns                    A    10.4.7.11
keepalived01           A    10.4.7.11
keepalived02           A    10.4.7.12
k8s-master01           A    10.4.7.21
k8s-master02           A    10.4.7.22
k8s-master03           A    10.4.7.23
k8s-manage             A    10.4.7.200

# Create the data file for the second zone
vim /var/named/xg.com.zone

$ORIGIN xg.com.
$TTL 600        ; 10 minutes
@               IN SOA dns.xg.com. dnsadmin.xg.com. (
                               2021091301 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                               NS   dns.xg.com.
$TTL 60 ; 1 minute
dns                A   10.4.7.11

4. Check the syntax and start

# Verify the configuration syntax
named-checkconf
systemctl start named;systemctl enable named

# Check the port; the default is 53
netstat -tnulp | grep 53

5. Test

# Check that the names resolve to IP addresses
dig -t A k8s-manage.host.com @10.4.7.11 +short
dig -t A k8s-master01.host.com @10.4.7.11 +short

6. DNS

# Point DNS on all servers to 10.4.7.11 (e.g. set DNS1= in the interface's ifcfg file)
DNS1=10.4.7.11

systemctl restart network

ping k8s-master03.host.com
ping baidu.com

# On all servers add a search line so short hostnames resolve. Only do this for the host zone, never for the business zone, otherwise it gets out of hand
vim /etc/resolv.conf
search host.com

# These are virtual machines, so change DNS on the Windows host as well: VMnet8 --> Properties --> IPv4 --> set the preferred DNS server to:
10.4.7.11

# In a Windows cmd prompt
ping k8s-master01.host.com

7. rndc
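The original leaves this section empty. As a hedged note, rndc is BIND's control utility (set up by the bind package on CentOS 7) and is typically used to reload zone data after edits without restarting named:

rndc status          # confirm named responds to rndc
rndc reload          # reload all zones after editing zone files
rndc reload xg.com   # or reload a single zone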

III. CA certificate issuance

  • On k8s-manage
  • Kubernetes components communicate over TLS, so certificates must be issued

1. Download cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
chmod +x /usr/bin/cfssl*

2. Root certificate

  • Issuing certificates requires a root certificate that everything else is signed against, so create the root (CA) certificate first
# Create the certificate directory
mkdir /opt/certs && cd /opt/certs

# Create ca-csr.json, the CA certificate signing request. It has several sections; CN is the organization name
vim ca-csr.json

{
    "CN":"Moon",
    "hosts":[

    ],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "ST":"shenzhen",
            "L":"shenzhen",
            "O":"xg",
            "OU":"ops"
        }
    ],
    "ca":{
        "expiry":"175200h"
    }
}
| Field | Meaning |
| --- | --- |
| CN (Common Name) | Browsers use this field to check whether a site is legitimate; usually the domain name. Very important |
| C (Country) | Country |
| ST (State) | State / province |
| L (Locality) | City |
| O (Organization Name) | Organization / company name |
| OU (Organization Unit Name) | Organizational unit / department |
| expiry | Expiry time; Kubernetes defaults to one year, here it is set to 20 years (175200h) |
  • Issue the root certificate
# Dry run: confirm that a certificate and private key can be generated
cfssl gencert -initca ca-csr.json


# Sign
cfssl gencert -initca /opt/certs/ca-csr.json | cfssl-json -bare ca

# ca.pem is the root certificate, ca-key.pem its private key, and ca.csr the signing request (no longer needed once the cert is issued). Kubernetes' other certificates will all be signed against this CA
ll
total 16
-rw-r--r-- 1 root root  993 Dec 20 09:33 ca.csr
-rw-r--r-- 1 root root  316 Dec 20 09:28 ca-csr.json
-rw------- 1 root root 1675 Dec 20 09:33 ca-key.pem
-rw-r--r-- 1 root root 1346 Dec 20 09:33 ca.pem

# Inspect the certificate and its validity period
cfssl-certinfo -cert ca.pem

3. Certificate overview

  • All of the following are signed by this single CA
# Etcd:
    Etcd serves clients, so it needs an etcd server certificate
    The etcd members talk to each other, so they need an etcd peer certificate
    kube-apiserver accesses etcd, so it needs an etcd client certificate

# kubernetes:
    kube-apiserver serves clients, so it needs a kube-apiserver server certificate
    kube-scheduler, kube-controller-manager, kube-proxy, kubelet and any other components that access kube-apiserver need a kube-apiserver client certificate
    kube-controller-manager signs service account tokens, so it needs a key pair for signing service accounts (the CA certificate/key)
    kubelet serves requests, so it needs a kubelet server certificate
    kube-apiserver accesses the kubelet, so it needs a kubelet client certificate

IV. Docker environment

  • On k8s-master01, k8s-master02, k8s-master03 and k8s-manage

1. Install

curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum install docker-ce -y


# Alternatively use the official script, a one-line install
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

# A China-mirror install also works, but it is not recommended; it seems flaky and Docker may fail to start afterwards
curl -sSL https://get.daocloud.io/docker | sh

2. Configuration

# Adjust bip per server IP: on .22 use 172.7.22.1/24, and so on. This makes it easy to tell which node a business pod runs on later, which helps troubleshooting
mkdir /etc/docker
vim /etc/docker/daemon.json

{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.xg.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}

mkdir -p /data/docker

# Start
systemctl enable docker;systemctl start docker

# Check
docker info
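Optionally confirm that the bip setting took effect: the docker0 bridge should carry the per-node address configured above (a quick assumed check; on 10.4.7.21 the output will look roughly like this):

ip addr show docker0 | grep 'inet '
    inet 172.7.21.1/24 brd 172.7.21.255 scope global docker0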

V. Harbor

  • On k8s-manage

1. Install Harbor

# Download the Harbor v1.8.3 offline installer
wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.3.tgz

tar xf harbor-offline-installer-v1.8.3.tgz -C /opt/

# Symlink to pin the version and make upgrades easier
mv /opt/harbor /opt/harbor-v1.8.3
ln -s /opt/harbor-v1.8.3/ /opt/harbor
# The initial configuration is shown below; adjust to your environment.
egrep -v '^$|#' /opt/harbor/harbor.yml 
hostname: reg.mydomain.com
http:
  port: 80
harbor_admin_password: Harbor12345
database:
  password: root123
data_volume: /data
clair: 
  updaters_interval: 12
  http_proxy:
  https_proxy:
  no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
  max_job_workers: 10
chart:
  absolute_url: disabled
log:
  level: info                       # log level
  rotate_count: 50        			# number of rotated log files to keep
  rotate_size: 200M       			# rotation size threshold
  location: /var/log/harbor
_version: 1.8.0
# I left the database and admin passwords unchanged; my modified config is:
egrep -v '^$|#' /opt/harbor/harbor.yml
hostname: harbor.xg.com
http:
  port: 180
harbor_admin_password: Harbor12345
database:
  password: root123
data_volume: /data/docker
clair: 
  updaters_interval: 12
  http_proxy:
  https_proxy:
  no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
  max_job_workers: 10
chart:
  absolute_url: disabled
log:
  level: info
  rotate_count: 50
  rotate_size: 200M
  location: /data/harbor/logs
_version: 1.8.0

# Create the log directory
mkdir -p /data/harbor/logs
  • Harbor depends on docker-compose
# EPEL repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install -y docker-compose

2. systemd

  • Harbor does not come back up after a server reboot, so add a systemd unit for convenience
# The Harbor components must be installed first, otherwise the unit has nothing to manage
cd /opt/harbor && ./install.sh
vim /lib/systemd/system/harbor.service

[Unit]
Description=harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/bin/docker-compose -f  /opt/harbor/docker-compose.yml up
ExecStop=/usr/bin/docker-compose -f  /opt/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target
systemctl enable harbor;systemctl restart harbor

3. Proxy in front of Harbor

  • On k8s-manage
vim /etc/yum.repos.d/nginx.repo

[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
# Install with yum
yum -y install nginx

vim /etc/nginx/conf.d/harbor.xg.com.conf
server {
    listen       80;
    server_name  harbor.xg.com;

    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}
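The original does not show starting this nginx; a small assumed follow-up to validate and enable it:

nginx -t
systemctl start nginx;systemctl enable nginx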

4. DNS

# The business zone is xg.com; edit its zone file as follows:
# Bump the serial forward by one (here ...01 --> ...02); do this every time a record is added.
# Append at the end: harbor             A   10.4.7.200

vim /var/named/xg.com.zone
$ORIGIN xg.com.
$TTL 600        ; 10 minutes
@               IN SOA dns.xg.com. dnsadmin.xg.com. (
                               2021091302 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                               NS   dns.xg.com.
$TTL 60 ; 1 minute
dns                A   10.4.7.11
harbor             A   10.4.7.200
systemctl restart named
dig -t A harbor.xg.com +short      # should resolve to the Harbor host's IP
# Browse to
http://harbor.xg.com
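A quick hedged check that the registry also answers through the proxy, using the admin password from harbor.yml above:

docker login harbor.xg.com -u admin -p Harbor12345
Login Succeeded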

VI. supervisor

  • On k8s-master01, k8s-master02, k8s-master03
  • A process-control tool written in Python. It turns ordinary foreground commands into background services, monitors them, keeps them running, and restarts them immediately if they stop

1. Usage

# Stop a service
supervisorctl stop etcd-server-7-21

# Start a service
supervisorctl start etcd-server-7-21
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum -y install supervisor
systemctl start supervisord.service;systemctl enable supervisord.service 
  • Copy the configuration below; remove the explanatory comment lines above each option before using it
vim /etc/supervisord.d/etcd-server.ini
# etcd on 10.4.7.21 is written as 7-21; change this on the other nodes
[program:etcd-server-7-21]
# Path of the etcd startup script.
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
# Number of processes
numprocs=1                                                      ; number of processes copies to start (def 1)
# Working directory
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
# Start automatically with supervisord
autostart=true                                                  ; start at supervisord start (default: true)
# Restart automatically on unexpected exit
autorestart=true                                                ; retstart at unexpected quit (default: true)
# Seconds the process must stay up before it counts as started.
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
# Maximum number of start retries
startretries=3                                                  ; max # of serial start failures (default 3)
# Exit codes treated as expected
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
# Signal used to stop the process
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
# Run as the etcd user
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)

VII. etcd cluster

1. etcd certificates

  • On k8s-manage
  • Certificates for communication within the etcd cluster
vim /opt/certs/ca-config.json
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
} 

# Note the different profiles in this file:
# peer:   mutual communication between members
# client: a client presents a certificate to the server; the server does not authenticate back to the client
# server: certificate configured when starting a server
  • CSR json file for the etcd certificate
vim /opt/certs/etcd-peer-csr.json
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.4.7.11",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23",
        "10.4.7.24"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "xg",
            "OU": "ops"
        }
    ]
} 


# Note:
# hosts: the IP addresses of the etcd nodes (IPs only); list as many as you might need. 10.4.7.11 is included so it can stand in if one of the etcd nodes fails.
  • Sign and generate the etcd certificates
cd /opt/certs
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssl-json -bare etcd-peer

ll etcd*
-rw-r--r-- 1 root root 1066 Sep 15 09:33 etcd-peer.csr
-rw-r--r-- 1 root root  366 Sep 15 09:32 etcd-peer-csr.json
-rw------- 1 root root 1675 Sep 15 09:33 etcd-peer-key.pem
-rw-r--r-- 1 root root 1428 Sep 15 09:33 etcd-peer.pem

2. Install

  • On k8s-master01, k8s-master02, k8s-master03
useradd -s /sbin/nologin -M etcd

wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz

# Unpack and symlink
tar xf etcd-v3.1.20-linux-amd64.tar.gz
mv etcd-v3.1.20-linux-amd64 /opt/
mv /opt/etcd-v3.1.20-linux-amd64 /opt/etcd-v3.1.20
ln -s /opt/etcd-v3.1.20 /opt/etcd

# Create the certificate, data and log directories
mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
cd /opt/etcd/certs

# Pull the certificates from k8s-manage onto each etcd node
scp k8s-manage:/opt/certs/ca.pem .
scp k8s-manage:/opt/certs/etcd-peer.pem .
scp k8s-manage:/opt/certs/etcd-peer-key.pem .

3. etcd startup script

  • Change the IPs and the --name on each etcd node
vim /opt/etcd/etcd-server-startup.sh

#!/bin/bash
./etcd --name etcd-server-7-21 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.4.7.21:2380 \
       --listen-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.4.7.21:2380 \
       --advertise-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380,etcd-server-7-23=https://10.4.7.23:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth  \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
  • Parameter explanations
# 7-21 means the etcd on 10.4.7.21; the name differs on each node.
./etcd --name etcd-server-7-21 \
# etcd data directory.
       --data-dir /data/etcd/etcd-server \
# Port 2380 is for internal (peer) traffic, i.e. communication among the three etcd members.
       --listen-peer-urls https://10.4.7.21:2380 \
# Port 2379 is for external traffic: the API and all client-facing services go through 2379.
       --listen-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
# Backend storage quota in bytes; plenty for this setup
       --quota-backend-bytes 8000000000 \
# The peer URL this member advertises to the rest of the cluster
       --initial-advertise-peer-urls https://10.4.7.21:2380 \
# The client URLs advertised to clients (etcdctl, curl, the apiserver); when --listen-client-urls is set, this must be set as well, even if it equals the default
       --advertise-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
# All etcd members with their IPs and ports.
       --initial-cluster  etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380,etcd-server-7-23=https://10.4.7.23:2380 \
# CA certificate
       --ca-file ./certs/ca.pem \
# etcd certificate
       --cert-file ./certs/etcd-peer.pem \
# Private key
       --key-file ./certs/etcd-peer-key.pem \
# Require client certificate authentication.
       --client-cert-auth  \
       --trusted-ca-file ./certs/ca.pem \
# Peer-to-peer TLS settings.
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
# Permissions
chmod +x /opt/etcd/etcd-server-startup.sh
chown etcd. /opt/etcd-v3.1.20 -R
chown -R etcd. /data/etcd/
chown -R etcd. /data/logs/etcd-server/

4. supervisor

  • Use supervisor to bring etcd up on all three etcd nodes
  • Change the first line ([program:...]) per node
vim /etc/supervisord.d/etcd-server.ini

[program:etcd-server-7-21]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)

  • Start
supervisorctl update
supervisorctl status

# supervisord starts etcd automatically.
netstat -lntup|grep etcd

# Check cluster health from any etcd node (do it on all three)
/opt/etcd/etcdctl cluster-health
member 8d20962cc5ec1910 is healthy: got healthy result from http://127.0.0.1:2379
member c70550c39873f8e1 is healthy: got healthy result from http://127.0.0.1:2379
member f7e8cbf831ee3326 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy


# Or list the members from any node (again, check each one); this shows who the leader is, here 10.4.7.21.
/opt/etcd/etcdctl member list
8d20962cc5ec1910: name=etcd-server-7-23 peerURLs=https://10.4.7.23:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.23:2379 isLeader=false
c70550c39873f8e1: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
f7e8cbf831ee3326: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=true
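As an optional smoke test (etcdctl in etcd v3.1 defaults to the v2 API, so plain set/get work against the local 127.0.0.1:2379 listener; the key name is arbitrary):

/opt/etcd/etcdctl set /smoke-test ok
ok
/opt/etcd/etcdctl get /smoke-test        # run this on another member: the value should have replicated
ok
/opt/etcd/etcdctl rm /smoke-test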

VIII. apiserver

1. Download the Kubernetes server binaries

  • Pick the version you want
  • On k8s-master01, k8s-master02, k8s-master03
wget https://dl.k8s.io/v1.19.10/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz  -C /opt/
cd /opt
mv kubernetes kubernetes-v1.19.10
ln -s /opt/kubernetes-v1.19.10 /opt/kubernetes

# The .tar files in this directory are Docker images we do not need; delete them and keep just the executables
cd /opt/kubernetes/server/bin
rm *.tar -f
rm *_tag -f

2. Client certificate

  • On k8s-manage
  • kube-apiserver needs to access etcd, so it needs a client certificate. Whichever component presents this client certificate is accepted by the server side, because the server certificates are issued by the same CA. So issue this client certificate set
  • This client certificate is effectively the cluster administrator's certificate
vim /opt/certs/client-csr.json

{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "xg",
            "OU": "ops"
        }
    ]
}

# Sign against the CA certificate
cd /opt/certs/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client

ll client*
-rw-r--r-- 1 root root  997 Sep 14 17:44 client.csr
-rw-r--r-- 1 root root  282 Sep 14 17:45 client-csr.json
-rw------- 1 root root 1675 Sep 14 17:44 client-key.pem
-rw-r--r-- 1 root root 1363 Sep 14 17:44 client.pem

3. apiserver certificate

  • On k8s-manage

  • Used for communication between the apiserver and the other Kubernetes components; be sure to include the VIP address

vim /opt/certs/apiserver-csr.json

{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23",
        "10.4.7.24"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "xg",
            "OU": "ops"
        }
    ]
}

# 192.168.0.1 is the first IP of the service CIDR and the cluster IP of the apiserver's kubernetes service (visible in the default namespace once the cluster is up); you can choose a different CIDR


# Sign
cd /opt/certs/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver

ll apiserver*
-rw-r--r-- 1 root root 1249 Dec 25 21:10 apiserver.csr
-rw-r--r-- 1 root root  566 Dec 25 21:09 apiserver-csr.json
-rw------- 1 root root 1679 Dec 25 21:10 apiserver-key.pem
-rw-r--r-- 1 root root 1598 Dec 25 21:10 apiserver.pem
  • Copy the certificates the apiserver needs
# On the control-plane nodes k8s-master01, k8s-master02, k8s-master03
mkdir /opt/kubernetes/server/bin/cert 
cd /opt/kubernetes/server/bin/cert

scp k8s-manage:/opt/certs/ca.pem .
scp k8s-manage:/opt/certs/ca-key.pem .
scp k8s-manage:/opt/certs/client.pem .
scp k8s-manage:/opt/certs/client-key.pem .
scp k8s-manage:/opt/certs/apiserver.pem .
scp k8s-manage:/opt/certs/apiserver-key.pem .

4. Audit log

  • On k8s-master01, k8s-master02, k8s-master03
  • The apiserver ships with an audit-log feature used for Kubernetes auditing; configuring it like this is the standard practice
# Create the conf directory under bin
mkdir /opt/kubernetes/server/bin/conf
cd /opt/kubernetes/server/bin/conf

# Create the audit policy
vim audit.yaml

apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

5. Startup script

vim  /opt/kubernetes/server/bin/kube-apiserver.sh

#!/bin/bash
./kube-apiserver \
  --apiserver-count 3 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.4.7.21:2379,https://10.4.7.22:2379,https://10.4.7.23:2379 \
  --service-account-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
  --v 2
  • Parameter explanations
vim  /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
# Three apiservers; with more resources you can run more.
  --apiserver-count 3 \
# Audit log path
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
# Audit rules come from this file, the one created above
  --audit-policy-file ./conf/audit.yaml \
# Authorization mode is RBAC, i.e. role-based access control
  --authorization-mode RBAC \
# Certificate settings
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.4.7.21:2379,https://10.4.7.22:2379,https://10.4.7.23:2379 \
  --service-account-key-file ./cert/ca-key.pem \
# Service CIDR; the apiserver's kubernetes service takes the first address (192.168.0.1 with this setting); configurable
  --service-cluster-ip-range 192.168.0.0/16 \
# NodePort range
  --service-node-port-range 3000-29999 \
 # Memory target in MB
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
# Log verbosity
  --v 2
chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
# Create the log directory
mkdir -p /data/logs/kubernetes/kube-apiserver

6. supervisor

  • Change the [program:...] name on each server
vim /etc/supervisord.d/kube-apiserver.ini

[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
# Reload supervisor so the apiserver starts
supervisorctl update
supervisorctl status
netstat -nltup|grep kube-api
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      17125/./kube-apiser 
tcp6       0      0 :::6443                 :::*                    LISTEN      17125/./kube-apiser 
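A hedged health probe: the local insecure port 8080 shown above answers /healthz, and the secure port 6443 can be checked with the client certificate:

curl http://127.0.0.1:8080/healthz
ok
curl --cacert /opt/kubernetes/server/bin/cert/ca.pem \
     --cert /opt/kubernetes/server/bin/cert/client.pem \
     --key /opt/kubernetes/server/bin/cert/client-key.pem \
     https://10.4.7.21:6443/healthz
ok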

7. Layer-4 proxy

  • On keepalived01 and keepalived02

  • haproxy is the usual choice here, but nginx is used instead; haproxy configurations can be found on GitHub if you prefer it

  • The layer-4 proxy needs nginx's stream module. The nginx 1.20.1 installed via yum builds stream only as a dynamic module that is not usable out of the box, so my approach is to install nginx with yum first and then add the stream module by recompiling.

1. Install nginx
vim /etc/yum.repos.d/nginx.repo

[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
yum -y install nginx
systemctl start nginx;systemctl enable nginx
vim /etc/nginx/nginx.conf
# Append at the very end of the main nginx config, after the closing brace of the http block
stream {
    upstream kube-apiserver {
        server 10.4.7.21:6443     max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443     max_fails=3 fail_timeout=30s;
        server 10.4.7.23:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}


# The stream (layer-4) module is not available
nginx -t 
nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:18
nginx: configuration file /etc/nginx/nginx.conf test failed
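Before recompiling, it may be worth checking whether your build already ships stream as a dynamic module (the nginx -V output below shows --with-stream=dynamic). If the module file exists on disk, loading it avoids the rebuild entirely; the path below is the conventional module directory and is an assumption about your package layout:

ls /usr/lib64/nginx/modules/ngx_stream_module.so
# if it exists, add this line at the very top of /etc/nginx/nginx.conf (outside all blocks):
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
nginx -t && systemctl reload nginx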
2. stream module
  • Skip this step if your nginx already has the stream module

Reference

# Download the source matching your installed version
wget http://nginx.org/download/nginx-1.20.1.tar.gz

tar xf nginx-1.20.1.tar.gz
cd nginx-1.20.1

# Install the build dependencies
yum -y install libxml2 libxml2-dev libxslt-devel gd-devel perl-devel perl-ExtUtils-Embed GeoIP GeoIP-devel GeoIP-data zlib zlib-devel --skip-broken gcc gcc-c++ autoconf automake gperftools openssl openssl-devel pcre pcre-devel

# Check the configure arguments of the yum-installed nginx
nginx -V
nginx version: nginx/1.20.1
built by gcc 4.8.5 20150623 (Red Hat 4.8.5-44) (GCC) 
built with OpenSSL 1.1.1g FIPS  21 Apr 2020
TLS SNI support enabled
configure arguments: --prefix=/usr/share/nginx --sbin-path=/usr/sbin/nginx --modules-path=/usr/lib64/nginx/modules --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --http-client-body-temp-path=/var/lib/nginx/tmp/client_body --http-proxy-temp-path=/var/lib/nginx/tmp/proxy --http-fastcgi-temp-path=/var/lib/nginx/tmp/fastcgi --http-uwsgi-temp-path=/var/lib/nginx/tmp/uwsgi --http-scgi-temp-path=/var/lib/nginx/tmp/scgi --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx --user=nginx --group=nginx --with-compat --with-debug --with-file-aio --with-google_perftools_module --with-http_addition_module --with-http_auth_request_module --with-http_dav_module --with-http_degradation_module --with-http_flv_module --with-http_gunzip_module --with-http_gzip_static_module --with-http_image_filter_module=dynamic --with-http_mp4_module --with-http_perl_module=dynamic --with-http_random_index_module --with-http_realip_module --with-http_secure_link_module --with-http_slice_module --with-http_ssl_module --with-http_stub_status_module --with-http_sub_module --with-http_v2_module --with-http_xslt_module=dynamic --with-mail=dynamic --with-mail_ssl_module --with-pcre --with-pcre-jit --with-stream=dynamic --with-stream_ssl_module --with-stream_ssl_preread_module --with-threads --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -m64 -mtune=generic' --with-ld-opt='-Wl,-z,relro -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -Wl,-E'


# Re-run configure with the same arguments plus --with-stream appended at the end
./configure --prefix=/usr/share/nginx --sbin-path=/usr/sbin/nginx --modules-path=/usr/lib64/nginx/modules --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --http-client-body-temp-path=/var/lib/nginx/tmp/client_body --http-proxy-temp-path=/var/lib/nginx/tmp/proxy --http-fastcgi-temp-path=/var/lib/nginx/tmp/fastcgi --http-uwsgi-temp-path=/var/lib/nginx/tmp/uwsgi --http-scgi-temp-path=/var/lib/nginx/tmp/scgi --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx --user=nginx --group=nginx --with-file-aio --with-ipv6 --with-http_ssl_module --with-http_v2_module --with-http_realip_module --with-stream_ssl_preread_module --with-http_addition_module --with-http_xslt_module=dynamic --with-http_image_filter_module=dynamic --with-http_sub_module --with-http_dav_module --with-http_flv_module --with-http_mp4_module --with-http_gunzip_module --with-http_gzip_static_module --with-http_random_index_module --with-http_secure_link_module --with-http_degradation_module --with-http_slice_module --with-http_stub_status_module --with-http_perl_module=dynamic --with-http_auth_request_module --with-mail=dynamic --with-mail_ssl_module --with-pcre --with-pcre-jit --with-stream=dynamic --with-stream_ssl_module  --with-debug --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -m64 -mtune=generic' --with-ld-opt='-Wl,-z,relro -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -Wl,-E' --with-stream
# Build the binary
 make -j2
 
# With the stream config in place, test the freshly built binary
objs/nginx -t
objs/nginx -V

# Replace the existing nginx binary (back it up first)
cp objs/nginx /usr/sbin/nginx

nginx -t 
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
systemctl reload nginx

8. HA

  • On keepalived01 and keepalived02

  • A VIP floating between the two nginx instances removes the apiserver single point of failure

1. keepalived MASTER configuration
  • On one of the two keepalived nodes
  • Only the master carries the nopreempt (non-preemptive) option.
    • In non-preemptive mode, when the master fails the VIP moves to the backup and stays there even after the master recovers; the master does not take it back. The catch: if the backup then also fails, the VIP is still on the backup, the master still will not claim it, and the apiserver becomes completely unreachable
    • My workaround is a cron-driven script on the backup that restarts keepalived whenever the proxy service is down; restarting keepalived sends the VIP back to the master (my own approach; non-preemptive mode is not used in production here)
  • The configuration below uses preemptive mode
yum -y install keepalived

# Delete all of the default configuration first
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
   router_id 10.4.7.11

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
#   nopreempt               # non-preemptive mode

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
2. keepalived BACKUP configuration
  • On the other keepalived node; the BACKUP node does not take nopreempt
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
global_defs {
    router_id 10.4.7.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    mcast_src_ip 10.4.7.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
systemctl enable keepalived
systemctl restart keepalived.service 


# Verify that the VIP has appeared
ip a
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:16:20:1d brd ff:ff:ff:ff:ff:ff
    inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.4.7.10/32 scope global eth0
       valid_lft forever preferred_lft forever
3. Check script
  • A script that monitors the apiserver port exposed by nginx; when the port disappears the VIP fails over to the other keepalived node
vim /etc/keepalived/check_port.sh

#!/bin/bash
#keepalived port-check script
#Usage, in keepalived.conf:
#vrrp_script check_port {                          # define a vrrp_script check
#    script "/etc/keepalived/check_port.sh 6379"   # port to monitor
#    interval 2                                     # check interval in seconds
#}

CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep -c $CHK_PORT`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cant Be Empty!"
fi

# Permissions
chmod +x /etc/keepalived/check_port.sh
4. Kernel parameters
  • On both keepalived nodes
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf			# enable IP forwarding
echo 'net.ipv4.ip_nonlocal_bind = 1' >>/etc/sysctl.conf		# allow binding to non-local IPs
sysctl -p 													# apply
systemctl restart keepalived.service

# Check the VIP; it should be on 10.4.7.11
ip a|grep '10.4.7.10'

# You can simulate a production failure: stop nginx and check that failover works, as sketched below.
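A sketch of that failover test, assuming keepalived01 currently holds the VIP:

# on keepalived01: stop the proxy; chk_nginx lowers the priority (100-20 < 90) and the VIP moves away
systemctl stop nginx
# on keepalived02: the VIP should appear within a few seconds
ip a | grep '10.4.7.10'
# on keepalived01: start nginx again; in preemptive mode the VIP comes back (priority 100 > 90)
systemctl start nginx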

IX. controller-manager

  • Control-plane nodes k8s-master01, k8s-master02, k8s-master03
  • controller-manager needs the CA certificate and key, which were already copied over during the apiserver step

1. Startup script

vim /opt/kubernetes/server/bin/kube-controller-manager.sh

#!/bin/bash
./kube-controller-manager \
  --cluster-cidr 172.7.0.0/16 \
  --leader-elect \
  --log-dir /data/logs/kubernetes/kube-controller-manager \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --root-ca-file ./cert/ca.pem \
  --v 2 
chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
mkdir -p /data/logs/kubernetes/kube-controller-manager

2. supervisor

  • Change the first line per node
vim /etc/supervisord.d/kube-controller-manager.ini

[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; retstart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                                                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
supervisorctl update
supervisorctl status
ps -ef|grep controller-manager

X. scheduler

  • On k8s-master01, k8s-master02, k8s-master03

  • No separate certificates are needed; by default it talks to the apiserver locally (127.0.0.1:8080)

1. Startup script

vim /opt/kubernetes/server/bin/kube-scheduler.sh

#!/bin/bash
./kube-scheduler \
  --leader-elect  \
  --log-dir /data/logs/kubernetes/kube-scheduler \
  --master http://127.0.0.1:8080 \
  --v 2
chmod +x  /opt/kubernetes/server/bin/kube-scheduler.sh
# Log directory
mkdir -p /data/logs/kubernetes/kube-scheduler

2. supervisor

vim /etc/supervisord.d/kube-scheduler.ini

[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; retstart at unexpected quit (default: true)
startsecs=30                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=true                                                     ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)
supervisorctl update
supervisorctl status
ps -ef|grep kube-scheduler

XI. Control-plane health check

  • On the control-plane nodes k8s-master01, k8s-master02, k8s-master03
# kubectl is the Kubernetes command-line tool; either symlink it or copy it into PATH
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl

# Cluster check
kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}  

# Everything healthy means the "bosses" of the cluster, the control-plane components, are up; there are no workers yet, i.e. no compute nodes

XII. kubelet

  • The kubelet does the real work in Kubernetes and runs on the compute nodes. With limited resources, k8s-master01, k8s-master02 and k8s-master03 serve as both control-plane and compute nodes

(I). kubelet certificate

  • On k8s-manage
  • The idea is to combine the CA certificate, the client certificate, and the kubelet certificate and private key into kubelet.kubeconfig
# As with the control-plane certificates above, list a few spare IPs to make adding nodes easier later
vim /opt/certs/kubelet-csr.json

{
    "CN":"k8s-kubelet",
    "hosts":[
        "127.0.0.1",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23",
        "10.4.7.24",
        "10.4.7.25",
        "10.4.7.26",
        "10.4.7.27",
        "10.4.7.28",
        "10.4.7.29",
        "10.4.7.30",
        "10.4.7.31",
        "10.4.7.32",
        "10.4.7.33"
    ],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "ST":"shenzhen",
            "L":"shenzhen",
            "O":"xg",
            "OU":"ops"
        }
    ]
}
# Sign the certificate
cd /opt/certs
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet

ll kubelet*
-rw-r--r-- 1 root root 1119 Sep 17 21:30 kubelet.csr
-rw-r--r-- 1 root root  483 Sep 17 21:28 kubelet-csr.json
-rw------- 1 root root 1675 Sep 17 21:30 kubelet-key.pem
-rw-r--r-- 1 root root 1464 Sep 17 21:30 kubelet.pem
  • On the compute nodes k8s-master01, k8s-master02, k8s-master03
# On the compute nodes k8s-master01, k8s-master02, k8s-master03
# If the compute nodes were separate machines from the control plane, you would unpack the same Kubernetes tarball on them, pull over the kubelet certificate and key, and use only the kubelet binary
cd /opt/kubernetes/server/bin/cert
scp k8s-manage:/opt/certs/kubelet.pem .
scp k8s-manage:/opt/certs/kubelet-key.pem .

(II). Create the kubeconfig

  • On the compute nodes: pick any one of them and do this once. I ran it on k8s-master01

  • kubectl is the cluster's command-line management tool. By default it reads the kube-apiserver address, certificates and user name from ~/.kube/config; without that configuration kubectl commands may fail

  • There are four main steps, the most involved configuration of a binary install. Every command below must be run from **/opt/kubernetes/server/bin/conf**, the directory that holds the Kubernetes config files

  • In plain terms this is a set of RBAC operations whose goal is to create an ordinary user for the kubelet

1. set-cluster

  • Set the cluster
cd /opt/kubernetes/server/bin/conf

# Only this file exists in the directory so far
ll
-rw-r--r-- 1 root root 2223 Sep 14 18:12 audit.yaml

# Set the cluster parameters and run:
kubectl config set-cluster prod-k8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=kubelet.kubeconfig

# Explanation
# set-cluster prod-k8s 			arbitrary cluster name
# --certificate-authority		CA certificate file
# --embed-certs					embed the content of ca.pem into the generated kubelet.kubeconfig (without it, only the file path is written)
# --server						the VIP address through which the apiserver is reached
# --kubeconfig					output file
# This creates a file in the current directory. Its base64 data can be decoded and compared with ca.pem; they are identical (see the sketch after the file listing below).
cat kubelet.kubeconfig 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURyakNDQXBhZ0F3SUJBZ0lVUml4WU52UDBSN1JNWWZVVWtuY1JudkNROFc4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1hURUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0hOb1pXNTZhR1Z1TVJFd0R3WURWUVFIRXdoegphR1Z1ZW1obGJqRUxNQWtHQTFVRUNoTUNlR2N4RERBS0JnTlZCQXNUQTI5d2N6RU5NQXNHQTFVRUF4TUVUVzl2CmJqQWVGdzB5TVRBNU1UUXdNekkwTURCYUZ3MDBNVEE1TURrd016STBNREJhTUYweEN6QUpCZ05WQkFZVEFrTk8KTVJFd0R3WURWUVFJRXdoemFHVnVlbWhsYmpFUk1BOEdBMVVFQnhNSWMyaGxibnBvWlc0eEN6QUpCZ05WQkFvVApBbmhuTVF3d0NnWURWUVFMRXdOdmNITXhEVEFMQmdOVkJBTVRCRTF2YjI0d2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDOWoxOWc0UU53THh1YmJKQTAvb05tLy8zL0JYWDE0dEJDKzhKdndOV3oKSlBFbmN6b2x4M2dmZDFuRTZ0TmpMVmlUeGFpd3k0bVBkdk1RYzlLRUxmVUgrT3U3dkJ6SkM5TUtLaXJpSU5nMAppd3JsUUZtMXdmWGF6SkdOVm5sazdUY2Q3T3lRRzhHQnJFYXYzdjd1Y1RXTVIwTGMrT1ZrTlJtUmx1WlJCNWNqCmI2cGJOS1htR2ZwTVR1MjN3Z1lEQnBJT1REc21yclpTc0QrZ3VpL0MrKzdwR2ZBOE1mZ2kzdC9kcUkzQWlzcG8KV2xTcU5CSnZJRGw1b08yb3JLMHlSbTlERmYyc3JrSFUvNnRlQWZsNW9ERTNHanFwNmFkRWlOdXVOWmFBSFZGVwpBYS9RSGJUdXROL2NtTG80SmlBWGNMc0lUeVZVMzJ5L2lYOVQ4VXJUTWJ3OUFnTUJBQUdqWmpCa01BNEdBMVVkCkR3RUIvd1FFQXdJQkJqQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUNNQjBHQTFVZERnUVdCQlJhUHkvQm0wWTUKZnNYU0F0d3k4ZzB5VHB4V2tUQWZCZ05WSFNNRUdEQVdnQlJhUHkvQm0wWTVmc1hTQXR3eThnMHlUcHhXa1RBTgpCZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFFbHc0OUhhOFVIaHhYTmdrSlgzM0dkM0dxTEcyVXlEQXRqR2V5cjRqCkpNMUpSVmlSaU81WmxKWldqUWFNZnoxNUlJTk0vSWt6MHhBbmFhL0c5ZmlEN25wUlRCNThiZHRGcWhEdVVPQkkKczliVlB3THZFdEFVNnFWWmFxa2g0Tk9xRDdlU3lZbkJENUJFd0g5S2pRZnd3dkpSOHhtTG5VZEtmb0pZOG9LSwpaSEpnbWVGQnFlY0pqVW5ZVkxRQkdsYUxkRllBL0dYdGZpc21VYUFRUW9EWTZlSkpCV2tUU0E3RFd4M3gxYXBuCjF1STFERWlyS1dXbVQrTzZyUkRxNjljVjIzQTRoMU1COTdZeEJJSnFGb0pWK2ZrRzBUTWNyT0N1RHVpUTBOVkYKekVHM2szRTV6emE5UUVSYkp6U1NhUTRDd0xyR2QxTW1FNjdSQzJ6cUtZMFc1QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://10.4.7.10:7443
  name: prod-k8s
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

2. set-credentials

  • Set the user and its credentials
  • Embed the client certificate and private key into the kubeconfig file
# Set the client credentials; stay in /opt/kubernetes/server/bin/conf

kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
--client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig

# Explanation: --client-certificate / --client-key are the client certificate and key issued earlier; --embed-certs embeds them into the kubeconfig

3. set-context

  • Set the context
# Set the context parameters
kubectl config set-context prod-k8s-context \
--cluster=prod-k8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig

4. use-context

  • Use the context. The three steps above create the kubelet user account; this step makes the kubeconfig use it
kubectl config use-context prod-k8s-context --kubeconfig=kubelet.kubeconfig
# You will usually manage several Kubernetes clusters and switch contexts between them for efficiency; with a binary install you must point at the kubeconfig file explicitly: --kubeconfig=kubelet.kubeconfig

# Show the current context
kubectl config current-context --kubeconfig=kubelet.kubeconfig
prod-k8s-context

# Switch context
kubectl config use-context prod-k8s-context --kubeconfig=kubelet.kubeconfig

5. Create the role and bind compute-node permissions

  • Apply the resource manifest. In Kubernetes everything is a resource and is written into etcd when created, so this only needs to be done on 10.4.7.21; the other nodes can skip it
  • Its purpose is to grant k8s-node the permissions of a compute node
# Create a ClusterRoleBinding named k8s-node that binds the user k8s-node to the ClusterRole system:node
# The user name must match k8s-node (the CN of the client certificate)
vim /opt/kubernetes/server/bin/conf/k8s-node.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
kubectl create -f /opt/kubernetes/server/bin/conf/k8s-node.yaml

# Check the binding just created
kubectl get clusterrolebinding k8s-node

6. Copy kubelet.kubeconfig

  • On the other compute nodes k8s-master02 and k8s-master03
  • Copy kubelet.kubeconfig over to them
# On 10.4.7.22 just pull the kubelet.kubeconfig created above; there is no need to create it again.
cd /opt/kubernetes/server/bin/conf
scp k8s-master01:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .

# Then try the command on the other compute nodes; it works there as well
kubectl get clusterrolebinding k8s-node

7. pause

  • On k8s-manage
  • Every pod runs a pause container whose network stack and volumes are shared by the pod's other containers. The pause container starts first and claims the resources defined in the pod spec; only then do the business containers start, sharing the pause container's resources
  • It initializes the network, IPC and UTS namespaces
# In the Harbor UI create a new project and set its access level to public
public

# Log in to Harbor
docker login harbor.xg.com

# Pull the image
docker pull kubernetes/pause
docker images

# Tag it; unlike what the Harbor UI suggests, no port is needed because nginx proxies it on port 80
docker tag f9d5de079539 harbor.xg.com/public/pause:latest

# Push to Harbor
docker push harbor.xg.com/public/pause:latest

(III). Start the kubelet

1. Script

  • On the compute nodes k8s-master01, k8s-master02, k8s-master03
# Change --hostname-override per node
vim /opt/kubernetes/server/bin/kubelet.sh

#!/bin/bash
./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
  --hostname-override k8s-master01.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.xg.com/public/pause:latest \
  --root-dir /data/kubelet
  • Parameter explanations
#!/bin/bash
./kubelet \
# Anonymous requests are rejected; everything has to authenticate through the apiserver
  --anonymous-auth=false \
  --cgroup-driver systemd \
# The cluster DNS address; groundwork for deploying coredns later
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
# The hostname this node uses inside the cluster
  --hostname-override k8s-master01.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
# Log directory; must be created
  --log-dir /data/logs/kubernetes/kube-kubelet \
# The pause image pushed earlier; the kubelet requires it.
  --pod-infra-container-image harbor.xg.com/public/pause:latest \
# Root data directory; must be created
  --root-dir /data/kubelet
chmod +x /opt/kubernetes/server/bin/kubelet.sh
mkdir -p /data/logs/kubernetes/kube-kubelet   /data/kubelet

2. supervisor

  • On the compute nodes k8s-master01, k8s-master02, k8s-master03
vim /etc/supervisord.d/kube-kubelet.ini

[program:kube-kubelet-7-21]
command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
numprocs=1                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
autostart=true                                    ; start at supervisord start (default: true)
autorestart=true                                  ; retstart at unexpected quit (default: true)
startsecs=30                                      ; number of secs prog must stay running (def. 1)
startretries=3                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                       ; emit events on stdout writes (default false)
supervisorctl  update
supervisorctl  status
ps -ef|grep kubelet

(IV). Verify the cluster

# Check the cluster; the basic cluster is now up
kubectl get node
NAME                    STATUS   ROLES    AGE   VERSION
k8s-master01.host.com   Ready    <none>   81s   v1.19.10
k8s-master02.host.com   Ready    <none>   85s   v1.19.10
k8s-master03.host.com   Ready    <none>   83s   v1.19.10

# Label the nodes
kubectl label node k8s-master01.host.com node-role.kubernetes.io/master=
kubectl label node k8s-master01.host.com node-role.kubernetes.io/node=
kubectl label node k8s-master02.host.com node-role.kubernetes.io/node=
kubectl label node k8s-master02.host.com node-role.kubernetes.io/master=
kubectl label node k8s-master03.host.com node-role.kubernetes.io/master=
kubectl label node k8s-master03.host.com node-role.kubernetes.io/node=


# 再看
kubectl get nodes 
NAME                    STATUS   ROLES         AGE   VERSION
k8s-master01.host.com   Ready    master,node   11m   v1.19.10
k8s-master02.host.com   Ready    master,node   11m   v1.19.10
k8s-master03.host.com   Ready    master,node   11m   v1.19.10

(五). 证书管理

  • 所有证书都要做好监控,提前做好续签证书的准备
  • kubelet.kubeconfig里内嵌的证书是最经常需要续签的,集群中可以有多套kubelet的config文件

1. kubeconfig文件管理

  • kubeconfig文件其实是k8s用户的配置文件,拿着这个配置文件就可以根据这个用户的权限管理集群

    • k8s有服务账户(ServiceAccount)和用户账户两种,kubeconfig就是用户的配置文件
  • 拿到这个文件,就可以找到CA证书,就可以基于这个CA证书自签其他的证书,从而实现替换证书

(图: images/image-20210919160153662.png)

# 将kubeconfig中client-certificate-data字段的内容进行base64反解,可以获得client证书信息(这里是k8s-node这个client证书);同理certificate-authority-data可以反解出CA证书信息
echo '' |base64 -d >test.pem
cfssl-certinfo -cert test.pem
{
  "subject": {
    "common_name": "k8s-node",
    "country": "CN",
    "organization": "xg",
    "organizational_unit": "ops",
    "locality": "shenzhen",
    "province": "shenzhen",
    "names": [
      "CN",
      "shenzhen",
      "shenzhen",
      "xg",
      "ops",
      "k8s-node"
    ]
  },
  "issuer": {
    "common_name": "Moon",
    "country": "CN",
    "organization": "xg",
    "organizational_unit": "ops",
    "locality": "shenzhen",
    "province": "shenzhen",
    "names": [
      "CN",
      "shenzhen",
      "shenzhen",
      "xg",
      "ops",
      "Moon"
    ]
  },
  "serial_number": "614464158020354523485459573766198111263928651052",
  "not_before": "2021-09-14T09:40:00Z",
  "not_after": "2041-09-09T09:40:00Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "5A:3F:2F:C1:9B:46:39:7E:C5:D2:2:DC:32:F2:D:32:4E:9C:56:91",
  "subject_key_id": "24:99:E0:DC:86:38:1D:82:C3:D1:CC:BE:8F:65:F8:F0:51:3C:B8:20",
  "pem": "-----BEGIN CERTIFICATE-----\nMIIDwTCCAqmgAwIBAgIUa6GGmlbZG+vhHVafjgeZrzTQjSwwDQYJKoZIhvcNAQEL\nBQAwXTELMAkGA1UEBhMCQ04xETAPBgNVBAgTCHNoZW56aGVuMREwDwYDVQQHEwhz\naGVuemhlbjELMAkGA1UEChMCeGcxDDAKBgNVBAsTA29wczENMAsGA1UEAxMETW9v\nbjAeFw0yMTA5MTQwOTQwMDBaFw00MTA5MDkwOTQwMDBaMGExCzAJBgNVBAYTAkNO\nMREwDwYDVQQIEwhzaGVuemhlbjERMA8GA1UEBxMIc2hlbnpoZW4xCzAJBgNVBAoT\nAnhnMQwwCgYDVQQLEwNvcHMxETAPBgNVBAMTCGs4cy1ub2RlMIIBIjANBgkqhkiG\n9w0BAQEFAAOCAQ8AMIIBCgKCAQEAseGmtbgxDCpW0Hl3MbUZA3omlqygjphDxQnf\nnVx/Lw/+O6G2VcyjigVVnUsS17VWSJT9ZEze2C2dPx+RiL22zoeoro8KokKetF1Y\nRqiPIoUi8zKA31tm7tpjlbHcWsW65OimRBUVtULP+RIpeMjVu2YbGue1BgN/DN0/\nVFOj9RMbnmOmt3AgqDFf7V6yuaGtCg7K1UsCjYUDFXarGBseN6USXGUsLJDZVonX\nvqX+YHbnLVKG/aM6x3T9Xl3XxmhHu46KOcJdoNHZaZgcKYhD3QgpIPWwvHL5RqKF\noxru7hDFx10xxvZY7gUt1e2gKTsP4T0VqhIimBI5mWvrkAXB/wIDAQABo3UwczAO\nBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIw\nADAdBgNVHQ4EFgQUJJng3IY4HYLD0cy+j2X48FE8uCAwHwYDVR0jBBgwFoAUWj8v\nwZtGOX7F0gLcMvINMk6cVpEwDQYJKoZIhvcNAQELBQADggEBAAvPqA57ip7sTBIu\n6NYFiVL9s+KjuFX/bO4piNFHdSC1IzkaO9zHU9Y7zy4acjzqLfwySMs/a2uoLM7q\nZWv7CffRJDYlcEGF4jjGQsJrtTScEIJRcmrlKBjWxrzGTirNl0LW5oFssGve+4tR\nULPTw2l2j/THKe2v5CIBguYwI8N3dlaCbptzB6A6fcY4KJUiLQPx0nQfePA0jfyg\nX/kei8pYRbY6FaF+a3EogRhO9piZH++dRV3JzVjU8AT3ATvNXEoA4l28K7v9rhOp\nEDFX9Hdpj8dWdGqA5p/VDdWEo5oAuq0jSWbchme16TP1jnhiIC9EwleW1SOOqVWj\nwm1Gk5o=\n-----END CERTIFICATE-----\n"
}
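
# 也可以直接从 kubelet.kubeconfig 文件里取出 client-certificate-data 字段来反解,效果相同(仅为示意,路径按前面部署时的实际路径)
grep 'client-certificate-data' /opt/kubernetes/server/bin/conf/kubelet.kubeconfig | awk '{print $2}' | base64 -d > test.pem
cfssl-certinfo -cert test.pem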

十三. kube-proxy

  • 维护集群中的iptables或者IPVS规则,我使用的是IPVS
  • 起到连接pod网络和集群网络的作用
  • 维护节点网络,维护集群网络,维护pod网络

(一). kube-proxy证书

  • k8s-manage
# CN对应的是k8s中的角色名:kube-proxy,具体看如下的set-context动作,如果随便定义CN,那么就和k8s-node一样创建一个用户,绑定权限
vim /opt/certs/kube-proxy-csr.json

{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "xg",
            "OU": "ops"
        }
    ]
}
cd /opt/certs
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client

ll kube-proxy-*
-rw-r--r-- 1 root root 1005 12月 27 10:37 kube-proxy-client.csr
-rw------- 1 root root 1675 12月 27 10:37 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 12月 27 10:37 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 12月 27 10:37 kube-proxy-csr.json

# 在10.4.7.21上拉取kube-proxy秘钥和证书,只需要在任意一台执行,生成的文件拷贝到其他节点即可
cd /opt/kubernetes/server/bin/cert
scp k8s-manage:/opt/certs/kube-proxy-client.pem .
scp k8s-manage:/opt/certs/kube-proxy-client-key.pem .

(二). 创建配置

1. set-cluster

  • 运算节点,在k8s-master01上设置集群,只需要操作一次,之后像kubelet一样把生成的配置文件拷贝到其他节点即可
  • 以下操作一定一定要在**/opt/kubernetes/server/bin/conf**目录下
cd /opt/kubernetes/server/bin/conf

kubectl config set-cluster prod-k8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=kube-proxy.kubeconfig
# 和kubelet配置一样,也会生成一个kube-proxy.kubeconfig文件

2. set-credentials

  • 设置证书,设置user
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
  --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

3. set-context

  • 设置上下文
kubectl config set-context prod-k8s-context \
--cluster=prod-k8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

4. use-context

  • 使用上下文
kubectl config use-context prod-k8s-context --kubeconfig=kube-proxy.kubeconfig

5. 拷贝kube-proxy.kubeconfig

  • 其他节点拷贝刚创建的kube-proxy.kubeconfig文件
cd /opt/kubernetes/server/bin/conf
scp -rp k8s-master01:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig .

(三). IPVS

  • ipvs相当于内嵌一套lvs,lvs是相当强大的.云厂商,比如阿里用的是iptables,新版的k8s基本用的是ipvs
  • 运算节点 k8s-master01,k8s-master02,k8s-master03

1. 加载模块

# 查看是否有IP_VS所有模块
lsmod |grep ip_vs
# 没有就编写脚本
vim /root/ipvs.sh

#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done

# 执行
sh /root/ipvs.sh

# 再看已经加载了
lsmod |grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_wlc              12519  0 
ip_vs_sh               12688  0 
ip_vs_sed              12519  0 
ip_vs_rr               12600  0 
ip_vs_pe_sip           12740  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 
ip_vs                 145497  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack          133095  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

2. 安装IPVS

# 安装
yum install ipvsadm -y

(四). 启动kube-proxy

  • 运算节点k8s-master01,k8s-master02,k8s-master03上

1. 脚本

  • 不同节点--hostname-override需要改为主机名
# 修改hostname-override
vim /opt/kubernetes/server/bin/kube-proxy.sh

#!/bin/bash
./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override k8s-master01.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig

# 权限
chmod +x /opt/kubernetes/server/bin/kube-proxy.sh 
# 日志目录
mkdir -p /data/logs/kubernetes/kube-proxy

2. supervisor

  • 运算节点k8s-master01,k8s-master02,k8s-master03上,注意[program:kube-proxy-7-21]在其他节点上要改成对应的7-22/7-23
vim /etc/supervisord.d/kube-proxy.ini

[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; restart at unexpected quit (default: true)
startsecs=30                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                          ; emit events on stdout writes (default false)
supervisorctl update

# 全部都要起来
supervisorctl status
etcd-server-7-21                 RUNNING   pid 6505, uptime 2:49:13
kube-apiserver-7-21              RUNNING   pid 6522, uptime 2:49:13
kube-controller-manager-7-21     RUNNING   pid 6483, uptime 2:49:13
kube-kubelet-7-21                RUNNING   pid 16428, uptime 1:14:48
kube-proxy-7-21                  RUNNING   pid 31802, uptime 0:00:33
kube-scheduler-7-21              RUNNING   pid 6488, uptime 2:49:13

3. 验证

# 查看
ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.4.7.21:6443               Masq    1      0          0         
  -> 10.4.7.22:6443               Masq    1      0          0         
  -> 10.4.7.23:6443               Masq    1      0          0 
  
#  查看apiserver的svc
kubectl get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   3d1h

(五). 命令补全

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)

# 写入bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc

十四. CNI

  • 所有运算节点k8s-master01,k8s-master02,k8s-master03上都要装flannel
  • 这里采用flannel网络,各大云厂商基本都用flannel,其实和calico效率差不多,足够用
  • 实现docker的容器之间互相通信,常见的网络插件有
    • Flannel
    • Calico
    • canal

1. 安装

  • 这里使用host-gw类型,后面可能还需要改

官方下载

wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz

mkdir /opt/flannel-v0.11.0
tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
ln -s /opt/flannel-v0.11.0/ /opt/flannel
mkdir -p  /opt/flannel/certs && cd /opt/flannel/certs

# 证书
scp k8s-manage:/opt/certs/ca.pem .
scp k8s-manage:/opt/certs/client.pem .
scp k8s-manage:/opt/certs/client-key.pem .

2. 运行

  • 配置文件
# 22上将地址改为172.7.22.1/24,23上也得改
vim /opt/flannel/subnet.env

FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
  • 启动脚本
# 修改--public-ip,--etcd-endpoints=https,--iface
vim /opt/flannel/flanneld.sh

#!/bin/bash
./flanneld \
       --public-ip=10.4.7.21 \
       --etcd-endpoints=https://10.4.7.21:2379,https://10.4.7.22:2379,https://10.4.7.23:2379 \
       --etcd-keyfile ./certs/client-key.pem \
	   --etcd-certfile ./certs/client.pem \
	   --etcd-cafile ./certs/ca.pem \
	   --iface=eth0 \
	   --subnet-file=./subnet.env \
       --healthz-port=2401
chmod +x /opt/flannel/flanneld.sh
mkdir -p /data/logs/flanneld

3. etcd

  • 在etcd中添加信息,设置flannel网络类型为host-gw类型
  • flannel除了host-gw,还有VxLAN等类型
# 在任何一个装有etcd的节点上执行都可以(它们是一个集群),我是在etcd的leader节点上执行的,我这里22是leader
/opt/etcd/etcdctl member list| grep 'isLeader=true'
c70550c39873f8e1: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=true


# etcd中设置key,host-gw类型
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type": "host-gw"}}'

# 验证这个key,查看设置的key是否一致
/opt/etcd/etcdctl get /coreos.com/network/config

4. supervisor

# 修改第一行
vim /etc/supervisord.d/flannel.ini

[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh                             ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/opt/flannel                                       ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; restart at unexpected quit (default: true)
startsecs=30                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=true                                         ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log       ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)
supervisorctl update
supervisorctl status

5. 验证

# ping一下不同宿主机的容器ip

# 看下10.4.7.21上的路由,也是我们flannel的工作原理,想要去到172.7.22.0/24或者172.7.23.0/24都要经过网关10.4.7.22和10.4.7.23
route -n 
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.4.7.254      0.0.0.0         UG    100    0        0 eth0
10.4.7.0        0.0.0.0         255.255.255.0   U     100    0        0 eth0
172.7.21.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0
172.7.22.0      10.4.7.22       255.255.255.0   UG    0      0        0 eth0
172.7.23.0      10.4.7.23       255.255.255.0   UG    0      0        0 eth0
# k8s-manage上
# pull nginx镜像
docker pull nginx
docker tag nginx:latest harbor.xg.com/public/nginx
docker rmi nginx:latest
docker push harbor.xg.com/public/nginx
  • 编写yaml
vim test-cluster.yaml

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.xg.com/public/nginx
        ports:
        - containerPort: 80
kubectl create -f test-cluster.yaml

kubectl get pod -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP           NODE                    NOMINATED NODE   READINESS GATES
nginx-ds-5cbc7645b5-9qxtm   1/1     Running   0          9s    172.7.21.2   k8s-master01.host.com   <none>           <none>
nginx-ds-5cbc7645b5-qb4fl   1/1     Running   0          9s    172.7.22.2   k8s-master02.host.com   <none>           <none>
nginx-ds-5cbc7645b5-sgdxx   1/1     Running   0          9s    172.7.23.2   k8s-master03.host.com   <none>           <none>
nginx-ds-5cbc7645b5-zthv9   1/1     Running   0          9s    172.7.22.3   k8s-master02.host.com   <none>           <none>

6. iptables-services

  • 运算节点,k8s-master01,k8s-master02,k8s-master03
# centOS7默认没装这个包.
yum -y install iptables-services

systemctl start iptables.service;systemctl enable iptables.service
# 我们需要优化这条规则: POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE,我们就先干掉这个规则,然后重新添加一条规则

# 查看规则
iptables-save | grep -i postrouting
:POSTROUTING ACCEPT [682118:110296514]
:POSTROUTING ACCEPT [12:748]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE

# 干掉POSTROUTING链上SNAT规则,22上ip需要改
iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o  docker0 -j MASQUERADE

# 添加规则,意思是源地址为172.7.21.0/24,目标地址不是172.7.0.0/16(这是docker的大网段),或者是网络发包不是docker0出网的,才进行SNAT做转换,22上ip需要改
iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

# 查看添加的规则
iptables-save | grep -i postrouting
-A POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

# 查看默认转发策略
iptables-save|grep -i reject

# 删除默认的转发策略
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited

# ping非自己的起的容器地址,kubectl logs 看非自己的容器日志是否显示容器的真实ip
ping 172.7.22.2

7. CNI原理

1. host-gw
# host-gw原理
	所有节点必须在同一个二层网络下,这是host-gw的前提,host-gw的效率非常高

	实际就是创建了静态路由:节点与节点之间的eth0本来就能通信,容器想访问其他节点上的容器时,host-gw在宿主机上加一条路由,把去往对端容器网段的流量的网关指向对端节点的eth0地址;对端节点也加上同样的静态路由,最终实现不同节点之间容器互相通信
	
# 实现
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type": "host-gw"}}'
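
# flanneld 在各节点下发的效果,等价于手工加类似下面的静态路由(以 10.4.7.21 为例,仅作示意,实际由 flanneld 自动维护)
route add -net 172.7.22.0/24 gw 10.4.7.22
route add -net 172.7.23.0/24 gw 10.4.7.23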

(图: images/image-20210919220530753.png)

2. vxlan
# vxlan原理
	节点和节点不用在同一个二层网络下
	每个节点虚拟出一张flannel.1网卡,通过它在节点之间打通一条flannel网络隧道:请求进入一个节点的容器后先回到宿主机,宿主机通过flannel.1网卡进入隧道,到达对端节点的flannel.1网卡,再由对端宿主机转给目标容器,从而实现跨宿主机容器之间通信
	
# 实现
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type": "VxLAN"}}'
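
# 切到 VxLAN 后,可以在节点上看到 flannel.1 这块虚拟网卡(仅为示意,需已切换为 VxLAN)
ip -d link show flannel.1
bridge fdb show dev flannel.1    # 查看指向其他节点的隧道转发表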

(图: images/image-20210919221941731.png)

3. 直接路由
  • 自动判断节点是否在同一二层网络,从而来决定你使用什么网络选型
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type": "VxLAN", "DirectRouting": true}}'
4. 切换网络
# 1. 停flannel
# 2. 删路由规则
	route del -net 172.7.22.0/24 gw 10.4.7.22 
# 3. 删etcd中设置的key
	/opt/etcd/etcdctl rm /coreos.com/network/config
# 4. etcd重新设置key为VxLAN
	/opt/etcd/etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type": "VxLAN"}}'
# 5. 启动flannel

十五. 内网配置清单

  • k8s-manage上
# yum 安装nginx

vim /etc/nginx/conf.d/k8s-yaml.xg.com.conf

server {
    listen       80;
    server_name  k8s-yaml.xg.com;

    location / {
	autoindex on;
	default_type text/plain;
	root /data/k8s-yaml;
    }
}

# 目录
mkdir /data/k8s-yaml
systemctl reload nginx
  • DNS, keepalived01上
vim /var/named/xg.com.zone 

$ORIGIN xg.com.
$TTL 600        ; 10 minutes
@               IN SOA dns.xg.com. dnsadmin.xg.com. (
                               2021091403 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                               NS   dns.xg.com.
$TTL 60 ; 1 minute
dns                A   10.4.7.11
harbor             A   10.4.7.200
k8s-yaml           A   10.4.7.200
systemctl restart named

# 测试
dig -t A k8s-yaml.xg.com @10.4.7.11 +short
10.4.7.200
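
# 也可以直接 curl 验证 nginx 的 autoindex 是否生效(示意,此时目录里可能还是空的)
curl http://k8s-yaml.xg.com/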

十六. coredns

  • 维护k8s环境DNS
  • k8s的服务发现,因为pod IP是不断变化的,为了维持服务的接入点不变,引入了svc资源配合coredns来实现自动关联和服务发现
  • 服务名和IP自动关联起来,这就是k8s的服务发现

1. iptables规则

  • 装完CNI后,如果不改iptables规则,容器里看到的请求日志显示的是节点IP.这里再具体说一下原理,虽然和上面有重复
# 先curl下22节点上的nginx容器
kubectl exec -ti nginx-ds-5cbc7645b5-zthv9 bash -- curl 172.7.22.2

# 然后在22上看nginx容器的access日志,发现日志中记录的不是172.7.22.3,而是宿主机的IP地址,这将毫无意义,我们需要让日志记录容器真实的IP地址
kubectl logs -f nginx-ds-5cbc7645b5-qb4fl

(图: images/image-20210919230655412.png)

# 查看iptables规则,发现有-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE规则
iptables-save | grep -i postrouting
:POSTROUTING ACCEPT [19232408:3137293681]
:POSTROUTING ACCEPT [74:4468]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  • SNAT规则转换是为了k8s容器之间坦诚相见,不映射为宿主机IP
# 我们需要优化这条规则: POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE,我们就先干掉这个规则,然后重新添加一条规则

# 干掉规则,22和23上ip需要改
iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o  docker0 -j MASQUERADE

# 添加规则,意思是源地址为172.7.21.0/24,目标地址不是172.7.0.0/16(这是docker的大网段),或者是网络发包不是docker0出网的,才进行SNAT做转换,22和23上ip需要改
iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

# 查看添加的规则
iptables-save |grep -i postrouting
-A POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

# 将规则保存在文件中
iptables-save >/etc/sysconfig/iptables

# 查看默认转发策略
iptables-save|grep -i reject
# 删除默认的转发策略
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited

# ping非自己的起的容器地址
ping 172.7.22.2

2. 安装

  • 在k8s-manage上
docker pull coredns/coredns:1.6.1

docker tag c0f6e815079e harbor.xg.com/public/coredns:v1.6.1

docker push harbor.xg.com/public/coredns:v1.6.1


cd /data/k8s-yaml/
mkdir coredns && cd coredns

3. 配置清单

  • 在自建内网yaml创建

官网

mkdir /data/k8s-yaml/coredns && cd /data/k8s-yaml/coredns
1. rbac
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
2. cm
# 配置文件根据自己环境改
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.4.7.11
        cache 30
        loop
        reload
        loadbalance
    }
3. deploy
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.xg.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
4. svc
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

4. k8s部署coredns

# 在10.4.7.21上执行,如果起不来,请检查iptables规则.
kubectl apply -f http://k8s-yaml.xg.com/coredns/rbac.yaml
kubectl apply -f http://k8s-yaml.xg.com/coredns/cm.yaml
kubectl apply -f http://k8s-yaml.xg.com/coredns/deploy.yaml
kubectl apply -f http://k8s-yaml.xg.com/coredns/svc.yaml
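
# 部署完可以先确认 coredns 的 pod 和 svc 都正常(示意)
kubectl get pod -n kube-system -o wide | grep coredns
kubectl get svc -n kube-system | grep coredns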

5. coredns验证

# 创建一个nginx的svc
vim nginx-svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: nginx-dp
  labels:
    app: nginx
spec:
  ports:
  - name: nginx
    protocol: "TCP"
    port: 80
    targetPort: 80
  selector:
    app: nginx-ds
kubectl create -f nginx-svc.yaml

kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1      <none>        443/TCP   130m
nginx-dp     ClusterIP   192.168.63.183   <none>        80/TCP    2m27s
dig -t A www.baidu.com @192.168.0.2 +short
www.a.shifen.com.
182.61.200.6
182.61.200.7

dig -t A k8s-manage.host.com @192.168.0.2 +short
10.4.7.200


# 随便找一个svc,dig看一下是否解析为IP地址,格式为: ${svc_name}.${namespace}.svc.cluster.local. @${coredns接入点ip} +short,可以对比一下解析出来的地址和get svc看到的ip是否一致
dig -t A nginx-dp.default.svc.cluster.local. @192.168.0.2 +short
192.168.63.183


# curl svc的正确姿势是进入pod容器中,然后curl ${svc_name}.${namespace}.svc.cluster.local,其中 cluster.local 为指定的集群域名. 在node节点上是curl不到的,也就是说coredns只在k8s集群内部才生效,那怎么样才能将服务暴露出去呢?
curl nginx-dp.default.svc.cluster.local

十七. 服务暴露

(一). 服务暴露方式

  • k8s中的svc想要将服务暴露到集群外,可选的方式有NodePort、Ingress以及LoadBalancer
  • k8s的svc默认是ClusterIP

1. NodePort

  • NodePort无法使用kube-proxy的ipvs模型,只能用iptables模型. NodePort实际就是利用iptables规则,在访问宿主机IP加端口的时候把流量引到容器里面,示意见下
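  • 一个NodePort类型svc的写法示意(沿用前面nginx-ds的标签选择器;nginx-np这个名字和31080这个端口都是假设的):
vim nginx-np.yaml

apiVersion: v1
kind: Service
metadata:
  name: nginx-np
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: nginx
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 31080      # 创建后集群外通过 任意宿主机IP:31080 访问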

2. ingress

  • Ingress只能调度并暴露7层应用,特指http和https协议
  • Ingress实际就是一组基于域名和URL路径,把用户的请求转发至svc资源的规则,把集群外部的请求流量转发至集群内部,从而实现服务暴露(示例见下方)
  • Ingress控制器实现的工具有
    • Ingress-nginx
    • HAProxy
    • Traefik
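  • 下面是把前面创建的 nginx-dp 这个 svc 通过域名暴露出去的 Ingress 示意(nginx.xg.com 这个域名是假设的,还需要在DNS里解析到VIP):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-dp
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: nginx.xg.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-dp
          servicePort: 80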

3. LoadBalancer

  • LoadBalancer是NodePort类型的一种扩展,使得服务可以通过一个专用的负载均衡器来访问,负载均衡器由Kubernetes所在的云基础设施提供. 负载均衡器将流量重定向到所有节点上的节点端口,客户端通过负载均衡器的IP连接到服务

(二). Traefik

  • 使用Traefik型ingress
  • ingress最好只暴露http协议的域名,如果是https,先让请求经过keepalived01和keepalived02上nginx的负载均衡把证书卸载掉,然后再进入k8s集群

1. 安装

Traefik下载地址

# k8s-manage上
docker pull traefik:v1.7
docker tag add5fac61ae5 harbor.xg.com/public/traefik:v1.7
docker push harbor.xg.com/public/traefik:v1.7

2. 配置清单

  • k8s-manage
mkdir -p /data/k8s-yaml/traefik && cd /data/k8s-yaml/traefik
1. rbac
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
2. ds
  • 使用的ds的资源,需要根据自己环境修改
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.xg.com/public/traefik:v1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 81
        - name: admin
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.4.7.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
3. svc
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: controller
    - protocol: TCP
      port: 8080
      name: admin-web
4. ing
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.xg.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service 
          servicePort: 8080

3. 创建

kubectl apply -f http://k8s-yaml.xg.com/traefik/rbac.yaml
kubectl apply -f http://k8s-yaml.xg.com/traefik/ds.yaml
kubectl apply -f http://k8s-yaml.xg.com/traefik/svc.yaml
kubectl apply -f http://k8s-yaml.xg.com/traefik/ing.yaml

# 报错信息
iptables failed: iptables --wait -t filter -A DOCKER ! -i docker0 -o docker0 -p tcp -d 172.7.22.4 --dport 80 -j ACCEPT: iptables: No chain/target/match by that name

# 如果起不来,检查一下iptables,是因为docker会在iptables中创建一些策略,错误信息就是向filter表中DOCKER链中添加一条规则的时候出错,filter表中没有名字为DOCKER的规则链,我们在filter表中创建该规则链,或者重启docker,我是直接重启docker
iptables -t filter -N DOCKER

4. nginx代理

  • keepalived01和keepalived02都配上,因为VIP在他们上面
  • 使用nginx的七层代理
    • 让用户先访问这个nginx,可以把https证书卸了,然后进入k8s中的ingress
# 只要是业务域中的服务全部丢给ingress,也就是说7层业务全部交给ingress来干,注意server_name
vim /etc/nginx/conf.d/xg.com.conf

upstream default_backend_traefik {
	server 10.4.7.21:81		max_fails=3	fail_timeout=10s;
    server 10.4.7.22:81		max_fails=3	fail_timeout=10s;
    server 10.4.7.23:81		max_fails=3	fail_timeout=10s;
}
server {
	server_name *.xg.com;
	
	location / {
		proxy_pass http://default_backend_traefik;
		proxy_set_header Host	$http_host;
		proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
	}
}
  • keepalived01上做解析
vim /var/named/xg.com.zone

$ORIGIN xg.com.
$TTL 600        ; 10 minutes
@               IN SOA dns.xg.com. dnsadmin.xg.com. (
                               2021091304 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                               NS   dns.xg.com.
$TTL 60 ; 1 minute
dns                A   10.4.7.11
harbor             A   10.4.7.200
k8s-yaml           A   10.4.7.200
traefik            A   10.4.7.10


# 重启
systemctl restart named
  • 浏览器访问

(图: images/image-20210921231943967.png)

十八. k8s安全管理

抄的这里

1. securityContext

  • 大部分情况下容器不需要太多的权限,我们可以通过 Security Context 限定容器的权限和访问控制,只需加上 SecurityContext 字段
apiVersion: v1
kind: Pod
metadata:
  name: <Pod名称>
spec:
  containers:
  - name: <容器名称>
    image: <镜像>
+   securityContext:

2. 禁用 allowPrivilegeEscalation

  • allowPrivilegeEscalation=true 表示容器的任何子进程都可以获得比父进程更多的权限。最好将其设置为 false,以确保 RunAsUser 命令不能绕过其现有的权限集
apiVersion: v1
kind: Pod
metadata:
  name: <Pod名称>
spec:
  containers:
  - name: <容器名称>
    image: <镜像>
    securityContext:
+     allowPrivilegeEscalation: false

3. 不要使用 root 用户

  • 为了防止来自容器内的提权攻击,最好不要使用 root 用户运行容器内的应用。UID 设置大一点,尽量大于 3000
apiVersion: v1
kind: Pod
metadata:
  name: <Pod名称>
spec:
  securityContext:
+   runAsUser: <UID>
+   runAsGroup: <GID>

4. requests 和 limits

  • 资源限制必须加上,加上之后prometheus监控起来也更方便,示例见下
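  • 下面是给容器加 requests/limits 的示意(demo 这个 Pod 名是假设的,数值仅供参考):

apiVersion: v1
kind: Pod
metadata:
  name: demo
spec:
  containers:
  - name: demo
    image: harbor.xg.com/public/nginx
    resources:
      requests:        # 调度时保证的最低资源
        cpu: 50m
        memory: 100Mi
      limits:          # 运行时能用到的上限
        cpu: 100m
        memory: 300Mi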

5. 不必挂载 Service Account Token

  • ServiceAccount 为 Pod 中运行的进程提供身份标识,怎么标识呢?当然是通过 Token 啦,有了 Token,就防止假冒伪劣进程。如果你的应用不需要这个身份标识,可以不必挂载
apiVersion: v1
kind: Pod
metadata:
  name: <Pod名称>
spec:
+ automountServiceAccountToken: false

6. nodeAffinity

  • 节点亲和性 , 比nodeSelector更加灵活,它可以进行一些简单的逻辑组合,不只是简单的相等匹配 。分为两种,硬策略和软策略
    • requiredDuringSchedulingIgnoredDuringExecution : 硬策略,如果没有满足条件的节点的话,就不断重试直到满足条件为止,简单说就是你必须满足我的要求,不然我就不会调度Pod。
    • preferredDuringSchedulingIgnoredDuringExecution:软策略,如果你没有满足调度要求的节点的话,Pod就会忽略这条规则,继续完成调度过程,说白了就是满足条件最好了,没有满足就忽略掉的策略。
#要求Pod不能运行在128和132两个节点上,如果有节点满足disktype=ssd或者sas的话就优先调度到这类节点上
...
spec:
      containers:
      - name: demo
        image: 192.168.136.10:5000/demo/myblog:v1
        ports:
        - containerPort: 8002
      affinity:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                    - key: kubernetes.io/hostname
                      operator: NotIn
                      values:
                        - 192.168.136.128
                        - 192.168.136.132
            preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 1
                  preference:
                    matchExpressions:
                    - key: disktype
                      operator: In
                      values:
                        - ssd
                        - sas
...
  • operator匹配逻辑说明
    • In: label 的值在某个列表中
    • NotIn: label 的值不在某个列表中
    • Gt: label 的值大于某个值
    • Lt: label 的值小于某个值
    • Exists: 某个 label 存在
    • DoesNotExist: 某个 label 不存在

7. Taints与tolerations

  • Taints(污点)是Node的一个属性,设置了Taints后,因为有了污点,所以Kubernetes是不会将Pod调度到这个Node上的。于是Kubernetes就给Pod设置了个属性Tolerations(容忍),只要Pod能够容忍Node上的污点,那么Kubernetes就会忽略Node上的污点,就能够(不是必须)把Pod调度过去。
    • 场景一:私有云服务中,某业务使用GPU进行大规模并行计算。为保证性能,希望确保该业务对服务器的专属性,避免将普通业务调度到部署GPU的服务器。
    • 场景二:用户希望把 Master 节点保留给 Kubernetes 系统组件使用,或者把一组具有特殊资源预留给某些 Pod,则污点就很有用了,Pod 不会再被调度到 taint 标记过的节点。

kubectl taint node [node_name] key=value:[effect]   
# 其中[effect] 可取值: [ NoSchedule | PreferNoSchedule | NoExecute ]
# NoSchedule:一定不能被调度。
# PreferNoSchedule:尽量不要调度。
# NoExecute:不仅不会调度,还会驱逐Node上已有的Pod。

# 示例,污点为抽烟.名字可以自己取.
kubectl taint node k8s-slave1 smoke=true:NoSchedule

# 去除指定key及其effect:
kubectl taint nodes [node_name] key:[effect]-    #这里的key不用指定value
                
# 去除指定key所有的effect: 
kubectl taint nodes node_name key-
  • 设置容忍
...
spec:
      containers:
      - name: demo
        image: 192.168.136.10:5000/demo/myblog:v1
      tolerations: 			#设置容忍性
      - key: "smoke" 
        operator: "Equal"  #如果操作符为Exists,那么value属性可省略,不指定operator,默认为Equal,Equal就是等于的意思.等于下面的value,就是true.也就设置了容忍污点.
        value: "true"
        effect: "NoSchedule"
      - key: "drunk" 
        operator: "Exists"  #如果操作符为Exists,那么value属性可省略,不指定operator,默认为Equal
	  #意思是这个Pod要容忍的有污点的Node的key是smoke Equal true,效果是NoSchedule,
      #tolerations属性下各值必须使用引号,容忍的值都是设置Node的taints时给的值。

十九. dashboard

  • 中国团队开发的

官网下载

dashboard配置清单

docker pull hexun/kubernetes-dashboard-amd64:v1.10.1

docker tag hexun/kubernetes-dashboard-amd64:v1.10.1 harbor.xg.com/public/dashboard:v1.10.1

docker push harbor.xg.com/public/dashboard:v1.10.1

1. rbac

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system

2. deploy

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.xg.com/public/dashboard:v1.10.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"

3. svc

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443

4. ing

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.xg.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
# 构建
kubectl create -f http://k8s-yaml.xg.com/dashboard/rbac.yaml
kubectl create -f http://k8s-yaml.xg.com/dashboard/deploy.yaml
kubectl create -f http://k8s-yaml.xg.com/dashboard/svc.yaml
kubectl create -f http://k8s-yaml.xg.com/dashboard/ing.yaml


# 解析,因为我们keepalived01和keepalived02上的nginx直接将xg.com域全部抛给k8s的ingress来处理,所以直接解析到VIP就可以
vim /var/named/xg.com.zone 
$ORIGIN xg.com.
$TTL 600        ; 10 minutes
@               IN SOA dns.xg.com. dnsadmin.xg.com. (
                               2021091305 ; serial
                               10800      ; refresh (3 hours)
                               900        ; retry (15 minutes)
                               604800     ; expire (1 week)
                               86400      ; minimum (1 day)
                               )
                               NS   dns.xg.com.
$TTL 60 ; 1 minute
dns                A   10.4.7.11
harbor             A   10.4.7.200
k8s-yaml           A   10.4.7.200
traefik            A   10.4.7.10
dashboard          A   10.4.7.10


# 检查,然后浏览器访问
dig -t A dashboard.xg.com @192.168.0.2 +short

5. 给dashboard做https

  • k8s-manage中
# 使用openssl做签发
yum -y install openssl

cd /opt/certs

(umask 077; openssl genrsa -out dashboard.xg.com.key 2048)


openssl req -new -key dashboard.xg.com.key -out dashboard.xg.com.csr -subj "/CN=dashboard.xg.com/C=CN/ST=SZ/L=shenzhen/O=xg/OU=ops"

openssl x509 -req -in dashboard.xg.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.xg.com.crt -days 3650

# 用cfssl检查一下时间什么的等是否正常
cfssl-certinfo -cert dashboard.xg.com.crt 
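
# 也可以用 openssl 验证该证书确实由自建 CA 签出(示意,当前目录为 /opt/certs)
openssl verify -CAfile ca.pem dashboard.xg.com.crt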
  • 在keepalived01和keepalived02上
# 复制证书到nginx中
cd /etc/nginx
mkdir certs && cd certs

scp  -rp k8s-manage:/opt/certs/dashboard.xg.com.key .
scp  -rp k8s-manage:/opt/certs/dashboard.xg.com.crt .

# nginx配置文件
vim /etc/nginx/conf.d/dashboard.xg.conf

server {
    listen          80;
    server_name     dashboard.xg.com;

    rewrite ^(.*)$ https://${server_name}$1 permanent;
}

server {
    listen          443 ssl;
    server_name     dashboard.xg.com;

    ssl_certificate "certs/dashboard.xg.com.crt";
    ssl_certificate_key "certs/dashboard.xg.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout   10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host          $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}

nginx -t
systemctl reload nginx

# 访问
https://dashboard.xg.com

二十. RBAC

  • k8s在1.6版本后默认是基于角色访问控制
  • pod的yaml中如果没有指定serviceAccountName,那么就是默认的服务账户serviceAccountName: default
  • k8s有两种账户,同时也需要有两种资源给这两种账户绑定权限的资源
    • 服务账户ServiceAccount
    • 用户账户,用户账户一般用不着
    • ClusterRoleBinding ===> 给集群的服务账户绑定权限的资源
    • RoleBinding ===> 给同一namespace内的服务账户绑定权限的资源
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin		# 创建了名字为他的ServiceAccount(服务账户)
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin		# 创建名字为他的ClusterRoleBinding
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:		# 参考的哪个角色
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin		# 引用集群自带的cluster-admin这个ClusterRole
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
  
  
#####
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:		# 规则,对哪些api组有权限,就是yaml中的apiVersion的值
      - ""
    resources:		# 哪些api组下的资源
      - services
      - endpoints
      - secrets
    verbs:			# 哪些api组下的资源,有什么权限
      - get
      - list
      - watch
  - apiGroups:		# 可以定义多个
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
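  • RoleBinding 的写法类似,只是权限被限定在单个 namespace 内(下面的 read-pods 和 pod-reader 都是假设的名字,pod-reader 需要是已存在的 Role):

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-reader          # 假设已存在的 Role
subjects:
- kind: ServiceAccount
  name: default             # default 这个 namespace 自带的服务账户
  namespace: default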

二十一. 集群版本升级

  • 集群升级有时候不是整个集群一起升级,也可能只升级某一个node节点,那么就需要将这个node节点先驱逐再升级,升级完再重新加入集群
# 找一台业务比较少的节点,将节点的标签信息记录下来
kubectl get nodes k8s-master02 --show-labels

# 干掉节点,业务会被调度到其他节点上去
kubectl delete node k8s-master02.host.com

# 然后对这个节点进行升级操作,升级过程就是找k8s的tar包,然后就是kubelet和kube-proxy的操作等等
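# 大致示意(假设新版本包为 kubernetes-server-linux-amd64.tar.gz,且 /opt/kubernetes 是指向具体版本目录的软链接,目录名仅为举例):
# tar xf kubernetes-server-linux-amd64.tar.gz && mv kubernetes /opt/kubernetes-v1.20.x
# ln -snf /opt/kubernetes-v1.20.x /opt/kubernetes
# 证书、conf、*.sh 脚本从旧版本目录拷贝过来,再 supervisorctl restart all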

# 然后重启supervisor,节点就会自动加入到集群中,但是节点的标签没有了,需要重新打标签

kubectl label node k8s-master02.host.com node-role.kubernetes.io/node=
kubectl label node k8s-master02.host.com node-role.kubernetes.io/master=

结语

  • 每次重启服务器或者docker需要将iptables这些规则去掉,否则apiserver是起不来的
# 查看默认转发策略
iptables-save|grep -i reject
# 删除默认的转发策略
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
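
# 删完之后可以保存一次,这样 iptables-services 下次重启加载的就是去掉 REJECT 之后的规则(示意)
iptables-save > /etc/sysconfig/iptables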
