Installing Kubernetes from Binaries

Architecture Diagram

(lab architecture diagram)

Preparation

Prepare the virtual machines

  • 5 servers

Operating system

  • CentOS 7.6 (1810) minimal install

Adjust the yum repositories

Install epel-release

# yum install -y epel-release

Disable SELinux and firewalld
  1. # sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
  2. # setenforce 0    # check the current status with getenforce
  3. # systemctl stop firewalld

Install the necessary tools

# yum install -y wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils

Initialize the DNS service

On pl66-240.host.com:

Install bind9

# yum install -y bind

Configure bind9

Main configuration file

# vim /etc/named.conf

listen-on port 53 { 10.10.66.240; };
# delete or comment out the IPv6 listener:
# listen-on-v6 port 53 { ::1; };
allow-query     { any; };    # was: allow-query { localhost; };
forwarders      { 8.8.8.8; };
dnssec-enable no;
dnssec-validation no;

Check that the configuration syntax is correct:

# named-checkconf

Zone configuration file

# vim /etc/named.rfc1912.zones

zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 10.10.66.240; };
};
zone "yw.com" IN {
        type master;
        file "yw.com.zone";
        allow-update { 10.10.66.240; };
};

Configure the zone data files

  • Host-domain zone data file

# vim /var/named/host.com.zone

$ORIGIN host.com.
$TTL 600        ; 10 minutes
@   IN SOA dns.host.com. dnsadmin.host.com. (
                20200306    ; serial
                10800       ; refresh (3 hours)
                900     ; retry (15 minutes)
                604800      ; expire (1 week)
                86400       ; minimum (1 day)
                )
            NS  dns.host.com.
$TTL 60 ; 1 minute
dns     A   10.10.66.240
pl66-240        A   10.10.66.240
pl66-241        A   10.10.66.241
pl66-242        A   10.10.66.242
pl66-243        A   10.10.66.243
pl66-245        A   10.10.66.245

# vim /var/named/yw.com.zone

$ORIGIN yw.com.
$TTL 600        ; 10 minutes
@   IN SOA dns.yw.com. dnsadmin.yw.com. (
                20200306    ; serial
                10800       ; refresh (3 hours)
                900     ; retry (15 minutes)
                604800      ; expire (1 week)
                86400       ; minimum (1 day)
                )
            NS  dns.yw.com.
$TTL 60 ; 1 minute
dns     A   10.10.66.240
Check that the configuration syntax is correct:

# named-checkconf
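
named-checkconf only validates named.conf itself; the zone data files can additionally be checked with named-checkzone, which ships with bind (a quick sketch using the zones above):

# named-checkzone host.com /var/named/host.com.zone
# named-checkzone yw.com /var/named/yw.com.zone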

Start bind9

# named-checkconf
# systemctl start named
# systemctl enable named

Verify

[root@pl66-240 ~]# dig -t A pl66-242.host.com @10.10.66.240 +short
10.10.66.242
[root@pl66-240 ~]# dig -t A dns.yw.com @10.10.66.240 +short
10.10.66.240

Update the DNS settings on the other hosts in batch

[root@pl66-240 ~]# cat /etc/resolv.conf
search host.com
nameserver 10.10.66.240

Copy the resolv.conf from the 240 host to the other hosts:

# ansible server -m copy -a 'src=/etc/resolv.conf dest=/etc/resolv.conf force=yes backup=yes'
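
To confirm the file landed on every host, a quick check with ansible's shell module (assuming the same "server" host group used above):

# ansible server -m shell -a 'grep nameserver /etc/resolv.conf'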

Prepare the certificate-signing environment

On the ops host pl66-245.host.com

Install CFSSL

  • Certificate-signing tool CFSSL: R1.2
    cfssl download link
    cfssl-json download link
    cfssl-certinfo download link
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
chmod +x /usr/bin/cfssl*

Create the JSON config file for generating the CA certificate

vim /opt/certs/ca-config.json

{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

Certificate types

  • client certificate: used by a client; the server uses it to authenticate the client, e.g. etcdctl, etcd proxy, fleetctl, the docker client
  • server certificate: used by a server; the client uses it to verify the server's identity, e.g. the docker daemon, kube-apiserver
  • peer certificate: a dual-purpose certificate used for communication between etcd cluster members

Create the JSON config file for the CA certificate signing request (CSR)

# vim /opt/certs/ca-csr.json

{
    "CN": "pinnet",
    "hosts": [ 
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}

CN: Common Name. Browsers use this field to check whether a site is legitimate; usually the domain name.
C: Country
ST: State or province
L: Locality (city)
O: Organization Name (company)
OU: Organization Unit Name (department)

Generate the CA certificate and private key

[root@66-245 certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca

The output looks like this:

[root@66-245 certs]#  cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
2020/03/07 05:58:05 [INFO] generating a new CA key and certificate from CSR
2020/03/07 05:58:05 [INFO] generate received request
2020/03/07 05:58:05 [INFO] received CSR
2020/03/07 05:58:05 [INFO] generating key: rsa-2048
2020/03/07 05:58:06 [INFO] encoded CSR
2020/03/07 05:58:06 [INFO] signed certificate with serial number 64696289091365665227482443074556056282272288290
[root@66-245 certs]# ll
total 16
-rw-r--r-- 1 root root  989 Mar  7 05:58 ca.csr
-rwxr-xr-x 1 root root  224 Mar  7 05:56 ca-csr.json
-rw------- 1 root root 1675 Mar  7 05:58 ca-key.pem
-rw-r--r-- 1 root root 1338 Mar  7 05:58 ca.pem
[root@66-245 certs]# 
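
To inspect the freshly issued CA certificate, the cfssl-certinfo tool installed earlier can decode it and print the subject, issuer, and validity window as JSON (a quick sketch):

[root@66-245 certs]# cfssl-certinfo -cert ca.pem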

Deploy the Docker environment

Install

# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

Configure

Create the Docker daemon config file

# vim /etc/docker/daemon.json

{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.yw.com"],
    "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
    "bip": "172.16.242.1/24",
    "exec-opts": ["native.cgroupdriver=systemd"],
    "live-restore": true
}

Startup script (systemd unit)

/usr/lib/systemd/system/docker.service


[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target

Start

# systemctl start docker
# systemctl enable docker
# docker version
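
With the daemon running, the settings from daemon.json can be confirmed via docker info (a quick sketch; the grep pattern is illustrative):

# docker info | grep -iE 'root dir|storage driver|cgroup driver'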

Deploy the private Docker registry Harbor

Harbor official GitHub link

Harbor download link

Download the offline installer, version 1.9.3

(GitHub release info)

Extract the archive

[root@66-245 opt]# tar -xf harbor-offline-installer-v1.9.3.tgz

Rename the directory to include the version number

[root@66-245 opt]# mv harbor/ harbor1.9.3

Create a symlink

[root@66-245 opt]# ln -s /opt/harbor1.9.3/ /opt/harbor
This makes future software upgrades easier.

Modify the Harbor parameters (harbor.yml)

hostname: harbor.yw.com
http:
  port: 180
harbor_admin_password: Harbor12345    ## change this in production
data_volume: /data/harbor
log:
  level: info
  rotate_count: 50
  rotate_size: 200M
  location: /data/harbor/logs

Create the directories Harbor needs

# mkdir -p /data/harbor
# mkdir -p /data/harbor/logs

Install docker-compose

[root@66-245 harbor]# yum install docker-compose -y

Run the Harbor installer

[root@66-245 harbor]# /opt/harbor/install.sh

Check after installation

[root@66-245 harbor]# docker-compose ps

Install nginx as a reverse proxy

[root@66-245 harbor]# yum -y install nginx

Add the nginx reverse-proxy configuration

vim /etc/nginx/conf.d/harbor.yw.com.conf

server {
    listen  80;
    server_name harbor.yw.com;

    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}

On the DNS server (pl66-240), add an A record for harbor and restart the service to apply it.

(Edit /var/named/yw.com.zone to add the harbor A record and bump the serial; the original screenshot is omitted.)

systemctl restart named

Verify:

dig -t A harbor.yw.com +short
10.10.66.240

Check the added configuration

Check commands
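
The original screenshots of these checks are missing; a plausible minimal check (a sketch, assuming the reverse proxy runs on the Harbor host) is:

# nginx -t                        # validate the nginx configuration
# systemctl start nginx
# systemctl enable nginx
# curl -sI http://harbor.yw.com   # expect an HTTP response from the Harbor portal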

Verify Harbor

Open a browser and go to http://harbor.yw.com



Log in with the account credentials

  • Username: admin
  • Password: Harbor12345

Create a new project

(In the Harbor UI, create a project named "public" with public access; the original screenshots are omitted.)

Pull the nginx image

[root@pl66-245 nginx]# docker pull nginx

Look up the nginx image ID

[root@pl66-245 nginx]# docker images

Tag the image

[root@pl66-245 nginx]# docker tag 6678c7c2e56c harbor.yw.com/public/nginx:1.9.1

Upload the local nginx image to Harbor

  1. Log in to Harbor

docker login harbor.yw.com

[root@pl66-245 nginx]# docker login harbor.yw.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@pl66-245 nginx]# 

2. Push the image to Harbor

docker push harbor.yw.com/public/nginx:1.9.1

[root@pl66-245 nginx]# docker push harbor.yw.com/public/nginx:1.9.1
The push refers to repository [harbor.yw.com/public/nginx]
55a77731ed26: Pushed 
71f2244bc14d: Pushed 
f2cb0ecef392: Pushed 
1.9.1: digest: sha256:3936fb3946790d711a68c58be93628e43cbca72439079e16d154b5db216b58da size: 948
[root@pl66-245 nginx]# 

Install the Master Node Services

Deploy the etcd cluster

Cluster plan

Hostname    Role             IP
pl66-241    etcd leader      10.10.66.241
pl66-242    etcd follower    10.10.66.242
pl66-243    etcd follower    10.10.66.243

Note: this guide uses pl66-241 as the example; the other two hosts are deployed the same way.

Create the JSON config file for the certificate signing request (CSR)

vi /opt/certs/etcd-peer-csr.json

{
    "CN": "k8s-etcd",
    "hosts": [ 
        "10.10.66.240",
        "10.10.66.241",
        "10.10.66.242",
        "10.10.66.243"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ]
}

Generate the etcd certificate and private key

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssl-json -bare etcd-peer

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssl-json -bare etcd-peer
2020/03/10 02:26:28 [INFO] generate received request
2020/03/10 02:26:28 [INFO] received CSR
2020/03/10 02:26:28 [INFO] generating key: rsa-2048
2020/03/10 02:26:28 [INFO] encoded CSR
2020/03/10 02:26:28 [INFO] signed certificate with serial number 643611486410713894911975662668229763052251494279
2020/03/10 02:26:28 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

Check the generated certificate and private key

[root@pl66-245 certs]# ll
total 36
-rw-r--r-- 1 root root  918 Mar  9 10:26 ca-config.json
-rw-r--r-- 1 root root  989 Mar  7 05:58 ca.csr
-rwxr-xr-x 1 root root  224 Mar  7 05:56 ca-csr.json
-rw------- 1 root root 1675 Mar  7 05:58 ca-key.pem
-rw-r--r-- 1 root root 1338 Mar  7 05:58 ca.pem
-rw-r--r-- 1 root root 1062 Mar 10 02:26 etcd-peer.csr
-rw-r--r-- 1 root root  262 Mar 10 01:36 etcd-peer-csr.json
-rw------- 1 root root 1675 Mar 10 02:26 etcd-peer-key.pem
-rw-r--r-- 1 root root 1424 Mar 10 02:26 etcd-peer.pem
[root@pl66-245 certs]# 

Create the etcd user

On pl66-241:

useradd -s /sbin/nologin -M etcd

[root@pl66-241 ~]# useradd -s /sbin/nologin -M etcd
[root@pl66-241 ~]# id etcd
uid=1004(etcd) gid=1004(etcd) groups=1004(etcd)

Download the software, extract it, and create a symlink

etcd download link

[root@pl66-241 opt]# tar xvf etcd-v3.1.20-linux-amd64.tar.gz -C /opt
[root@pl66-241 opt]# mv etcd-v3.1.20-linux-amd64 etcd-v3.1.20
[root@pl66-241 opt]# ln -s /opt/etcd-v3.1.20 /opt/etcd

Create directories and copy the certificate and private key

  • Create directories

[root@pl66-241 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server

  • Copy the certificates
    Copy the ca.pem, etcd-peer-key.pem, and etcd-peer.pem generated on the ops host into /opt/etcd/certs. Note: private key files must keep mode 600.
    In /opt/etcd/certs:

[root@pl66-241 certs]# scp pl66-245:/opt/certs/ca.pem .
[root@pl66-241 certs]# scp pl66-245:/opt/certs/etcd-peer-key.pem .
[root@pl66-241 certs]# scp pl66-245:/opt/certs/etcd-peer.pem .

  • Verify the permissions
[root@pl66-241 certs]# ll
total 12
-rw-r--r--. 1 root root 1338 Mar 10 11:19 ca.pem
-rw-------. 1 root root 1675 Mar 10 11:20 etcd-peer-key.pem
-rw-r--r--. 1 root root 1424 Mar 10 11:20 etcd-peer.pem
[root@pl66-241 certs]# 

Create the etcd service startup script

vi /opt/etcd/etcd-server-startup.sh

#!/bin/sh
./etcd --name etcd-server-66-241 \
    --data-dir /data/etcd/etcd-server \
    --listen-peer-urls https://10.10.66.241:2380 \
    --listen-client-urls https://10.10.66.241:2379,http://127.0.0.1:2379 \
    --quota-backend-bytes 800000000 \
    --initial-advertise-peer-urls https://10.10.66.241:2380 \
    --advertise-client-urls https://10.10.66.241:2379,http://127.0.0.1:2379 \
    --initial-cluster etcd-server-66-241=https://10.10.66.241:2380,etcd-server-66-242=https://10.10.66.242:2380,etcd-server-66-243=https://10.10.66.243:2380 \
    --ca-file ./certs/ca.pem \
    --cert-file ./certs/etcd-peer.pem \
    --key-file ./certs/etcd-peer-key.pem \
    --client-cert-auth \
    --trusted-ca-file ./certs/ca.pem \
    --peer-ca-file ./certs/ca.pem \
    --peer-cert-file ./certs/etcd-peer.pem \
    --peer-key-file ./certs/etcd-peer-key.pem \
    --peer-client-cert-auth \
    --peer-trusted-ca-file ./certs/ca.pem \
    --log-output stdout

Adjust permissions and directories

[root@pl66-241 etcd]# chmod +x /opt/etcd/etcd-server-startup.sh
[root@pl66-241 etcd]# chown -R etcd.etcd /opt/etcd-v3.1.20/
[root@pl66-241 etcd]# chown -R etcd.etcd /data/etcd/
[root@pl66-241 etcd]# chown -R etcd.etcd /data/logs/etcd-server/

Install supervisor

supervisor manages background processes and automatically restarts them if they exit.

[root@pl66-241 etcd]# yum -y install supervisor
[root@pl66-241 etcd]# systemctl start supervisord
[root@pl66-241 etcd]# systemctl enable supervisord

Create the etcd-server startup config

/etc/supervisord.d/etcd-server.ini

[program:etcd-server-66-241]
command=/opt/etcd/etcd-server-startup.sh
numprocs=1
directory=/opt/etcd
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=etcd
redirect_stderr=true
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Note: the startup config differs slightly on each etcd host; adjust it when configuring the other nodes.

Start the etcd service and check it

[root@pl66-241 supervisord.d]# supervisorctl update
etcd-server-66-241: added process group

View the startup log

[root@pl66-241 supervisord.d]# tail -fn 200 /data/logs/etcd-server/etcd.stdout.log

Status check

[root@pl66-241 member]# supervisorctl status
etcd-server-66-241               RUNNING   pid 28649, uptime 0:00:36
[root@pl66-241 member]# netstat -ntlp | grep etcd
tcp        0      0 10.10.66.241:2379       0.0.0.0:*               LISTEN      28650/./etcd        
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      28650/./etcd        
tcp        0      0 10.10.66.241:2380       0.0.0.0:*               LISTEN      28650/./etcd       

Configure the other two servers the same way

Check the cluster health

[root@pl66-241 etcd]# ./etcdctl cluster-health
member f8d0e74dd98768e is healthy: got healthy result from http://127.0.0.1:2379
member 53fdb991bce71f1c is healthy: got healthy result from http://127.0.0.1:2379
member 690d0b927b2d3fb7 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
[root@pl66-241 etcd]# 
[root@pl66-241 etcd]# ./etcdctl member list
f8d0e74dd98768e: name=etcd-server-66-242 peerURLs=https://10.10.66.242:2380 clientURLs=http://127.0.0.1:2379,https://10.10.66.242:2379 isLeader=false
53fdb991bce71f1c: name=etcd-server-66-243 peerURLs=https://10.10.66.243:2380 clientURLs=http://127.0.0.1:2379,https://10.10.66.243:2379 isLeader=false
690d0b927b2d3fb7: name=etcd-server-66-241 peerURLs=https://10.10.66.241:2380 clientURLs=http://127.0.0.1:2379,https://10.10.66.241:2379 isLeader=true
[root@pl66-241 etcd]# 

Deploy the kube-apiserver cluster

Cluster plan

Hostname    Role                IP
pl66-242    kube-apiserver      10.10.66.242
pl66-243    kube-apiserver      10.10.66.243
pl66-240    L4 load balancer    10.10.66.240
pl66-241    L4 load balancer    10.10.66.241

Note: 10.10.66.240 and 10.10.66.241 run nginx as L4 load balancers, with keepalived providing the VIP 10.10.66.250 that proxies the two kube-apiservers for high availability.

This guide uses pl66-242 as the example; the other compute node is deployed the same way.

Download the software, extract it, and create a symlink

Kubernetes official GitHub link
Kubernetes download link

[root@pl66-242 opt]# tar xvf kubernetes-server-linux-amd64.tar.gz
[root@pl66-242 opt]# mv kubernetes kubernetes-v1.15.2
[root@pl66-242 opt]# ln -s kubernetes-v1.15.2 kubernetes

[root@pl66-243 opt]# ll
total 442992
drwx--x--x. 4 root root        28 Mar  7 14:43 containerd
lrwxrwxrwx. 1 root root        17 Mar 10 16:46 etcd -> /opt/etcd-v3.1.20
drwxr-xr-x. 4 etcd etcd       166 Mar 10 16:48 etcd-v3.1.20
-rw-r--r--. 1 root root   9850227 Mar 10 16:29 etcd-v3.1.20-linux-amd64.tar.gz
lrwxrwxrwx. 1 root root        18 Mar 13 10:44 kubernetes -> kubernetes-v1.15.2
-rw-r--r--. 1 root root 443770238 Mar 13 09:29 kubernetes-server-linux-amd64.tar.gz
drwxr-xr-x. 4 root root        79 Aug  5  2019 kubernetes-v1.15.2

Issue the client certificate

Create the JSON config file for the certificate signing request (CSR)

/opt/certs/client-csr.json

{
    "CN": "k8s-node",
    "hosts": [ 
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ]
}

Generate the client certificate and private key

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssl-json -bare client

Check the generated certificate and private key

[root@pl66-245 certs]# ls -l | grep client
-rw-r--r-- 1 root root  993 Mar 11 07:58 client.csr
-rw-r--r-- 1 root root  192 Mar 11 07:58 client-csr.json
-rw------- 1 root root 1675 Mar 11 07:58 client-key.pem
-rw-r--r-- 1 root root 1359 Mar 11 07:58 client.pem
[root@pl66-245 certs]# 

Issue the kube-apiserver certificate

Create the JSON config file for the certificate signing request (CSR)

/opt/certs/apiserver-csr.json

{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.10.66.241",
        "10.10.66.242",
        "10.10.66.243",
        "10.10.66.250"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ]
}

Generate the kube-apiserver certificate and private key

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json | cfssl-json -bare apiserver

Check the generated certificate and private key

[root@pl66-245 certs]# ls -l | grep apiserver
-rw-r--r-- 1 root root 1240 Mar 11 08:47 apiserver.csr
-rw-r--r-- 1 root root  421 Mar 11 06:38 apiserver-csr.json
-rw------- 1 root root 1679 Mar 11 08:47 apiserver-key.pem
-rw-r--r-- 1 root root 1582 Mar 11 08:47 apiserver.pem
[root@pl66-245 certs]# 

Copy the certificates to each compute node and create the configs

Copy the certificates and private keys; note that private key files must have mode 600

/opt/kubernetes/server/bin/cert

[root@pl66-242 cert]# scp pl66-245:/opt/certs/ca.pem .
The authenticity of host 'pl66-245 (10.10.66.245)' can't be established.
ECDSA key fingerprint is SHA256:2YOuINoiCs2y07VJzw8hwpc4pbPES7BNYU1c01zdoBg.
ECDSA key fingerprint is MD5:63:11:13:4d:18:eb:fa:2c:9e:21:73:43:5a:51:e9:5e.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'pl66-245,10.10.66.245' (ECDSA) to the list of known hosts.
root@pl66-245's password: 
ca.pem                                                                          100% 1338   796.4KB/s   00:00    
[root@pl66-242 cert]# scp pl66-245:/opt/certs/apiserver-key.pem .
root@pl66-245's password: 
apiserver-key.pem                                                               100% 1679   998.9KB/s   00:00    
[root@pl66-242 cert]# scp pl66-245:/opt/certs/apiserver.pem .
root@pl66-245's password: 
apiserver.pem                                                                   100% 1582     1.0MB/s   00:00    
[root@pl66-242 cert]# scp pl66-245:/opt/certs/ca-key.pem .
root@pl66-245's password: 
ca-key.pem                                                                      100% 1675   913.5KB/s   00:00    
[root@pl66-242 cert]# 
[root@pl66-242 cert]# 
[root@pl66-242 cert]# scp pl66-245:/opt/certs/client-key.pem .
root@pl66-245's password: 
client-key.pem                                                                  100% 1675   848.3KB/s   00:00    
[root@pl66-242 cert]# scp pl66-245:/opt/certs/client.pem .
root@pl66-245's password: 
client.pem                                                                      100% 1359   773.5KB/s   00:00    
[root@pl66-242 cert]# ls -l 
total 24
-rw-------. 1 root root 1679 Mar 11 08:53 apiserver-key.pem
-rw-r--r--. 1 root root 1582 Mar 11 08:53 apiserver.pem
-rw-------. 1 root root 1675 Mar 11 08:53 ca-key.pem
-rw-r--r--. 1 root root 1338 Mar 11 08:53 ca.pem
-rw-------. 1 root root 1675 Mar 11 08:54 client-key.pem
-rw-r--r--. 1 root root 1359 Mar 11 08:54 client.pem
[root@pl66-242 cert]# 

Create the config

/opt/kubernetes/server/bin/conf/audit.yaml

apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

Create the startup script

vi /opt/kubernetes/server/bin/kube-apiserver.sh

#!/bin/bash
./kube-apiserver \
        --apiserver-count 2 \
        --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
        --audit-policy-file ./conf/audit.yaml \
        --authorization-mode RBAC \
        --client-ca-file ./cert/ca.pem \
        --requestheader-client-ca-file ./cert/ca.pem \
        --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
        --etcd-cafile ./cert/ca.pem \
        --etcd-certfile ./cert/client.pem \
        --etcd-keyfile ./cert/client-key.pem \
        --etcd-servers https://10.10.66.241:2379,https://10.10.66.242:2379,https://10.10.66.243:2379 \
        --service-account-key-file ./cert/ca-key.pem \
        --service-cluster-ip-range 192.168.0.0/16 \
        --service-node-port-range 3000-29999 \
        --target-ram-mb=1024 \
        --kubelet-client-certificate ./cert/client.pem \
        --kubelet-client-key ./cert/client-key.pem \
        --log-dir /data/logs/kubernetes/kube-apiserver \
        --tls-cert-file ./cert/apiserver.pem \
        --tls-private-key-file ./cert/apiserver-key.pem \
        --v 2

Adjust permissions and create directories

chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
mkdir -p /data/logs/kubernetes/kube-apiserver

Create the supervisor config

vi /etc/supervisord.d/kube-apiserver.ini

[program:kube-apiserver-66-242]
command=/opt/kubernetes/server/bin/kube-apiserver.sh
numprocs=1
directory=/opt/kubernetes/server/bin/
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Start the service and check it

[root@pl66-242 supervisord.d]# supervisorctl update

[root@pl66-242 supervisord.d]# supervisorctl status
etcd-server-66-242               RUNNING   pid 10902, uptime 1:49:11
kube-apiserver-66-242            RUNNING   pid 10901, uptime 1:49:11
[root@pl66-242 supervisord.d]# 
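
As an extra sanity check, kube-apiserver should now be listening on the secure port 6443 and the local insecure port 8080 (a quick sketch; the grep pattern is illustrative):

[root@pl66-242 supervisord.d]# netstat -lntp | grep kube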

Configure the L4 reverse proxy

Install nginx

[root@pl66-240 ~]# yum -y install nginx

nginx configuration

/etc/nginx/nginx.conf

stream {
    upstream kube-apiserver {
        server 10.10.66.242:6443    max_fails=3 fail_timeout=30s;
        server 10.10.66.243:6443    max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

Note: append this stream block at the end of /etc/nginx/nginx.conf, outside the http block.

keepalived configuration

Install keepalived

[root@pl66-241 etcd]# yum -y install keepalived

Create the check_port.sh script

vim /etc/keepalived/check_port.sh
chmod +x /etc/keepalived/check_port.sh

#!/bin/bash
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then 
    PORT_PROCESS=`ss -lnt | grep $CHK_PORT | wc -l`
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT is Not Used,End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
fi
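
A quick way to test the script by hand (assuming nginx is already listening on 7443):

[root@pl66-240 keepalived]# /etc/keepalived/check_port.sh 7443
[root@pl66-240 keepalived]# echo $?    # 0 if the port is in use, 1 otherwise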

keepalived master

yum -y install keepalived
[root@pl66-240 keepalived]# rpm -qa keepalived
keepalived-1.3.5-16.el7.x86_64

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 10.10.66.240

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface enp2s0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.10.66.240
    nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.10.66.250
    }
}

keepalived backup

yum -y install keepalived
[root@pl66-241 keepalived]# rpm -qa keepalived
keepalived-1.3.5-16.el7.x86_64
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 10.10.66.241

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state BACKUP
    interface enp3s0
    virtual_router_id 251
    mcast_src_ip 10.10.66.241
    priority 90
    advert_int 1

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.10.66.250
    }
}

Start the proxies and check

systemctl start keepalived
systemctl enable keepalived
nginx -s reload

[root@pl66-240 keepalived]# ip add 
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: enp2s0:  mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether f4:4d:30:14:4d:75 brd ff:ff:ff:ff:ff:ff
    inet 10.10.66.240/24 brd 10.10.66.255 scope global noprefixroute enp2s0
       valid_lft forever preferred_lft forever
    inet 10.10.66.250/32 scope global enp2s0
       valid_lft forever preferred_lft forever
    inet6 fe80::f64d:30ff:fe14:4d75/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0:  mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:35:68:1c:de brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
[root@pl66-240 keepalived]# 
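
With the VIP up, a minimal end-to-end test of the L4 proxy (a sketch: kube-apiserver will reject the unauthenticated request, but a JSON error response proves nginx and keepalived are forwarding correctly):

# curl -k https://10.10.66.250:7443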

Deploy the controller-manager

Cluster plan

Hostname    Role                  IP
pl66-242    controller-manager    10.10.66.242
pl66-243    controller-manager    10.10.66.243

Note: this guide uses pl66-242 as the example; the other compute node is deployed the same way.

Create the startup script

/opt/kubernetes/server/bin/kube-controller-manager.sh

#!/bin/sh
./kube-controller-manager \
    --cluster-cidr  172.16.0.0/16 \
    --leader-elect true \
    --log-dir /data/kubernetes/kube-controller-manager \
    --master http://127.0.0.1:8080 \
    --service-account-private-key-file ./cert/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --root-ca-file ./cert/ca.pem \
    --v 2

Adjust file permissions and create directories

chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
mkdir -p /data/kubernetes/kube-controller-manager
mkdir -p /data/logs/kubernetes/kube-controller-manager

Create the supervisor config

/etc/supervisord.d/kube-controller-manager.ini

[program:kube-controller-manager--66-242]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh
numprocs=1
directory=/opt/kubernetes/server/bin/
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Start the service and check it

[root@pl66-242 supervisord.d]# supervisorctl update

[root@pl66-242 supervisord.d]# supervisorctl status
etcd-server-66-242                RUNNING   pid 10902, uptime 8:05:27
kube-apiserver-66-242             RUNNING   pid 10901, uptime 8:05:27
kube-controller-manager--66-242   RUNNING   pid 11558, uptime 0:06:13

Deploy kube-scheduler

Cluster plan

Hostname    Role              IP
pl66-242    kube-scheduler    10.10.66.242
pl66-243    kube-scheduler    10.10.66.243

Note: this guide uses pl66-242 as the example; the other compute node is deployed the same way.

Create the startup script

vim /opt/kubernetes/server/bin/kube-scheduler.sh

#!/bin/bash
./kube-scheduler \
    --leader-elect \
    --log-dir /data/logs/kubernetes/kube-scheduler \
    --master http://127.0.0.1:8080 \
    --v 2

Adjust file permissions and create directories

chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
mkdir -p /data/logs/kubernetes/kube-scheduler

Create the supervisor config

vim /etc/supervisord.d/kube-scheduler.ini

[program:kube-scheduler--66-242]
command=/opt/kubernetes/server/bin/kube-scheduler.sh
numprocs=1
directory=/opt/kubernetes/server/bin/
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Load the new config and check

[root@pl66-242 supervisord.d]# supervisorctl update

[root@pl66-242 supervisord.d]# supervisorctl status
etcd-server-66-242                RUNNING   pid 11782, uptime 0:01:23
kube-apiserver-66-242             RUNNING   pid 11762, uptime 0:01:23
kube-controller-manager--66-242   RUNNING   pid 11807, uptime 0:00:31
kube-scheduler--66-242            RUNNING   pid 11763, uptime 0:01:23
[root@pl66-242 supervisord.d]# 

Create a kubectl symlink

[root@pl66-242 supervisord.d]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl

[root@pl66-242 supervisord.d]# which kubectl
/usr/bin/kubectl

Check the cluster status

[root@pl66-242 supervisord.d]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
[root@pl66-242 supervisord.d]# 

Install the Node Services

Deploy kubelet

Cluster plan

Hostname             Role       IP
pl66-242.host.com    kubelet    10.10.66.242
pl66-243.host.com    kubelet    10.10.66.243

Note: this guide uses pl66-242 as the example; the other compute node is deployed the same way.

Issue the kubelet certificate

Create the JSON config file for the certificate signing request (CSR)

vi /opt/certs/kubelet-csr.json

{
    "CN": "k8s-kubelet",
    "hosts": [
        "127.0.0.1",
        "10.10.66.240",
        "10.10.66.241",
        "10.10.66.242",
        "10.10.66.243",
        "10.10.66.250",
        "10.10.66.251",
        "10.10.66.252",
        "10.10.66.253"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ]
}

Generate the kubelet certificate and private key

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
2020/03/16 02:18:20 [INFO] generate received request
2020/03/16 02:18:20 [INFO] received CSR
2020/03/16 02:18:20 [INFO] generating key: rsa-2048
2020/03/16 02:18:20 [INFO] encoded CSR
2020/03/16 02:18:20 [INFO] signed certificate with serial number 411291623634880987451147311712722127071427596871
2020/03/16 02:18:20 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

Check the generated certificate and private key

[root@pl66-245 certs]# ll /opt/certs/ | grep kubelet
-rw-r--r-- 1 root root 1106 Mar 16 02:18 kubelet.csr
-rw-r--r-- 1 root root  394 Mar 16 02:15 kubelet-csr.json
-rw------- 1 root root 1679 Mar 16 02:18 kubelet-key.pem
-rw-r--r-- 1 root root 1456 Mar 16 02:18 kubelet.pem
[root@pl66-245 certs]# 

Copy the certificates to each compute node and create the configs

Copy the certificates and private keys; note that private key files must have mode 600

/opt/kubernetes/server/bin/cert
[root@pl66-242 cert]# scp pl66-245:/opt/certs/kubelet-key.pem .
[root@pl66-242 cert]# scp pl66-245:/opt/certs/kubelet.pem .

[root@pl66-242 cert]# ll
total 32
-rw-------. 1 root root 1679 Mar 12 09:53 apiserver-key.pem
-rw-r--r--. 1 root root 1582 Mar 12 09:53 apiserver.pem
-rw-------. 1 root root 1675 Mar 12 09:53 ca-key.pem
-rw-r--r--. 1 root root 1338 Mar 12 09:53 ca.pem
-rw-------. 1 root root 1675 Mar 12 09:53 client-key.pem
-rw-r--r--. 1 root root 1359 Mar 12 09:53 client.pem
-rw-------. 1 root root 1679 Mar 16 02:25 kubelet-key.pem
-rw-r--r--. 1 root root 1456 Mar 16 02:25 kubelet.pem
[root@pl66-242 cert]#

Create the config

set-cluster
Note: run these commands in the conf directory, /opt/kubernetes/server/bin/conf.

[root@pl66-242 conf]# kubectl config set-cluster myk8s \
 --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
 --embed-certs=true \
 --server=https://10.10.66.250:7443 \
 --kubeconfig=kubelet.kubeconfig

Cluster "myk8s" set.
[root@pl66-242 conf]# 

set-credentials

[root@pl66-242 conf]# kubectl config set-credentials k8s-node \
> --client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
> --client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
> --embed-certs=true \
> --kubeconfig=kubelet.kubeconfig
User "k8s-node" set.

set-context

[root@pl66-242 conf]# kubectl config set-context myk8s-context \
> --cluster=myk8s \
> --user=k8s-node \
> --kubeconfig=kubelet.kubeconfig
Context "myk8s-context" created.
[root@pl66-242 conf]# 

use-context

[root@pl66-242 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
Switched to context "myk8s-context".
[root@pl66-242 conf]# 

k8s-node.yaml

  • Create the resource config file

/opt/kubernetes/server/bin/conf/k8s-node.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef: 
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
  • Apply the resource config file

[root@pl66-242 conf]# kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created

  • Check

[root@pl66-242 cert]# kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   18m

Copy the kubelet config file and certificates to the 243 host

[root@pl66-242 conf]# ll
total 20
-rw-r--r--. 1 root root 2223 Mar 13 01:14 audit.yaml
-rw-r--r--. 1 root root 1003 Mar 12 10:14 audit.yaml.bak
-rw-r--r--. 1 root root  259 Mar 16 06:03 k8s-node.yaml
-rw-------. 1 root root 6178 Mar 16 06:00 kubelet.kubeconfig
[root@pl66-242 conf]# pwd
/opt/kubernetes/server/bin/conf
[root@pl66-242 conf]# scp kubelet.kubeconfig pl66-243:/opt/kubernetes/server/bin/conf/
root@pl66-243's password: 
kubelet.kubeconfig                                                                                                 100% 6178     2.3MB/s   00:00    
[root@pl66-242 conf]# 

[root@pl66-245 certs]# scp kubelet.pem root@pl66-243:/opt/kubernetes/server/bin/cert
The authenticity of host 'pl66-243 (10.10.66.243)' can't be established.
ECDSA key fingerprint is SHA256:yghdzfvB+QjjAsNSdGlAOhu1cm2yEIVLRidqi2k3+QQ.
ECDSA key fingerprint is MD5:52:2b:f4:1b:d0:83:00:dd:62:b6:66:d2:9f:38:77:8b.
Are you sure you want to continue connecting (yes/no)? yes            
Warning: Permanently added 'pl66-243' (ECDSA) to the list of known hosts.
root@pl66-243's password: 
kubelet.pem                                                                                                        100% 1456   890.8KB/s   00:00    
[root@pl66-245 certs]# scp kubelet-key.pem root@pl66-243:/opt/kubernetes/server/bin/cert
root@pl66-243's password: 
kubelet-key.pem                                                                                                    100% 1679   442.0KB/s   00:00    
[root@pl66-245 certs]# 

Prepare the pause base image

  • Pull it
[root@pl66-245 certs]# docker pull kubernetes/pause
Using default tag: latest
latest: Pulling from kubernetes/pause
4f4fb700ef54: Pull complete 
b9c8ec465f6b: Pull complete 
Digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105
Status: Downloaded newer image for kubernetes/pause:latest
docker.io/kubernetes/pause:latest
[root@pl66-245 certs]# 
  • Push it to the private registry (Harbor)
  • Log in to Harbor

[root@pl66-245 certs]# docker login harbor.yw.com

  • Tag the image

[root@pl66-245 certs]# docker tag f9d5de079539 harbor.yw.com/public/pause:latest

  • Push it to Harbor

[root@pl66-245 certs]# docker push harbor.yw.com/public/pause:latest

Create the kubelet startup script

/opt/kubernetes/server/bin/kubelet.sh

#!/bin/bash

./kubelet \
    --anonymous-auth=false \
    --cgroup-driver systemd \
    --cluster-dns 192.168.0.2 \
    --cluster-domain cluster.local \
    --runtime-cgroups=/systemd/system.slice \
    --kubelet-cgroups=/systemd/system.slice \
    --fail-swap-on="false" \
    --client-ca-file ./cert/ca.pem \
    --tls-cert-file ./cert/kubelet.pem \
    --tls-private-key-file ./cert/kubelet-key.pem \
    --hostname-override pl66-242.host.com \
    --image-gc-high-threshold 20 \
    --image-gc-low-threshold 10 \
    --kubeconfig ./conf/kubelet.kubeconfig \
    --log-dir /data/logs/kubernetes/kube-kubelet \
    --pod-infra-container-image harbor.yw.com/public/pause:latest \
    --root-dir /data/kubelet

Check the config, set permissions, and create the log directories

[root@pl66-242 conf]# ll | grep kubelet
-rw-------. 1 root root 6178 Mar 16 06:00 kubelet.kubeconfig

chmod +x /opt/kubernetes/server/bin/kubelet.sh
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet

Create the supervisor config
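
The kubelet supervisor config itself is omitted in the original. A sketch following the same pattern as the other services in this guide (the program name and log file name are assumptions):

/etc/supervisord.d/kube-kubelet.ini

[program:kube-kubelet-66-242]
command=/opt/kubernetes/server/bin/kubelet.sh
numprocs=1
directory=/opt/kubernetes/server/bin/
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Load it and confirm the node registers:

# supervisorctl update
# supervisorctl status
# kubectl get nodes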

Assign node roles

[root@pl66-242 ~]# kubectl label node pl66-242.host.com node-role.kubernetes.io/master=
[root@pl66-242 ~]# kubectl label node pl66-242.host.com node-role.kubernetes.io/node=

Deploy kube-proxy

Cluster plan

Hostname             Role          IP
pl66-242.host.com    kube-proxy    10.10.66.242
pl66-243.host.com    kube-proxy    10.10.66.243

Create the JSON config file for the certificate signing request (CSR)

/opt/certs/kube-proxy-csr.json

{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "chengdu",
            "L": "chengdu",
            "O": "pl",
            "OU": "ops"
        }
    ]
}

Generate the certificate and private key

[root@pl66-245 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssl-json -bare kube-proxy-client 
2020/03/16 08:58:51 [INFO] generate received request
2020/03/16 08:58:51 [INFO] received CSR
2020/03/16 08:58:51 [INFO] generating key: rsa-2048
2020/03/16 08:58:51 [INFO] encoded CSR
2020/03/16 08:58:51 [INFO] signed certificate with serial number 601899238979766833696791320168818948790769415904
2020/03/16 08:58:51 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

Check

[root@pl66-245 certs]# ll | grep kube-proxy
-rw-r--r-- 1 root root 1005 Mar 16 08:58 kube-proxy-client.csr
-rw------- 1 root root 1675 Mar 16 08:58 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1371 Mar 16 08:58 kube-proxy-client.pem
-rw-r--r-- 1 root root  184 Mar 16 08:53 kube-proxy-csr.json
[root@pl66-245 certs]# 

Copy the certificate and private key; note that private key files must have mode 600

/opt/kubernetes/server/bin/cert

[root@pl66-242 cert]# scp pl66-245:/opt/certs/kube-proxy-client.pem .
root@pl66-245's password: 
kube-proxy-client.pem                                                                                                    100% 1371   737.8KB/s   00:00    
[root@pl66-242 cert]# scp pl66-245:/opt/certs/kube-proxy-client-key.pem .
root@pl66-245's password: 
kube-proxy-client-key.pem                                                                                                100% 1675   899.0KB/s   00:00    
[root@pl66-242 cert]# ll
total 40
-rw-------. 1 root root 1679 Mar 12 09:53 apiserver-key.pem
-rw-r--r--. 1 root root 1582 Mar 12 09:53 apiserver.pem
-rw-------. 1 root root 1675 Mar 12 09:53 ca-key.pem
-rw-r--r--. 1 root root 1338 Mar 12 09:53 ca.pem
-rw-------. 1 root root 1675 Mar 12 09:53 client-key.pem
-rw-r--r--. 1 root root 1359 Mar 12 09:53 client.pem
-rw-------. 1 root root 1679 Mar 16 02:25 kubelet-key.pem
-rw-r--r--. 1 root root 1456 Mar 16 02:25 kubelet.pem
-rw-------. 1 root root 1675 Mar 16 09:40 kube-proxy-client-key.pem
-rw-r--r--. 1 root root 1371 Mar 16 09:40 kube-proxy-client.pem
[root@pl66-242 cert]# 

Create the config

set-cluster

[root@pl66-242 conf]# kubectl config set-cluster myk8s \
 --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
 --embed-certs=true \
 --server=https://10.10.66.250:7443 \
 --kubeconfig=kube-proxy.kubeconfig

Cluster "myk8s" set.
[root@pl66-242 conf]# 

set-credentials

[root@pl66-242 conf]# kubectl config set-credentials kube-proxy \
> --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
> --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
> --embed-certs=true \
> --kubeconfig=kube-proxy.kubeconfig

User "kube-proxy" set.

set-context

[root@pl66-242 conf]# kubectl config set-context myk8s-context \
> --cluster=myk8s \
> --user=kube-proxy \
> --kubeconfig=kube-proxy.kubeconfig

Context "myk8s-context" created.

[root@pl66-242 conf]# 

use-context

[root@pl66-242 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
Switched to context "myk8s-context".

Copy the config file to pl66-243.

[root@pl66-242 conf]# ll
total 28
-rw-r--r--. 1 root root 2223 Mar 13 01:14 audit.yaml
-rw-r--r--. 1 root root 1003 Mar 12 10:14 audit.yaml.bak
-rw-r--r--. 1 root root  259 Mar 16 06:03 k8s-node.yaml
-rw-------. 1 root root 6178 Mar 16 06:00 kubelet.kubeconfig
-rw-------. 1 root root 6197 Mar 16 10:06 kube-proxy.kubeconfig
[root@pl66-242 conf]# scp kube-proxy.kubeconfig root@10.10.66.243:/opt/kubernetes/server/bin/conf/
root@10.10.66.243's password: 
kube-proxy.kubeconfig                                                                                                    100% 6197     2.3MB/s   00:00    
[root@pl66-242 conf]# 

Create the kube-proxy startup script

  • Load the ipvs kernel modules

/root/ipvs.sh

#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  /sbin/modinfo -F filename $i &>/dev/null 
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done

Make it executable

chmod +x ipvs.sh

Run the script, then verify the modules are loaded:

[root@pl66-242 ~]# lsmod | grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_wlc              12519  0 
ip_vs_sh               12688  0 
ip_vs_sed              12519  0 
ip_vs_rr               12600  0 
ip_vs_pe_sip           12740  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 
ip_vs                 145497  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack          133095  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

Scheduling algorithms listed on the official LVS site:

Round-Robin Scheduling
Weighted Round-Robin Scheduling
Least-Connection Scheduling
Weighted Least-Connection Scheduling
Locality-Based Least Connections Scheduling
Locality-Based Least Connections with Replication Scheduling
Destination Hashing Scheduling
Source Hashing Scheduling
Shortest Expected Delay Scheduling
Never Queue Scheduling

  • Create the startup script

/opt/kubernetes/server/bin/kube-proxy.sh

[root@pl66-242 bin]# cat kube-proxy.sh 
#!/bin/bash
./kube-proxy \
    --cluster-cidr 172.16.0.0/16 \
    --hostname-override pl66-242.host.com \
    --proxy-mode=ipvs \
    --ipvs-scheduler=nq \
    --kubeconfig ./conf/kube-proxy.kubeconfig

Note: the kube-proxy startup script differs slightly on each host; adjust it when deploying the other nodes.

Check the config, set permissions, and create the log directory

[root@pl66-242 bin]# ll conf/ | grep kube-proxy
-rw-------. 1 root root 6197 Mar 16 10:06 kube-proxy.kubeconfig
[root@pl66-242 bin]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@pl66-242 bin]# mkdir -p /data/logs/kubernetes/kube-proxy

Create the supervisor config

/etc/supervisord.d/kube-proxy.ini

[program:kube-proxy--66-242]
command=/opt/kubernetes/server/bin/kube-proxy.sh
numprocs=1
directory=/opt/kubernetes/server/bin/
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
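
As with the other services, load the new program into supervisor before checking the log:

[root@pl66-242 supervisord.d]# supervisorctl update
[root@pl66-242 supervisord.d]# supervisorctl status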

View the proxy log

[root@pl66-242 supervisord.d]# tail -fn 200 /data/logs/kubernetes/kube-proxy/proxy.stdout.log

Install ipvsadm

[root@pl66-242 supervisord.d]# yum -y install ipvsadm
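
Once kube-proxy is running in ipvs mode, the virtual-server table it programs can be inspected (a sketch; the actual entries depend on the services defined):

[root@pl66-242 supervisord.d]# ipvsadm -Ln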

Verify the Kubernetes cluster

On any compute node, create a resource manifest

/root/nginx-ds.yaml

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.yw.com/public/nginx:1.9.1
        ports:
        - containerPort: 80

Apply the resource config and check

/root
[root@pl66-242 ~]# kubectl create -f nginx-ds.yaml

[root@pl66-242 ~]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
nginx-ds-mwp84   1/1     Running   0          
nginx-ds-qck7g   1/1     Running   0          
[root@pl66-242 ~]# 
[root@pl66-242 ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE         IP             NODE                NOMINATED NODE   READINESS GATES
nginx-ds-mwp84   1/1     Running   0             172.16.242.2   pl66-242.host.com              
nginx-ds-qck7g   1/1     Running   0             172.16.243.2   pl66-243.host.com              
[root@pl66-242 ~]# 

Verify

[root@pl66-242 ~]# curl 172.16.242.2

Welcome to nginx!

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.

[root@pl66-242 ~]#
