Two ways to deploy Kubernetes (k8s)

0. Server configuration

IP              host_name    ROLES
172.27.216.27   k8s-master   master
172.27.216.28   k8s-node1    node (01)
172.16.70.32    k8s-node2    node (02)

1. Initialize the Linux configuration

Run all of the following on every server that will be part of the Kubernetes cluster.

# Disable the firewall:
    systemctl stop firewalld
    systemctl disable firewalld
    
# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
setenforce 0 # temporary (current boot only)

# Disable swap
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent

# Reboot
reboot

# Set each host's hostname according to the plan (it must match the hosts file below)
hostnamectl set-hostname master

Add the IP addresses to the hosts file on the master:
cat >> /etc/hosts << EOF
172.27.216.27 k8s-master
172.27.216.28 k8s-node1
172.16.70.32 k8s-node2
EOF

# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
sysctl --system # apply

# Load the br_netfilter module
modprobe br_netfilter

# Verify that the module is loaded
lsmod | grep br_netfilter

# Re-apply the sysctl settings now that br_netfilter is loaded
sysctl --system
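# The modprobe above does not persist across reboots. On a systemd-based system such
# as CentOS 7, the module can also be loaded automatically at boot via modules-load.d
# (an extra step, not in the original notes):
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF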



# Time synchronization (note: stop the ntpd service first if it is running)
yum install ntpdate -y
ntpdate time.nist.gov


# Enable IPVS
yum -y install ipset ipvsadm
cat > /etc/sysconfig/modules/ipvs.modules << EOF
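#!/bin/bash
# Module list reconstructed here because the heredoc body above was truncated.
# This is the set commonly loaded for kube-proxy's IPVS mode on a CentOS 7 kernel;
# on kernels >= 4.19, load nf_conntrack instead of nf_conntrack_ipv4.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4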

2. Deployment with kubeadm

2.1 Install Docker

1. Install Docker:

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# Install the desired version
yum -y install docker-ce-18.06.3.ce-3.el7

systemctl enable docker && systemctl start docker

docker version

2. Configure the Docker registry mirror and cgroup driver

sudo mkdir -p /etc/docker

sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"], 
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF

sudo systemctl daemon-reload

sudo systemctl restart docker
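# To confirm the daemon.json took effect (in particular that Docker now reports the
# systemd cgroup driver the kubelet will be configured to use), a quick check:
docker info | grep -i "cgroup driver"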

3. Add the Aliyun YUM repository for Kubernetes

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

4. Install kubeadm, kubelet, and kubectl

# Install
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0

# To keep the cgroup driver used by the kubelet consistent with the one used by Docker, edit /etc/sysconfig/kubelet:
vim /etc/sysconfig/kubelet

# Set
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

# Just enable it at boot; kubeadm will start it later
systemctl enable kubelet

5. Initialize the Kubernetes master (run only on the master node)

kubeadm init \
--apiserver-advertise-address=172.27.216.27 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
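# Optional: the control-plane images can also be pre-pulled (typically before running
# kubeadm init) using the same repository and version flags; this is an extra step,
# not required by the procedure above.
kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0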

# Run on the master; these commands are printed in the output of kubeadm init
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

6. Join the worker nodes to the cluster

# Run on each worker node (this exact command is printed at the end of kubeadm init)
kubeadm join 172.27.216.27:6443 --token 4bwnx3.kvc4ans3r5zbwtw3     --discovery-token-ca-cert-hash sha256:7aeddf6aeb2c4e901497ab6a779333238f3a57e8bd06a2bf492467a0619bb765

# Check the nodes (on the master)
kubectl get nodes

The default token is valid for 24 hours. After it expires, generate a new join command with:

kubeadm token create --print-join-command
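# If you prefer to assemble the join command by hand, roughly equivalent steps are:
# create a new token, then derive the discovery hash from the kubeadm CA certificate.
kubeadm token create
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
| openssl rsa -pubin -outform der 2>/dev/null \
| openssl dgst -sha256 -hex | sed 's/^.* //'
# then: kubeadm join 172.27.216.27:6443 --token <new-token> --discovery-token-ca-cert-hash sha256:<hash>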

7. Deploy the CNI network plugin (run only on the master)

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Watch the progress of the CNI plugin rollout
kubectl get pods -n kube-system

# Once all pods are Running, check the node status
kubectl get nodes

## Sample output; STATUS should be Ready
[root@master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   33m   v1.18.0
k8s-node1    Ready    <none>   30m   v1.18.0
k8s-node2    Ready    <none>   30m   v1.18.0

3. Binary deployment (more tedious and easy to get wrong)

1. Deploy the etcd cluster

1.1 Prepare the certificate generation tool

cfssl is an open-source certificate management tool that generates certificates from JSON files and is easier to use than openssl. This can be run on any machine; here the master node is used.

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64 

mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
1.2 Generate the etcd certificates

# Create the working directories
Self-signed CA:
mkdir -p ~/TLS/{etcd,k8s} && cd ~/TLS/etcd
cat > ca-config.json<< EOF 
{
    "signing":{
        "default":{
            "expiry":"87600h"
        },
        "profiles":{
            "www":{
                "expiry":"87600h",
                "usages":[
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF
cat > ca-csr.json << EOF 
{
    "CN":"etcd CA",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"Beijing",
            "ST":"Beijing"
        }
    ]
}
EOF

# Generate the CA certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# Check
ls *pem


  1.3 Issue the etcd HTTPS certificate with the self-signed CA

    Create the certificate signing request file:

 cat > server-csr.json<< EOF 
 {
    "CN":"etcd",
    "hosts":[
        "172.27.216.27",
        "172.16.70.32",
        "172.27.216.28"
    ],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing"
        }
    ]
}
EOF
  Generate the server certificate:
# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

# Check
ls server*pem
2. Install and deploy etcd
  1. Download the etcd release binaries
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
  2. Create the working directory and unpack the binaries

    mkdir -p /opt/etcd/{bin,cfg,ssl}
    tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
    mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
    
  3. Create the etcd configuration file

    cat > /opt/etcd/cfg/etcd.conf << EOF
    
    #[Member]
    ETCD_NAME="etcd-1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://172.27.216.27:2380"
    ETCD_LISTEN_CLIENT_URLS="https://172.27.216.27:2379,http://127.0.0.1:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.27.216.27:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://172.27.216.27:2379"
    ETCD_INITIAL_CLUSTER="etcd-1=https://172.27.216.27:2380,etcd-2=https://172.27.216.28:2380,etcd-3=https://172.16.70.32:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    
    #[Security]
    ETCD_CERT_FILE="/opt/etcd/ssl/server.pem"
    ETCD_KEY_FILE="/opt/etcd/ssl/server-key.pem"
    ETCD_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
    ETCD_CLIENT_CERT_AUTH="true"
    ETCD_PEER_CERT_FILE="/opt/etcd/ssl/server.pem"
    ETCD_PEER_KEY_FILE="/opt/etcd/ssl/server-key.pem"
    ETCD_PEER_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
    ETCD_PEER_CLIENT_CERT_AUTH="true"
    EOF
    
    ######### Field reference ##########
    ETCD_NAME: node name, unique within the cluster
    ETCD_DATA_DIR: data directory
    ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
    ETCD_LISTEN_CLIENT_URLS: client listen address
    ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
    ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients
    ETCD_INITIAL_CLUSTER: addresses of all cluster members
    ETCD_INITIAL_CLUSTER_TOKEN: cluster token
    ETCD_INITIAL_CLUSTER_STATE: state when joining; "new" for a new cluster, "existing" to join an existing one
    
  4. Manage etcd with systemd

    # The quoted 'EOF' keeps the shell from expanding ${...} while writing the file;
    # systemd expands these variables from the EnvironmentFile at start time.
    cat > /usr/lib/systemd/system/etcd.service << 'EOF'
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/opt/etcd/cfg/etcd.conf
    ExecStart=/opt/etcd/bin/etcd \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
    --cert-file=${ETCD_CERT_FILE} \
    --key-file=${ETCD_KEY_FILE} \
    --peer-cert-file=${ETCD_PEER_CERT_FILE} \
    --peer-key-file=${ETCD_PEER_KEY_FILE} \
    --trusted-ca-file=${ETCD_TRUSTED_CA_FILE} \
    --client-cert-auth=${ETCD_CLIENT_CERT_AUTH} \
    --peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} \
    --peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
  5. Copy the certificates generated earlier into the target directory

      cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
    
  6. Start etcd and enable it at boot

    systemctl daemon-reload 
    systemctl start etcd 
    systemctl enable etcd
    
  7. Copy the configuration above to the other machines, adjust each node's own settings, and then start the service (see the example below).
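    A rough example for the second node (172.27.216.28 / etcd-2); the sed expressions are only a sketch and touch just the per-node fields:

    scp -r /opt/etcd/ [email protected]:/opt/
    scp /usr/lib/systemd/system/etcd.service [email protected]:/usr/lib/systemd/system/

    # On 172.27.216.28: change ETCD_NAME and the four *_URLS lines that carry this
    # node's own IP; ETCD_INITIAL_CLUSTER stays exactly the same on every node.
    sed -i '/^ETCD_NAME/s/etcd-1/etcd-2/' /opt/etcd/cfg/etcd.conf
    sed -i '/_URLS=/s/172.27.216.27/172.27.216.28/g' /opt/etcd/cfg/etcd.conf
    systemctl daemon-reload && systemctl start etcd && systemctl enable etcd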

  8. Check the etcd status

    systemctl status etcd  # command
    
    ####### Sample output #######
    ● etcd.service - Etcd Server
       Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
       Active: active (running) since 五 2021-07-09 12:04:28 CST; 39min ago
     Main PID: 2006 (etcd)
       CGroup: /system.slice/etcd.service
               └─2006 /opt/etcd/bin/etcd
    
    
  9. List the cluster members

    /opt/etcd/bin/etcdctl member list --write-out=table # command
    
    ###### Sample output #####
    +------------------+---------+--------+----------------------------+----------------------------+------------+
    |        ID        | STATUS  |  NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
    +------------------+---------+--------+----------------------------+----------------------------+------------+
    |  8c56ace40c5d858 | started | etcd-2 | https://172.27.216.28:2380 | https://172.27.216.28:2379 |      false |
    | 1dd6b1b56c862c6f | started | etcd-1 | https://172.27.216.27:2380 | https://172.27.216.27:2379 |      false |
    | 4d3d854caa726877 | started | etcd-3 |  https://172.16.70.32:2380 |  https://172.16.70.32:2379 |      false |
    +------------------+---------+--------+----------------------------+----------------------------+------------+
    

    Note that on the other nodes you must pass the matching certificates to etcdctl to get the correct information:

    /opt/etcd/bin/etcdctl --endpoints=https://172.27.216.27:2379 --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --cacert=/opt/etcd/ssl/ca.pem member list --write-out=table
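    To check the health of every member at once, the same certificate flags can be combined with an explicit endpoint list (a sketch, not part of the original steps):

    /opt/etcd/bin/etcdctl \
      --endpoints=https://172.27.216.27:2379,https://172.27.216.28:2379,https://172.16.70.32:2379 \
      --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem \
      endpoint health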
    
  10. The etcd deployment is now essentially complete.

2. Install Docker (Docker must be installed on every machine)

1. Option one: yum install docker
2. Option two: install from the static binary release, as follows

Download: https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz

  1. Unpack the binary package

    tar zxvf docker-19.03.9.tgz 
    mv docker/* /usr/bin
    
  2. Manage Docker with systemd

    cat > /usr/lib/systemd/system/docker.service << EOF 
    [Unit]
    Description=Docker Application Container Engine 
    Documentation=https://docs.docker.com 
    After=network-online.target firewalld.service 
    Wants=network-online.target
    
    [Service]
    Type=notify 
    ExecStart=/usr/bin/dockerd 
    ExecReload=/bin/kill -s HUP $MAINPID 
    LimitNOFILE=infinity 
    LimitNPROC=infinity 
    LimitCORE=infinity
    TimeoutStartSec=0
    Delegate=yes
    KillMode=process
    Restart=on-failure
    StartLimitBurst=3 
    StartLimitInterval=60s
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
  3. Create the configuration file (Aliyun registry mirror)

    mkdir -p /etc/docker
    cat > /etc/docker/daemon.json << EOF 
    {
     "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
    }
    EOF
    
  4. Start Docker and enable it at boot

    systemctl daemon-reload
    systemctl start docker 
    systemctl enable docker
     
    
  5. Docker installation is complete

3. Deploy the Master Node

1. Generate the kube-apiserver certificates
1.1 Self-signed certificate authority (CA)
# Work in the k8s TLS directory created earlier (otherwise these files would overwrite the etcd ones)
cd ~/TLS/k8s

cat > ca-config.json << EOF
{
    "signing":{
        "default":{
            "expiry":"87600h"
        },
        "profiles":{
            "kubernetes":{
                "expiry":"87600h",
                "usages":[
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF

cat > ca-csr.json<< EOF 
{
    "CN":"kubernetes",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"Beijing",
            "ST":"Beijing",
            "O":"k8s",
            "OU":"System"
        }
    ]
}
EOF

Generate the CA certificate:

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

1.2 Issue the kube-apiserver HTTPS certificate with the self-signed CA

Create the certificate signing request file:

cat > server-csr.json<< EOF 
{
    "CN":"kubernetes",
    "hosts":[
        "10.0.0.1",
        "127.0.0.1",
        "172.27.216.27",
        "172.27.216.28",
        "172.16.70.32",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"k8s",
            "OU":"System"
        }
    ]
}
EOF

Generate the server certificate:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
2. Install the Kubernetes binaries

Find the matching release on GitHub: https://github.com/kubernetes/kubernetes

Note: the release page lists many packages; downloading the server package alone is enough, since it contains the binaries for both the master and the worker nodes.

Unpack the binary package:

mkdir -p /opt/k8s/{bin,cfg,ssl,logs}
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager /opt/k8s/bin
cp kubectl /usr/bin/
3. Deploy kube-apiserver
3.1 Create the configuration file:
cat > /opt/k8s/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="\\
--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--etcd-servers=https://172.27.216.27:2379,https://172.27.216.28:2379,https://172.16.70.32:2379 \\
--bind-address=172.27.216.27 \\
--secure-port=6443 \\
--advertise-address=172.27.216.27 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/k8s/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/k8s/ssl/server.pem \\
--kubelet-client-key=/opt/k8s/ssl/server-key.pem \\
--tls-cert-file=/opt/k8s/ssl/server.pem \\
--tls-private-key-file=/opt/k8s/ssl/server-key.pem \\
--client-ca-file=/opt/k8s/ssl/ca.pem \\
--service-account-key-file=/opt/k8s/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/k8s/logs/k8s-audit.log \\
"
EOF


################# Flag reference
--logtostderr: log to stderr (false here, so logs go to files)
--v: log verbosity level
--log-dir: log directory
--etcd-servers: etcd cluster addresses
--bind-address: listen address
--secure-port: HTTPS secure port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: Service virtual IP range
--enable-admission-plugins: admission control plugins
--authorization-mode: authorization modes; enables RBAC and Node self-management
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: port range allocated to NodePort Services
--kubelet-client-xxx: client certificates the apiserver uses to reach the kubelet
--tls-xxx-file: apiserver HTTPS certificates
--etcd-xxxfile: certificates for connecting to the etcd cluster
--audit-log-xxx: audit log settings

Note: if something goes wrong here, check that the escaped backslashes (\\) in the heredoc survived intact.

3.2 Copy the certificates:
cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/k8s/ssl/
3.3 Enable the TLS Bootstrapping mechanism:
TLS bootstrapping: once the apiserver has TLS authentication enabled, the kubelet and kube-proxy on every node must present valid CA-signed certificates to talk to kube-apiserver. When there are many nodes, issuing these client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes introduced TLS bootstrapping to issue client certificates automatically: the kubelet connects to the apiserver as a low-privileged user and requests a certificate, which the apiserver then signs dynamically.
Using this mechanism on the nodes is strongly recommended. Here it is used only for the kubelet; kube-proxy still gets a single certificate that we issue ourselves.
3.4 Create the token file
# Generate a random token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '

# Write the token file
cat > /opt/k8s/cfg/token.csv << EOF
de49791d71b499e45a25d07185d07fe8,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF

## Format:
token, user name, UID, user group
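# A small sketch tying the two steps together: write a freshly generated token
# straight into the file (the token value itself is arbitrary hex).
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/k8s/cfg/token.csv << EOF
${TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF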
3.5 Manage kube-apiserver with systemd
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF 
[Unit]
Description=Kubernetes API Server 
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-apiserver.conf
ExecStart=/opt/k8s/bin/kube-apiserver \$KUBE_APISERVER_OPTS 
Restart=on-failure

[Install] 
WantedBy=multi-user.target 
EOF
3.6 Start kube-apiserver and enable it at boot
systemctl daemon-reload 
systemctl start kube-apiserver 
systemctl enable kube-apiserver
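# A quick sanity check that the apiserver came up; the kube-apiserver.INFO file name
# below follows the usual klog symlink convention and is an assumption, not output
# from the steps above.
systemctl status kube-apiserver
ss -lntp | grep 6443
tail -n 20 /opt/k8s/logs/kube-apiserver.INFO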
3.7 Authorize the kubelet-bootstrap user to request certificates
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
4. Deploy kube-controller-manager
4.1 Create the configuration file
cat > /opt/k8s/cfg/kube-controller-manager.conf << EOF 
KUBE_CONTROLLER_MANAGER_OPTS="\\
--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/k8s/ssl/ca.pem \\
--cluster-signing-key-file=/opt/k8s/ssl/ca-key.pem \\
--root-ca-file=/opt/k8s/ssl/ca.pem \\
--service-account-private-key-file=/opt/k8s/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
"
EOF
############## Flag reference
--master: connect to the apiserver over the local insecure port 8080
--leader-elect: enable leader election when multiple instances run (HA)

--cluster-signing-cert-file / --cluster-signing-key-file: CA used to automatically issue kubelet certificates; must match the apiserver's CA

4.2 Manage controller-manager with systemd
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service] 
EnvironmentFile=/opt/k8s/cfg/kube-controller-manager.conf 
ExecStart=/opt/k8s/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install] 
WantedBy=multi-user.target 
EOF
4.3 Start and enable at boot
systemctl daemon-reload
systemctl start kube-controller-manager 
systemctl enable kube-controller-manager
5. Deploy kube-scheduler
5.1 Create the configuration file
cat > /opt/k8s/cfg/kube-scheduler.conf << EOF 
KUBE_SCHEDULER_OPTS="\\
--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF

###### Flag reference
--master: connect to the apiserver over the local insecure port 8080
--leader-elect: enable leader election when multiple instances run (HA)
5.2 Manage the scheduler with systemd
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF 
[Unit]
Description=Kubernetes Scheduler 
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-scheduler.conf 
ExecStart=/opt/k8s/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
5.3 Start and enable at boot
systemctl daemon-reload 
systemctl start kube-scheduler 
systemctl enable kube-scheduler
6. Check the cluster status
6.1 With all components started, use kubectl to check the status of the cluster components:
kubectl get cs

##### Sample output
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}

4. Deploy the Worker Node

The steps below are still performed on the master node, which doubles as a worker node.

1. Create the working directory and copy the binaries
cd kubernetes/server/bin
cp kubelet kube-proxy /opt/k8s/bin # local copy
2. Deploy the kubelet
1. Create the configuration file
cat > /opt/k8s/cfg/kubelet.conf << EOF 
KUBELET_OPTS="\\
--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--hostname-override=k8s-master \\
--network-plugin=cni \\
--kubeconfig=/opt/k8s/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/k8s/cfg/bootstrap.kubeconfig \\
--config=/opt/k8s/cfg/kubelet-config.yml \\
--cert-dir=/opt/k8s/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

###### Flag reference
--hostname-override: display name, unique within the cluster
--network-plugin: enable CNI
--kubeconfig: empty path; the file is generated automatically and is later used to talk to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: kubelet configuration parameter file
--cert-dir: directory where the kubelet certificate is generated
--pod-infra-container-image: image that manages the Pod network namespace (pause)
2. Create the kubelet parameter file
cat > /opt/k8s/cfg/kubelet-config.yml << EOF 
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/k8s/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
3. Generate the bootstrap.kubeconfig file
KUBE_APISERVER="https://172.27.216.27:6443" # apiserver IP:PORT
TOKEN="de49791d71b499e45a25d07185d07fe8" # must match the token in token.csv

# Generate the kubelet bootstrap kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
4. Copy it to the configuration directory
cp bootstrap.kubeconfig /opt/k8s/cfg
5. Manage the kubelet with systemd
cat > /usr/lib/systemd/system/kubelet.service << EOF 
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/k8s/cfg/kubelet.conf
ExecStart=/opt/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

6. Start the kubelet and enable it at boot
systemctl daemon-reload 
systemctl start kubelet 
systemctl enable kubelet
7. Approve the kubelet certificate request and join the node to the cluster
# View pending kubelet certificate requests
kubectl get csr

####### Sample output
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-yPdpMAQKEse8SuDuey9uJ-LMeuTRaBcsVeNrW6rnfYw   2m38s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

# Approve the request
kubectl certificate approve node-csr-yPdpMAQKEse8SuDuey9uJ-LMeuTRaBcsVeNrW6rnfYw

# Check the nodes
kubectl get nodes

5. Deploy kube-proxy

1. Create the configuration file
cat > /opt/k8s/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--config=/opt/k8s/cfg/kube-proxy-config.yml"
EOF
2. Create the parameter file
cat > /opt/k8s/cfg/kube-proxy-config.yml << EOF 
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
    kubeconfig: /opt/k8s/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master
clusterCIDR: 10.0.0.0/24
EOF
3. Generate the kube-proxy.kubeconfig file
3.1 Generate the kube-proxy certificate
# Switch to the working directory
cd ~/TLS/k8s

# Create the certificate signing request file
cat > kube-proxy-csr.json<< EOF 
{
    "CN":"system:kube-proxy",
    "hosts":[

    ],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"k8s",
            "OU":"System"
        }
    ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

# Check the generated certificate files
ls kube-proxy*pem 

kube-proxy-key.pem kube-proxy.pem
3.2 Generate the kubeconfig
KUBE_APISERVER="https://172.27.216.27:6443" 
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig


kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
4. Copy the kubeconfig to the configuration directory
cp kube-proxy.kubeconfig /opt/k8s/cfg/
5. Manage kube-proxy with systemd
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service] 
EnvironmentFile=/opt/k8s/cfg/kube-proxy.conf 
ExecStart=/opt/k8s/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
6. Start kube-proxy and enable it at boot
systemctl daemon-reload 
systemctl start kube-proxy 
systemctl enable kube-proxy

6. Deploy the CNI network

Download page: https://github.com/containernetworking/plugins/releases

Pick the matching version; v0.8.6 is used here (a download example follows).
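A download example; the asset name below follows the project's release naming for v0.8.6:

wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz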

1. Unpack the binary package into the default working directory:
mkdir -p /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
2. Deploy the flannel CNI network
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

3. Check the node status
kubectl get node
4. Authorize the apiserver to access the kubelet
cat > apiserver-to-kubelet-rbac.yaml<< EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
5. Apply the RBAC manifest
kubectl apply -f apiserver-to-kubelet-rbac.yaml

7. Add a new worker node

1. Copy the already-deployed node files to the new node
scp -r /opt/k8s [email protected]:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service [email protected]:/usr/lib/systemd/system
scp -r /opt/cni/ [email protected]:/opt/
scp /opt/k8s/ssl/ca.pem [email protected]:/opt/k8s/ssl/
2. On the new node, delete the copied kubelet certificate and kubeconfig files
rm /opt/k8s/cfg/kubelet.kubeconfig
rm -f /opt/k8s/ssl/kubelet*

Note: these files are generated automatically after the certificate request is approved; they are unique to each node and must be deleted so they can be regenerated.

3. Update the hostname overrides
vi /opt/k8s/cfg/kubelet.conf
--hostname-override=k8s-node1

vi /opt/k8s/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node1
4. Start and enable at boot
systemctl daemon-reload

systemctl start kubelet
systemctl enable kubelet

systemctl start kube-proxy
systemctl enable kube-proxy
5. On the master, approve the new node's kubelet certificate request
kubectl get csr

kubectl certificate approve node-csr-4zTjsaVSrhuyhIGqsefxzVoZDCNKei-aE2jyTP81Uro
6. Check the nodes
kubectl get nodes
