1. Architecture
OS: CentOS 7.6
Kernel: 3.10.0-957.el7.x86_64
Kubernetes: v1.14.1
Docker-ce: 18.09.5
Recommended hardware: 4 cores, 8 GB RAM
Keepalived provides a highly available IP for the apiserver; haproxy load-balances the apiserver instances.
2. Node Information
The test environment consists of 6 virtual machines. etcd is installed from RPM and Kubernetes from binaries, both managed by systemd; flannel is the network component, the masters are set up for HA, and RBAC is enabled on the cluster. The masters do not run regular pods. Steps such as certificate distribution are executed from a separate host that has SSH key login configured for every node in the cluster. The base environment is as follows:
| hostname | ip | components | memory | cpu |
| --- | --- | --- | --- | --- |
| node-01 | 172.19.8.111 | kube-apiserver, kube-controller-manager, etcd, haproxy, keepalived | 8G | 4c |
| node-02 | 172.19.8.112 | kube-apiserver, kube-controller-manager, etcd, haproxy, keepalived | 8G | 4c |
| node-03 | 172.19.8.113 | kube-apiserver, kube-controller-manager, etcd | 8G | 4c |
| node-04 | 172.19.8.114 | node | 8G | 4c |
| node-05 | 172.19.8.115 | node | 8G | 4c |
| node-06 | 172.19.8.116 | node | 8G | 4c |
| VIP | 172.19.8.250 | | | |
3. Pre-deployment Preparation
3.1 Disable firewalld and SELinux
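The original lists no commands for this step; a typical sequence on CentOS 7 is:
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # permanent, takes effect after reboot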
3.2 Disable swap
[root@node-01 ~]# swapoff -a
Note: also edit /etc/fstab and comment out the swap entry, otherwise swap comes back after a reboot.
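A sketch that comments out the swap entry in place (assumes the stock CentOS fstab layout):
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab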
3.3 Add hosts entries
[root@node-01 ~]# cat >>/etc/hosts<<EOF
172.19.8.111 node-01
172.19.8.112 node-02
172.19.8.113 node-03
172.19.8.114 node-04
172.19.8.115 node-05
172.19.8.116 node-06
EOF
3.4 Set up SSH so node-01 can log in to the other servers without a password
[root@node-01 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:uckCmzy46SfU6Lq9jRbugn0U8vQsr5H+PtfGBsvrfCA root@node-01
The key's randomart image is:
+---[RSA 2048]----+
|                 |
|                 |
|                 |
| . o .           |
|  *.+ S          |
| +o==E.oo        |
|.=.oBo.o+*       |
|o.**oooo+ *      |
|oBO=++o++=       |
+----[SHA256]-----+
Distribute node-01's public key for passwordless login to the other servers:
[root@node-01 ~]# for n in `seq -w 01 06`;do ssh-copy-id node-$n;done
3.5 Configure kernel parameters. A reboot is required afterwards, otherwise initialization will fail later.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
Handling the error below: it occurs because the bridge network does not exist yet; it only appears after Docker is installed and started.
[root@node-01 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
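Alternatively, the two bridge sysctls become available as soon as the br_netfilter kernel module is loaded, without waiting for Docker:
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf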
3.6 If kube-proxy will run in ipvs mode, load the ipvs modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
3.7 Add yum repositories
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
Since the Google repository cannot be reached from mainland China, the Aliyun mirror can be used instead:
$ cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
wget http://mirrors.aliyun.com/repo/Centos-7.repo -O /etc/yum.repos.d/CentOS-Base.repo
wget http://mirrors.aliyun.com/repo/epel-7.repo -O /etc/yum.repos.d/epel.repo
All of the steps above must be executed on every node.
4. Deploy keepalived and haproxy
4.1 Install keepalived and haproxy on node-01 and node-02
$ yum install -y keepalived haproxy
4.2 Configure keepalived
node-01 configuration:
[root@node-01 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
     995958026@qq.com
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id node-01
}

vrrp_script check_apiserver {
    script "/workspace/crontab/check_apiserver"
    interval 5
    weight -20
    fall 3
    rise 1
}

vrrp_instance VIP_250 {
    state MASTER
    interface eth0
    virtual_router_id 250
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 890iop
    }
    track_script {
        check_apiserver
    }
    virtual_ipaddress {
        172.19.8.250
    }
}
Health-check script:
$ cat /workspace/crontab/check_apiserver
#!/bin/bash
curl 127.0.0.1:8080 &>/dev/null
if [ $? -eq 0 ];then
    exit 0
else
    #systemctl stop keepalived
    exit 1
fi
$ chmod 755 /workspace/crontab/check_apiserver
node-02 configuration:
[root@node-02 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
     435002493@qq.com
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id node-02
}

vrrp_instance VI_250 {
    state BACKUP
    interface eth0
    virtual_router_id 250
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 890iop
    }
    virtual_ipaddress {
        172.19.8.250
    }
}
4.3 Configure haproxy
[root@node-01 ~]# cat /etc/haproxy/haproxy.cfg
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    # log warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8

defaults
    log global
    mode tcp
    retries 3
    option redispatch

listen https-apiserver
    bind 0.0.0.0:8443
    mode tcp
    balance roundrobin
    timeout server 900s
    timeout connect 15s
    server apiserver01 172.19.8.111:6443 check port 6443 inter 5000 fall 5
    server apiserver02 172.19.8.112:6443 check port 6443 inter 5000 fall 5
    server apiserver03 172.19.8.113:6443 check port 6443 inter 5000 fall 5
4.4 Start the services
systemctl enable keepalived && systemctl start keepalived
systemctl enable haproxy && systemctl start haproxy
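A quick way to confirm the VIP and the load balancer are up (run on node-01):
ip addr show eth0 | grep 172.19.8.250   # the VIP should be bound on the MASTER node
ss -tlnp | grep 8443                    # haproxy should be listening on port 8443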
5 Configure Certificates
Create the cluster CA and certificates.
5.1 Certificate overview
In this part, client and server certificates are generated for each component, along with a client certificate for the Kubernetes admin user. Since both etcd and Kubernetes communicate entirely over TLS, the TLS certificates must be generated first. cfssl is the certificate tool; generation can be done on any node, and here it is done on the distribution host. The certificate list is as follows:
| Certificate | Config files | Purpose |
| --- | --- | --- |
| etcd-root-ca.pem | etcd-root-ca-csr.json | etcd root CA certificate |
| etcd.pem | etcd-gencert.json, etcd-csr.json | etcd cluster certificate |
| k8s-root-ca.pem | k8s-root-ca-csr.json | k8s root CA certificate |
| kube-proxy.pem | k8s-gencert.json, kube-proxy-csr.json | certificate used by kube-proxy |
| admin.pem | k8s-gencert.json, admin-csr.json | certificate used by kubectl |
| kubernetes.pem | k8s-gencert.json, kubernetes-csr.json | certificate used by kube-apiserver |
5.2 Install the cfssl tools
First download cfssl, make the binaries executable, and move them into a directory on PATH:
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
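Confirm the tools are on PATH:
cfssl version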
5.3 Generate the etcd certificates
Note: all certificates need to be generated only once, on any single host; here this is done on node-01.
etcd-csr.json # CSR for the etcd cluster certificate
cat <<EOF > etcd-csr.json
{
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "O": "etcd",
      "OU": "etcd Security",
      "L": "Beijing",
      "ST": "Beijing",
      "C": "CN"
    }
  ],
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "localhost",
    "172.19.8.111",
    "172.19.8.112",
    "172.19.8.113"
  ]
}
EOF
etcd-gencert.json # signing configuration used when issuing certificates from the CA
cat <<EOF > etcd-gencert.json
{
  "signing": {
    "default": {
      "usages": [
        "signing",
        "key encipherment",
        "server auth",
        "client auth"
      ],
      "expiry": "87600h"
    }
  }
}
EOF
etcd-root-ca-csr.json # CSR for the etcd root CA
cat <<EOF > etcd-root-ca-csr.json
{
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "names": [
    {
      "O": "etcd",
      "OU": "etcd Security",
      "L": "Beijing",
      "ST": "Beijing",
      "C": "CN"
    }
  ],
  "CN": "etcd-root-ca"
}
EOF
Finally, generate the certificates:
cfssl gencert --initca=true etcd-root-ca-csr.json | cfssljson --bare etcd-root-ca
cfssl gencert --ca etcd-root-ca.pem --ca-key etcd-root-ca-key.pem --config etcd-gencert.json etcd-csr.json | cfssljson --bare etcd
When finished, delete the files that are no longer needed:
rm -rf *.json
Confirm that /etc/etcd/ssl contains the following files:
etcd.csr etcd-key.pem etcd.pem etcd-root-ca.csr etcd-root-ca-key.pem etcd-root-ca.pem
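Optionally, inspect the issued certificate to confirm the expected SANs were embedded (assuming openssl is available):
openssl x509 -in etcd.pem -noout -text | grep -A 1 'Subject Alternative Name'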
5.4 Generating the Kubernetes certificates requires kubectl; this is covered in the Kubernetes installation below.
6 Install etcd
6.1 Install etcd on node-01, node-02, and node-03
$ yum -y install etcd
6.2 Deploy the certificates on all etcd nodes
$ mkdir /etc/etcd/ssl
$ scp *.pem /etc/etcd/ssl
$ chown -R etcd:etcd /etc/etcd/ssl
$ chmod -R 644 /etc/etcd/ssl/*
$ chmod 755 /etc/etcd/ssl
6.3 Fix the ownership of the etcd data and config directories
$ chown -R etcd:etcd /var/lib/etcd /etc/etcd
6.4 Then edit the configuration as follows (the other two nodes are similar; only the listen addresses and ETCD_NAME need to change):
[root@node-01 kubernetes]# cat /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://172.19.8.111:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.19.8.111:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.19.8.111:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.19.8.111:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://172.19.8.111:2380,etcd2=https://172.19.8.112:2380,etcd3=https://172.19.8.113:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_PEER_AUTO_TLS="true"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
6.5 Finally, start the cluster and test it:
$ systemctl daemon-reload
$ systemctl start etcd
$ systemctl enable etcd
[root@node-01 ~]# etcdctl --ca-file=/etc/etcd/ssl/etcd-root-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.19.8.111:2379,https://172.19.8.112:2379,https://172.19.8.113:2379 cluster-health
member 20eaf623ac3b75e0 is healthy: got healthy result from https://172.19.8.112:2379
member 49a1ffa1fb4e8c74 is healthy: got healthy result from https://172.19.8.111:2379
member d6afeec04f7054b4 is healthy: got healthy result from https://172.19.8.113:2379
cluster is healthy
If the health check fails, troubleshoot via the logs, paying particular attention to each etcd node's configuration.
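For example, a failing member's logs can be inspected with:
journalctl -u etcd --no-pager | tail -n 50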
7 Install Kubernetes
7.1 Generate the Kubernetes certificates
Download the packages:
# master
wget https://dl.k8s.io/v1.14.0/kubernetes-server-linux-amd64.tar.gz
cp kubernetes/server/bin/{kube-apiserver,kube-scheduler,kube-controller-manager} /bin/
# node
wget https://dl.k8s.io/v1.14.0/kubernetes-node-linux-amd64.tar.gz
# Extract the downloaded packages, then copy the binaries from server/bin/ and node/bin/ to /bin/ on the respective hosts.
In this part, client and server certificates are generated for each component, along with a client certificate for the Kubernetes admin user.
#!/bin/bash
# Deploy the master node.
KUBE_SSL=/etc/kubernetes/ssl
mkdir -p $KUBE_SSL
# Create CA.
cat>$KUBE_SSL/ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat>$KUBE_SSL/ca-csr.json<<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "IMG"
    }
  ]
}
EOF
# "10.254.0.1" must be listed in hosts: it is the first IP of --service-cluster-ip-range
# (10.254.0.0/16 here); adjust it if your service CIDR differs (e.g. 192.168.0.1).
cat>$KUBE_SSL/server-csr.json<<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.254.0.1",
    "127.0.0.1",
    "172.19.8.111",
    "172.19.8.112",
    "172.19.8.113",
    "172.19.8.114",
    "172.19.8.115",
    "172.19.8.116",
    "172.19.8.250",
    "localhost",
    "apiclient",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "IMG"
    }
  ]
}
EOF
cd $KUBE_SSL
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# Create the kube-proxy client certificate.
cat>$KUBE_SSL/kube-proxy-csr.json<<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "IMG"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls *.pem
cd ~
# Create a token file.
cat>/etc/kubernetes/token.csv<<EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
7.2 Install kube-apiserver on node-01
[root@node-01 kubernetes]# cat apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--etcd-servers=https://172.19.8.111:2379,https://172.19.8.112:2379,https://172.19.8.113:2379 \
--v=4 \
--bind-address=172.19.8.111 \
--secure-port=6443 \
--advertise-address=172.19.8.111 \
--allow-privileged=true \
--service-cluster-ip-range=10.254.0.0/16 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
Configure the systemd unit:
# Create the kube-apiserver service.
$ cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/apiserver.conf
ExecStart=/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

$ systemctl daemon-reload
$ systemctl enable kube-apiserver.service --now
$ systemctl status kube-apiserver.service
7.3 Install kube-controller-manager on node-01
$ cat /etc/kubernetes/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.254.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
Configure the systemd unit:
$ cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

$ systemctl daemon-reload
$ systemctl enable kube-controller-manager.service --now
$ systemctl status kube-controller-manager.service
7.4 Install kube-scheduler on node-01
[root@node-01 kubernetes]# cat /etc/kubernetes/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect"
Configure the systemd unit:
$ cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

$ systemctl daemon-reload
$ systemctl enable kube-scheduler.service --now
$ systemctl status kube-scheduler.service
Check the component status:
[root@node-01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
Apply the same configuration to node-02 and node-03, changing the IP addresses.
8 Deploy the Worker Nodes
8.1 Create the bootstrap and kube-proxy kubeconfig files
$ cat nodeprepare.sh
#!/bin/bash
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
KUBE_SSL=/etc/kubernetes/ssl/
KUBE_APISERVER="https://172.19.8.250:8443"
cd $KUBE_SSL
# Set cluster parameters.
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
# Set client parameters.
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
# Set context parameters.
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
# Set context.
kubectl config use-context default --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
# Create kube-proxy kubeconfig file.
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem \
  --client-key=./kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
cd ~
# Bind kubelet-bootstrap user to system cluster roles.
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
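The bootstrap.kubeconfig and kube-proxy.kubeconfig generated above must reach every worker node; a minimal distribution sketch using the node list from section 2:
for n in node-04 node-05 node-06; do
  scp /etc/kubernetes/bootstrap.kubeconfig /etc/kubernetes/kube-proxy.kubeconfig $n:/etc/kubernetes/
done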
8.2 Configure kube-proxy and kubelet
8.2.1 Configure kube-proxy, using node-04 as the example
[root@node-04 kubernetes]# cat /etc/kubernetes/kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=false \
--v=4 \
--hostname-override=172.19.8.114 \
--cluster-cidr=10.254.0.0/16 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig"
Configure the systemd unit:
[root@node-04 kubernetes]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy.conf
ExecStart=/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
Start it:
[root@node-04 kubernetes]# systemctl daemon-reload
[root@node-04 kubernetes]# systemctl start kube-proxy.service
[root@node-04 kubernetes]# systemctl enable kube-proxy.service
[root@node-04 kubernetes]# systemctl status kube-proxy.service -l
8.2.2 Configure kubelet
[root@node-04 kubernetes]# cat /etc/kubernetes/kubelet.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 172.19.8.114
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.254.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
[root@node-04 kubernetes]# cat /etc/kubernetes/kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=4 \
--hostname-override=172.19.8.114 \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--config=/etc/kubernetes/kubelet.yaml \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=gcr.io/google-containers/pause-amd64:3.0"
Configure the systemd unit:
[root@node-04 kubernetes]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
Start it:
[root@node-04 kubernetes]# systemctl daemon-reload
[root@node-04 kubernetes]# systemctl enable kubelet.service --now
[root@node-04 kubernetes]# systemctl status kubelet.service -l
Apply the same configuration to node-05 and node-06, remembering to change the IP addresses.
After kubelet starts, it submits a registration request (CSR) to the apiserver; the request must be approved before the node can join the cluster.
[root@node-01 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-K_cecykFJ_g1cAJf5DTJ_EVFUH3W9QgfulMA-TbFoMo   45s   kubelet-bootstrap   Pending
[root@node-01 ~]# kubectl certificate approve node-csr-K_cecykFJ_g1cAJf5DTJ_EVFUH3W9QgfulMA-TbFoMo
certificatesigningrequest.certificates.k8s.io/node-csr-K_cecykFJ_g1cAJf5DTJ_EVFUH3W9QgfulMA-TbFoMo approved
[root@node-01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
172.19.8.114   Ready    <none>   12s   v1.14.1
The node has now joined the cluster.
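When several kubelets register at once, all pending CSRs can be approved in one pass; a convenience one-liner (use only when every request is expected):
kubectl get csr -o name | xargs kubectl certificate approve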
9 Deploy the flannel Network Plugin on All Nodes
9.1 Check the etcd cluster health
[root@node-01 ~]# etcdctl --ca-file=/etc/etcd/ssl/etcd-root-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.19.8.111:2379,https://172.19.8.112:2379,https://172.19.8.113:2379 cluster-health
member 20eaf623ac3b75e0 is healthy: got healthy result from https://172.19.8.112:2379
member 49a1ffa1fb4e8c74 is healthy: got healthy result from https://172.19.8.111:2379
member d6afeec04f7054b4 is healthy: got healthy result from https://172.19.8.113:2379
cluster is healthy
9.2 Create the flannel network configuration in etcd
export ETCDCTL_API=2
etcdctl --ca-file=/etc/etcd/ssl/etcd-root-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.19.8.111:2379,https://172.19.8.112:2379,https://172.19.8.113:2379 set /coreos.com/network/config '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
The flannel network was originally created with the etcd v3 API, but flannel then kept failing at startup with the error below; switching to the v2 API solved the problem.
# Writing the predetermined subnet config (done on a master node).
# Note: ETCDCTL_API=3 was used at first; flannel then failed to read the config from etcd
# and kept reporting: Couldn't fetch network config: 100: Key not found
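To confirm the key is visible through the v2 API (the one flannel reads), it can be read back with the same TLS flags:
etcdctl --ca-file=/etc/etcd/ssl/etcd-root-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.19.8.111:2379 get /coreos.com/network/config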
9.3 Download flannel
# https://github.com/coreos/flannel/releases
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
# The tarball contains flanneld and mk-docker-opts.sh
tar -xf flannel-v0.11.0-linux-amd64.tar.gz
mv {flanneld,mk-docker-opts.sh} /bin/
9.4 Copy the etcd certificates to each worker node, into /etc/etcd/ssl
[root@node-04 ~]# cd /etc/etcd/ssl/
[root@node-04 ssl]# ls
etcd-key.pem  etcd.pem  etcd-root-ca-key.pem  etcd-root-ca.pem
9.5 Configure flannel
[root@node-04 kubernetes]# cat /etc/kubernetes/flannel.conf
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="--etcd-endpoints=https://172.19.8.111:2379,https://172.19.8.112:2379,https://172.19.8.113:2379 -etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem -etcd-certfile=/etc/etcd/ssl/etcd.pem -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
Configure the systemd unit:
[root@node-04 kubernetes]# cat /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/flannel.conf
ExecStart=/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
9.6 Modify the docker systemd unit
# Modify the docker service.
cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
Start:
# Start or restart related services.
systemctl daemon-reload
systemctl enable flanneld --now
systemctl restart docker
systemctl status flanneld
systemctl status docker
Check the created network:
[root@node-04 kubernetes]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:d7:b7:9e:55:00 brd ff:ff:ff:ff:ff:ff
    inet 172.19.8.114/24 brd 172.19.8.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
3: docker0: mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:88:36:c8:89 brd ff:ff:ff:ff:ff:ff
    inet 10.244.82.1/24 brd 10.244.82.255 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel.1: mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether da:b3:f5:69:bf:de brd ff:ff:ff:ff:ff:ff
    inet 10.244.82.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever

[root@node-04 ~]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=10.244.16.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=10.244.16.1/24 --ip-masq=false --mtu=1450"
10 Deploy nginx as a Quick Test
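The original does not reproduce nginx-dep.yml, which is applied below; a minimal manifest consistent with the pod names that follow might look like this (image tag is an assumption):
cat <<EOF > nginx-dep.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.15
        ports:
        - containerPort: 80
EOF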
[root@node-01 ~]# kubectl apply -f nginx-dep.yml
[root@node-01 ~]# kubectl get pods
NAME                               READY   STATUS              RESTARTS   AGE
nginx-deployment-6dd86d77d-txhmg   0/1     ContainerCreating   0          11s
nginx-deployment-6dd86d77d-vc4mx   0/1     ContainerCreating   0          11s
# Check why the pods failed to start:
[root@node-01 ~]# kubectl describe pod nginx-deployment-6dd86d77d-txhmg
Warning  FailedCreatePodSandBox  50s  kubelet, 172.19.8.116  Failed create pod sandbox: rpc error: code = Unknown desc = failed pulling image "img.cn/google-containers/pause-amd64:3.0": Error response from daemon: Get https://img.cn/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
# Cause: the --pod-infra-container-image address configured in kubelet.conf cannot be pulled.
11 Configure CoreDNS
11.1 Add an anonymous auth binding
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
11.2 Configure CoreDNS
[root@node-01 ~]# cat coredns.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.5.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
Deploy CoreDNS:
[root@node-01 ~]# kubectl apply -f coredns.yml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.extensions/coredns created
service/coredns created
[root@node-01 ~]# kubectl get pod -A
NAMESPACE     NAME                       READY   STATUS    RESTARTS   AGE
kube-system   coredns-76458c6948-t5f2b   1/1     Running   0          103s
kube-system   coredns-76458c6948-v56tg   1/1     Running   0          103s
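A quick in-cluster DNS check (a throwaway busybox pod; the image tag is an assumption):
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default
A successful lookup resolves to the kubernetes service IP, 10.254.0.1 in this cluster.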
12 Integrate the Cluster into Rancher
Only after everything above is complete can the cluster be imported into Rancher.
curl --insecure -sfL https://172.19.2.25:8443/v3/import/f6knmcb7pjgtdppckkdgvfz24gbdcfhpg7npjr2xh4bt9xhxlv8j89.yaml | kubectl apply -f -
All done!