docker-ce Version:      17.09.0-ce

This version has a bug: memory limits must not be specified in the yml or json files, otherwise the service fails to start its container. Before applying any of the yml files below, remove the memory resource-limit settings (i.e. any resources.limits.memory field) and they will run normally.

kube-apiserver, kube-controller-manager and kube-scheduler run locally as binaries; 10.64.221.244 acts as both master and node.


master:

master-2347205.lvs01.dev.ebayc3.com    10.64.221.244

node:

node01-2646823.slc01.dev.ebayc3.com    10.65.146.152

node002-1934005.phx02.dev.ebayc3.com    10.147.190.81


* Initialize the environment

If DNS already resolves the hostnames this step can be skipped; otherwise add the hostnames and their IPs to /etc/hosts.
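For example, the /etc/hosts entries on each machine would look like:

10.64.221.244    master-2347205.lvs01.dev.ebayc3.com
10.65.146.152    node01-2646823.slc01.dev.ebayc3.com
10.147.190.81    node002-1934005.phx02.dev.ebayc3.com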

Tune kernel parameters:

net.ipv4.ip_local_port_range = 30000    60999

net.netfilter.nf_conntrack_max = 26214400

net.netfilter.nf_conntrack_tcp_timeout_established = 86400

net.netfilter.nf_conntrack_tcp_timeout_close_wait = 3600
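These can be applied, for example, by loading the conntrack module and appending the values to /etc/sysctl.conf (a sketch using the values above):

modprobe nf_conntrack
cat << EOF >> /etc/sysctl.conf
net.ipv4.ip_local_port_range = 30000 60999
net.netfilter.nf_conntrack_max = 26214400
net.netfilter.nf_conntrack_tcp_timeout_established = 86400
net.netfilter.nf_conntrack_tcp_timeout_close_wait = 3600
EOF
sysctl -p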

* Create certificates

Install cfssl

mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

mv cfssl_linux-amd64 cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

mv cfssljson_linux-amd64 cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

mv cfssl-certinfo_linux-amd64 cfssl-certinfo

chmod +x *

* Create the CA certificate config

mkdir /opt/ssl

cd /opt/ssl

# config.json file

vi  config.json

{

  "signing": {

    "default": {

      "expiry": "87600h"

    },

    "profiles": {

      "kubernetes": {

        "usages": [

            "signing",

            "key encipherment",

            "server auth",

            "client auth"

        ],

        "expiry": "87600h"

      }

    }

  }

}

# csr.json file

vi csr.json

{

  "CN": "kubernetes",

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

* Generate the CA certificate and private key

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca

[root@node1-2325419 ssl]# ls -lt

total 20

-rw-r--r-- 1 root root 1005 Oct 16 17:41 ca.csr

-rw------- 1 root root 1675 Oct 16 17:41 ca-key.pem

-rw-r--r-- 1 root root 1363 Oct 16 17:41 ca.pem

-rw-r--r-- 1 root root  210 Oct 16 17:39 csr.json

-rw-r--r-- 1 root root  292 Oct 16 17:38 config.json

* Distribute the certificates

# Create the certificate directory

mkdir -p /etc/kubernetes/ssl

# Copy all the pem files into the directory

cp *.pem /etc/kubernetes/ssl

# The files also need to be copied to all the other k8s machines

scp *.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp *.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/

Configure the yum repos:

# vim /etc/yum.repos.d/kubernetes.repo 

[kubernetes]

name=Kubernetes

baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=0

[virt7-docker-common-release]

name=virt7-docker-common-release

baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=0

[centos]

name=extras

baseurl=http://mirror.centos.org/centos/7/extras/x86_64/

enabled=1

gpgcheck=0

[docker-ce-stable]

name=Docker CE Stable - $basearch

baseurl=https://download.docker.com/linux/centos/7/$basearch/stable

enabled=1

gpgcheck=0

[nginx]

name=nginx

baseurl=https://nginx.org/packages/rhel/7/x86_64/

enabled=1

gpgcheck=0

    

# Install

yum makecache


yum install docker-ce conntrack-tools -y

* Modify the docker configuration

# Make docker use systemd as its Cgroup Driver

vi /etc/docker/daemon.json

{

     "exec-opts":["native.cgroupdriver=systemd"]

}


# Modify the unit file (change the ExecStart line)

vi /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS $DOCKER_OPTS $DOCKER_DNS_OPTIONS


# Additional configuration

mkdir -p /usr/lib/systemd/system/docker.service.d/

vi /usr/lib/systemd/system/docker.service.d/docker-options.conf


# Add the following (note: the Environment value must stay on a single line; a line break will prevent it from loading)

[Service]

Environment="DOCKER_OPTS=--insecure-registry=10.254.0.0/16 --graph=/opt/docker --registry-mirror=http://b438f72b.m.daocloud.io --disable-legacy-registry --iptables=false"

* Reload the configuration and start docker

systemctl daemon-reload

systemctl start docker

systemctl enable docker
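# Optionally verify that the cgroup driver setting took effect (a quick check):

docker info | grep -i 'cgroup driver'     # should show: Cgroup Driver: systemd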

* etcd cluster

yum -y install etcd

* Create the etcd certificates

cd /opt/ssl/

vi etcd-csr.json

{

  "CN": "etcd",

  "hosts": [

    "127.0.0.1",

    "10.64.221.244",

    "10.64.221.245",

    "10.9.219.148"

  ],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

# Generate the etcd certificate and key

/opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \

  -ca-key=/opt/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd

# Check the generated files

[root@master-2347205 ssl]# ls etcd* -l

-rw-r--r-- 1 root root 1050 Oct 30 00:17 etcd.csr

-rw-r--r-- 1 root root  259 Oct 30 00:16 etcd-csr.json

-rw------- 1 root root 1679 Oct 30 00:17 etcd-key.pem

-rw-r--r-- 1 root root 1424 Oct 30 00:17 etcd.pem

# Copy to the etcd server

# etcd 

[root@master-2347205 ssl]# cp etcd*.pem /etc/kubernetes/ssl/

# If etcd runs as a non-root user, it gets a permission error when reading the key

[root@master-2347205 ssl]# chmod 644 /etc/kubernetes/ssl/etcd-key.pem

* Modify the etcd configuration

Edit the etcd unit file /usr/lib/systemd/system/etcd.service

# etcd1

[root@master-2347205 ssl]# vi /usr/lib/systemd/system/etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

WorkingDirectory=/var/lib/etcd/

#EnvironmentFile=-/etc/etcd/etcd.conf

User=etcd

# set GOMAXPROCS to number of processors

ExecStart=/usr/bin/etcd \

  --name=etcd1 \

  --cert-file=/etc/kubernetes/ssl/etcd.pem \

  --key-file=/etc/kubernetes/ssl/etcd-key.pem \

  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \

  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \

  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \

  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \

  --initial-advertise-peer-urls=https://10.64.241.244:2380 \

  --listen-peer-urls=https://10.64.241.244:2380 \

  --listen-client-urls=https://10.64.241.244:2379,http://127.0.0.1:2379 \

  --advertise-client-urls=https://10.64.241.244:2379 \

  --initial-cluster-token=k8s-etcd-cluster \

  --initial-cluster=etcd1=https://10.64.241.244:2380 \

  --initial-cluster-state=new \

  --data-dir=/var/lib/etcd

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

* Start etcd

[root@master-2347205 ssl]# systemctl enable etcd

[root@master-2347205 ssl]# systemctl start etcd

[root@master-2347205 ssl]# systemctl status etcd

# If it reports errors, use

journalctl -f -t etcd and journalctl -u etcd to locate the problem

* Verify the etcd cluster status

Check the etcd cluster status:

# etcdctl --endpoints=https://10.64.241.244:2379 \

         --cert-file=/etc/kubernetes/ssl/etcd.pem \

        --ca-file=/etc/kubernetes/ssl/ca.pem \

         --key-file=/etc/kubernetes/ssl/etcd-key.pem \

         member list

f2f13b8cab1c8b9a: name=etcd1 peerURLs=https://10.64.241.244:2380 clientURLs=https://10.64.241.244:2379 isLeader=true
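# Optionally also check the overall health of the (single-member) cluster:

etcdctl --endpoints=https://10.64.241.244:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health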

* Install the kubectl tool

** Master configuration

# First install kubectl

wget https://dl.k8s.io/v1.8.0/kubernetes-client-linux-amd64.tar.gz

tar -xzvf kubernetes-client-linux-amd64.tar.gz

cp kubernetes/client/bin/* /usr/bin/

chmod a+x /usr/bin/kube*

# Verify the installation

kubectl version

Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.0", GitCommit:"6e937839ac04a38cac63e6a7a306c5d035fe7b0a", GitTreeState:"clean", BuildDate:"2017-09-28T22:57:57Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

The connection to the server localhost:8080 was refused - did you specify the right host or port?

* Create the admin certificate

kubectl talks to the kube-apiserver secure port, so a TLS certificate and key are needed for the secure communication.

cd /opt/ssl/

vi admin-csr.json

{

  "CN": "admin",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "ShenZhen",

      "L": "ShenZhen",

      "O": "system:masters",

      "OU": "System"

    }

  ]

}

# Generate the admin certificate and private key

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin

# Check the generated files

# ls admin*

admin.csr  admin-csr.json  admin-key.pem  admin.pem

cp admin*.pem /etc/kubernetes/ssl/

scp admin*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp admin*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/

* Configure the kubectl kubeconfig file

Set server to the local IP so that each machine talks to its own API server

# Configure the kubernetes cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443

# Configure client authentication

kubectl config set-credentials admin \

  --client-certificate=/etc/kubernetes/ssl/admin.pem \

  --embed-certs=true \

  --client-key=/etc/kubernetes/ssl/admin-key.pem

  

kubectl config set-context kubernetes \

  --cluster=kubernetes \

  --user=admin

kubectl config use-context kubernetes

* kubectl config file

# The kubeconfig file is located as follows:

ls /root/.kube/config  -l

-rw------- 1 root root 6303 Oct 30 01:13 /root/.kube/config

* Deploy the Kubernetes Master node

The Master needs three components: kube-apiserver, kube-scheduler and kube-controller-manager. kube-scheduler decides which node each pod is assigned to, i.e. resource scheduling. kube-controller-manager runs the control loops for the deployment controller, replication controller, endpoints controller, namespace controller, serviceaccounts controller and so on, interacting with kube-apiserver.

Install the components

# Download the release from github

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

\cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/bin/

* Create the kubernetes certificate

cd /opt/ssl

vi kubernetes-csr.json

{

  "CN": "kubernetes",

  "hosts": [

    "127.0.0.1",

    "10.64.241.244",

    "10.254.0.1",

    "kubernetes",

    "kubernetes.default",

    "kubernetes.default.svc",

    "kubernetes.default.svc.cluster",

    "kubernetes.default.svc.cluster.local"

  ],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

## The three IPs in the hosts field are: 127.0.0.1 (localhost), 10.64.221.244 (the Master IP; with multiple Masters list each one), and 10.254.0.1 (the kubernetes service IP, usually the first IP of the service network, e.g. 10.254.0.1). Once everything is running you can see it with kubectl get svc.
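# For example, after the apiserver is up, a quick check (the CLUSTER-IP column should show the first service-network address used above, 10.254.0.1):

kubectl get svc kubernetes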

Generate the kubernetes certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes

# Check the generated files

[root@master-2347205 ssl]# ls -lt kubernetes*

-rw-r--r-- 1 root root 1245 Oct 30 01:16 kubernetes.csr

-rw------- 1 root root 1679 Oct 30 01:16 kubernetes-key.pem

-rw-r--r-- 1 root root 1619 Oct 30 01:16 kubernetes.pem

-rw-r--r-- 1 root root  440 Oct 30 01:16 kubernetes-csr.json

# Copy to the directory

cp -r kubernetes*.pem /etc/kubernetes/ssl/

scp -r kubernetes*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp -r kubernetes*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/

Configure kube-apiserver

When kubelet starts for the first time it sends a TLS bootstrapping request to kube-apiserver. kube-apiserver checks whether the token in the request matches the token it is configured with; if so, it automatically issues a certificate and key for the kubelet.

# Generate a token

[root@master-2347205 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '

c4d059eb65035a8c33a7f017f2763834

# Create the token.csv file

cd /opt/ssl

vi token.csv

c4d059eb65035a8c33a7f017f2763834,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

# Copy

cp token.csv /etc/kubernetes/

* Create the kube-apiserver.service file

# New in 1.8: the Node authorizer, --authorization-mode=Node,RBAC

# Custom systemd service files usually live under /etc/systemd/system/

# Configure with each machine's own local IP

vi /etc/systemd/system/kube-apiserver.service

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target

[Service]

User=root

ExecStart=/usr/bin/kube-apiserver \

  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \

  --advertise-address=10.64.241.244 \

  --allow-privileged=true \

  --apiserver-count=1 \

  --audit-log-maxage=30 \

  --audit-log-maxbackup=3 \

  --audit-log-maxsize=100 \

  --audit-log-path=/var/lib/audit.log \

  --authorization-mode=Node,RBAC \

  --bind-address=10.64.241.244 \

  --client-ca-file=/etc/kubernetes/ssl/ca.pem \

  --enable-swagger-ui=true \

  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \

  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \

  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \

  --etcd-servers=https://10.64.241.244:2379 \

  --event-ttl=1h \

  --kubelet-https=true \

  --insecure-bind-address=10.64.241.244 \

  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \

  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --service-cluster-ip-range=10.254.0.0/16 \

  --service-node-port-range=30000-32000 \

  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \

  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \

  --enable-bootstrap-token-auth \

  --token-auth-file=/etc/kubernetes/token.csv \

  --v=2

Restart=on-failure

RestartSec=5

Type=notify

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

# Note the --service-node-port-range=30000-32000 setting

# This is the port range used when exposing services on node ports; randomly assigned node ports fall inside this range, and any explicitly specified node port must also be inside it.
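# For example, a service exposed as NodePort must land in this range (a sketch; nginx-dm is the deployment created later in this guide, nginx-np is an arbitrary service name):

kubectl expose deployment nginx-dm --port=80 --type=NodePort --name=nginx-np
kubectl get svc nginx-np     # the allocated node port will fall between 30000 and 32000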

* Start kube-apiserver

systemctl daemon-reload

systemctl enable kube-apiserver

systemctl start kube-apiserver

systemctl status kube-apiserver

* Configure kube-controller-manager

Set master to the local IP

# Create the kube-controller-manager.service file

vi /etc/systemd/system/kube-controller-manager.service

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]

ExecStart=/usr/bin/kube-controller-manager \

  --address=127.0.0.1 \

  --master=http://10.64.241.244:8080 \

  --allocate-node-cidrs=true \

  --service-cluster-ip-range=10.254.0.0/16 \

  --cluster-cidr=10.233.0.0/16 \

  --cluster-name=kubernetes \

  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \

  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --root-ca-file=/etc/kubernetes/ssl/ca.pem \

  --leader-elect=true \

  --v=2

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target


* Start kube-controller-manager

systemctl daemon-reload

systemctl enable kube-controller-manager

systemctl start kube-controller-manager

systemctl status kube-controller-manager


* Configure kube-scheduler

Set master to the local IP

# Create the kube-scheduler.service file

vi /etc/systemd/system/kube-scheduler.service

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]

ExecStart=/usr/bin/kube-scheduler \

  --address=127.0.0.1 \

  --master=http://10.64.241.244:8080 \

  --leader-elect=true \

  --v=2

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

* Start kube-scheduler

systemctl daemon-reload

systemctl enable kube-scheduler

systemctl start kube-scheduler

systemctl status kube-scheduler


* Verify the Master node

[root@master-2347205 ssl]# kubectl get componentstatuses

NAME                 STATUS    MESSAGE              ERROR

controller-manager   Healthy   ok                   

scheduler            Healthy   ok                   

etcd-0               Healthy   {"health": "true"} 


Deploy the Node components (on the Master)

The Node part needs the docker, calico, kubectl, kubelet and kube-proxy components.

Configure kubelet

When kubelet starts it sends a TLS bootstrapping request to kube-apiserver, so the kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then does kubelet have permission to create certificate signing requests (certificatesigningrequests).

# First create the role binding for certificate requests

# The user is the one configured in token.csv on the master

# This only needs to be done once

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

* Create the kubelet kubeconfig file

Set server to the master's IP

# Configure the cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443 \

  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \

  --token=c4d059eb65035a8c33a7f017f2763834 \

  --kubeconfig=bootstrap.kubeconfig

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kubelet-bootstrap \

  --kubeconfig=bootstrap.kubeconfig

# Set the default context

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig file

mv bootstrap.kubeconfig /etc/kubernetes/

* Create the kubelet.service file

# Create the kubelet directory

> Configure with the node's own IP (all node machines use the same layout)

mkdir /var/lib/kubelet

vi /etc/systemd/system/kubelet.service

[Unit]

Description=Kubernetes Kubelet

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=docker.service

Requires=docker.service

[Service]

WorkingDirectory=/var/lib/kubelet

ExecStart=/usr/bin/kubelet \

  --address=10.64.241.244 \

  --hostname-override=10.64.241.244 \

  --cgroup-driver=systemd \

  --network-plugin=cni \

  --pod-infra-container-image=jicki/pause-amd64:3.0 \

  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \

  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \

  --cert-dir=/etc/kubernetes/ssl \

  --cluster_dns=10.254.0.2 \

  --cluster_domain=cluster.local. \

  --hairpin-mode promiscuous-bridge \

  --allow-privileged=true \

  --fail-swap-on=false \

  --serialize-image-pulls=false \

  --logtostderr=true \

  --max-pods=512 \

  --v=2

ExecStopPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

# In the configuration above:

10.64.221.244      is the IP of this machine

10.254.0.2       is the pre-allocated dns address

cluster.local.   is the domain of the kubernetes cluster

jicki/pause-amd64:3.0  is the pod infrastructure image, i.e. gcr.io/google_containers/pause-amd64:3.0 from gcr; pulling it and pushing it to your own registry makes it faster, as in the sketch below.
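# A retag sketch (registry.example.com is a placeholder for your own registry):

docker pull jicki/pause-amd64:3.0
docker tag jicki/pause-amd64:3.0 registry.example.com/google_containers/pause-amd64:3.0
docker push registry.example.com/google_containers/pause-amd64:3.0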


Start kubelet

systemctl daemon-reload

systemctl enable kubelet

systemctl start kubelet

systemctl status kubelet

# If it reports errors, use

journalctl -f -t kubelet and journalctl -u kubelet to locate the problem


* Approve the TLS certificate requests

[root@master-2347205 ssl]# kubectl get csr

NAME                                                   AGE       REQUESTOR           CONDITION

node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8   1d        kubelet-bootstrap   Pending


# Approve the request

[root@master-2347205 ssl]# kubectl certificate approve node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8

certificatesigningrequest "node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8" approved


* Verify the nodes

[root@master-2347205 ssl]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.64.241.244   Ready     <none>    1d        v1.8.0


# After approval the kubeconfig and keys are generated automatically

# Config file

ls /etc/kubernetes/kubelet.kubeconfig   

/etc/kubernetes/kubelet.kubeconfig

# Key files

# ls /etc/kubernetes/ssl/kubelet*

/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.crt  /etc/kubernetes/ssl/kubelet.key


* Configure kube-proxy

Create the kube-proxy certificate

# cfssl is not installed on the node machines,


# so we go back to the master to generate the certificate and then copy it over

# cd /opt/ssl

vi kube-proxy-csr.json


{

  "CN": "system:kube-proxy",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

Generate the kube-proxy certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes  kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy


# Check the generated files

ls kube-proxy*

kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem


# Copy to the directory

cp kube-proxy*.pem /etc/kubernetes/ssl/

scp kube-proxy*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp kube-proxy*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/


Create the kube-proxy kubeconfig file

Set server to each machine's own IP; on the Node machines point it at the apiserver's IP (with a multi-master setup, at the IP of their proxy)


# Configure the cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443 \

  --kubeconfig=kube-proxy.kubeconfig


# Configure client authentication

kubectl config set-credentials kube-proxy \

  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \

  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig

  

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig


# Set the default context

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move to the directory

mv kube-proxy.kubeconfig /etc/kubernetes/

* Create the kube-proxy.service file

Configure with each machine's own IP

# Create the kube-proxy directory

mkdir -p /var/lib/kube-proxy

vi /etc/systemd/system/kube-proxy.service

[Unit]

Description=Kubernetes Kube-Proxy Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target

[Service]

WorkingDirectory=/var/lib/kube-proxy

ExecStart=/usr/bin/kube-proxy \

  --bind-address=10.64.241.244 \

  --hostname-override=10.64.241.244 \

  --cluster-cidr=10.254.0.0/16 \

  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \

  --logtostderr=true \

  --v=2

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

* Start kube-proxy

systemctl daemon-reload

systemctl enable kube-proxy

systemctl start kube-proxy

systemctl status kube-proxy

# If it reports errors, use

journalctl -f -t kube-proxy and journalctl -u kube-proxy to locate the problem


# Configuration on ALL node machines

------------------------------------

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-proxy,kubelet} /usr/bin/


mkdir -p /etc/kubernetes/ssl/

scp ca.pem kube-proxy.pem kube-proxy-key.pem  node-*:/etc/kubernetes/ssl/

# kubelet

# First create the kubelet kubeconfig file

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.241.244:6443 \

  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \

  --token=c4d059eb65035a8c33a7f017f2763834 \

  --kubeconfig=bootstrap.kubeconfig

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kubelet-bootstrap \

  --kubeconfig=bootstrap.kubeconfig

  

# Set the default context

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig file

mv bootstrap.kubeconfig /etc/kubernetes/

      

# Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.241.244:6443 \

  --kubeconfig=kube-proxy.kubeconfig

# Configure client authentication

kubectl config set-credentials kube-proxy \

  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \

  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig

   

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig

# Set the default context

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move to the directory

mv kube-proxy.kubeconfig /etc/kubernetes/  

------------------------------------


Create an Nginx proxy

An Nginx proxy must be created on every node. Note in particular: when the Master also acts as a Node, the nginx-proxy does not need to be configured on it.

# Create the config directory

mkdir -p /etc/nginx

# Write the proxy configuration

cat << EOF > /etc/nginx/nginx.conf

error_log stderr notice;

worker_processes auto;

events {

  multi_accept on;

  use epoll;

  worker_connections 1024;

}

stream {

    upstream kube_apiserver {

        least_conn;

        server 10.64.221.244:6443;

    }

    server {

        listen        0.0.0.0:6443;

        proxy_pass    kube_apiserver;

        proxy_timeout 10m;

        proxy_connect_timeout 1s;

    }

}

EOF

# Run Nginx as a docker container and manage it with a systemd unit

cat << EOF > /etc/systemd/system/nginx-proxy.service

[Unit]

Description=kubernetes apiserver docker wrapper

Wants=docker.socket

After=docker.service

[Service]

User=root

PermissionsStartOnly=true

ExecStart=/usr/bin/docker run -p 6443:6443 \\

                              -v /etc/nginx:/etc/nginx \\

                              --name nginx-proxy \\

                              --net=host \\

                              --restart=on-failure:5 \\

                              --memory=512M \\

                              nginx:1.13.3-alpine

ExecStartPre=-/usr/bin/docker rm -f nginx-proxy

ExecStop=/usr/bin/docker stop nginx-proxy

Restart=always

RestartSec=15s

TimeoutStartSec=30s

[Install]

WantedBy=multi-user.target

EOF

# Start Nginx

systemctl daemon-reload

systemctl start nginx-proxy

systemctl enable nginx-proxy

systemctl status nginx-proxy

# Restart kubelet and kube-proxy on the Node

systemctl restart kubelet

systemctl status kubelet

systemctl restart kube-proxy

systemctl status kube-proxy

On the Master, approve the TLS certificate requests

[root@master-2347205 ~]# kubectl get csr

NAME                                                   AGE       REQUESTOR           CONDITION

node-csr-ab7uNyElYjU2cVKcUQWLCSynBaTBD38eGLetav-Hkjc   23h       kubelet-bootstrap   Pending

node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8   1d        kubelet-bootstrap   Approved,Issued

node-csr-rNEoTvYkwQrruBnc_kAFJ1WRNVv-5vopplJzbVevTHw   23h       kubelet-bootstrap   Pending

[root@master-2347205 ~]# kubectl certificate approve NAME     # NAME is the pending CSR name to approve
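# For example, approving the two pending requests listed above:

kubectl certificate approve node-csr-ab7uNyElYjU2cVKcUQWLCSynBaTBD38eGLetav-Hkjc
kubectl certificate approve node-csr-rNEoTvYkwQrruBnc_kAFJ1WRNVv-5vopplJzbVevTHw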

[root@master-2347205 ~]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.147.190.81   Ready     <none>    23h       v1.8.0

10.64.241.244   Ready     <none>    1d        v1.8.0

10.65.146.152   Ready     <none>    23h       v1.8.0

Calico network

Modify kubelet.service

vi /etc/systemd/system/kubelet.service

# Make sure the following option is present (it is already in the kubelet.service created earlier)

--network-plugin=cni \

# Reload the configuration

systemctl daemon-reload

systemctl restart kubelet.service

systemctl status kubelet.service

Modify kube-proxy.service

# Reload the configuration

systemctl daemon-reload

systemctl restart kube-proxy.service

systemctl status kube-proxy.service

Install Calico

(official docs: http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/hosted)

wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml

wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/rbac.yaml

# Download the images

# Upstream images (hard to pull from inside China)

quay.io/calico/node:v2.6.0

quay.io/calico/cni:v1.11.0

quay.io/calico/kube-controllers:v1.0.0

# Domestic mirrors

jicki/node:v2.6.0

jicki/cni:v1.11.0

jicki/kube-controllers:v1.0.0

 Configure calico

 # Note: modify the following settings:

 data:

  # Configure this with the location of your etcd cluster.

  etcd_endpoints: "https://10.64.241.244:2379"

 

 # The base64-encoded contents go here

data:

  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')

  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')

  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')

The resulting configuration looks like this:

 data:

  # Populate the following files with etcd TLS configuration if desired, but leave blank if

  # not using TLS for etcd.

  # This self-hosted install expects three files with the following names.  The values

  # should be base64 encoded strings of the entire contents of each file.

   etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMWpqRVE2bWRnYXc1WUhvemxadUpwVlMzb1QxVDk5aDV1Mm1JcDhHN3BrQW4rVUloCklrRjIrOG93OTZxUkJERzlaa1V2R3h3ZXJtVWx2Y21LOFFlQ1BGKzFHYUZ5U3doVG52dTNLRW5FcXhNaHMzbmcKelM5QXI4SDVDejNDZ05jSldhais3ZTFaU2FBRStKTFF4SzZXdmRadnlSTXdVMnZiVldQMDlvV2hRaTJxUkJJYwpnVFhEMk9XNXhyakgrSzd3K1JnaERsM0NNNmlyTnQrM0xLMWtySlNwdStVeVVlRGZqbWtDWnVIUnBnMUVDTUJ5Ckxrc3ZwZVFlYTZNTHFDMVBIV1ZzQmdPWWFPYU45Z2doQ2FWMUQ4M094QmFBa2QzTmF0aHpDZ0RLb3g5TTNuWVoKL2hrSW1xYkthS1pkbmxWT2pqY2hOdzQwT2R0dHYxT3ROYjF5L1FJREFRQUJBb0lCQURpZURqazMxZm9VczZGdApDcjhxdVlscHg1S2s2OUZnZEdQMDgzb3ErTlpxdEdYcmRXaGhWdm9iS1MvNzg1SGhONGFWR0dzWTFuOFBHcGQ3CmNlNWNKcTZnT1BRS09IUGVRb09OVVk0aldRaEI2VEZQTC92cTFqVGRsbDNETitubmE0QTM2eENsSDJrWEV0bEQKckV1ZlBSWDFXOXdQNTBEVkM4WVl3Z0I1dHZJUE5hRHNFaExFZDBSZlJQWXZ2NkxJS1pZMGhHcXhvMjFPYThpZApiZEEwK3I2dm9OV043TVFENDFHN09EM0t4S3V3OG0zU1FDTTR1ZDVVd25FWTFrOU8xWExqZi9mQTRxRTNNNjI4ClFFaURjVnVDNTJjd1ZQb0M3QmdsV2ZnTUNPK0FnUjVDMnQzblpxNEdUQWJjMWhFYmJsTUdDbVJxbzB5SUt2S0QKdjhtL2kzMENnWUVBM0QrZDlLbUxQRU44UlYzVE9WOUt1ajQ3NXVXaGdlOXBXWGtJRWkrOTRZYjhyYzRXWUFXawo3YVhUdW95OW4yaTkvdE85VGV3cUtvcm9WeTQ4MHd3OFJPTzdMbnNRdFZhMGFjYktVaS96NUc4bkxQSnkvb1FQCjljRXNUbklET0NPWURRV0l0b1hCaS9qRGZCOURWY1RCTkJaaENKc05ySEdQc2xRM2owWkRySWNDZ1lFQStQNjEKZDZmTkpLNFNSMkpIbC8zRGdoWWZxSEZHQnF1MU00UTJoaTE1c0Zrbm15STRiczBwdXpxbHFETjhIVGRGTitUYQpCbVZ3bDcvTVI0OUtMWTAvbFViM3NEcllQNHZsRVFmcDM2RDgzcUsrNnA5Z2IySE1ncnpDNHlTaWNNcmFyQUpQCm5PU1pkSHVCV0VwS1dYcXpNWS9vT1lncXpmK2NDY1ZCaU9kbXFWc0NnWUVBcTYwNjVsYXhuVXZOTmhTb2JIUUcKdjUxNis4UEtYSW5OU0Z5N0dkSDA1REpnQ3VvMUxxdTNuelkvRDQ1YzBFREl1RFU3dmQxMEVLMHQ1YWE0NnBrQwo4WC9CaTcySmpKQUZHTjVISGtFYWp0akZaSmVuWXhSMWpFNEN4eksvcDZhM0FvOXB2VHdOWFptT1p0WEhtcmMvCkVlYlB3L0Y0ZXc0WndCczlJNFRWZzBrQ2dZRUFuNzNoTkFQT1k1TGRuVGhiRUJLNHBET0hXZXhaN1p1cFk0U1YKNmwwd2JjZTAxaDBOTVdDa1BqN3cya2sxZmhUZ2JteCt4NDI2djFCSVFUeVhiVVBxSDkzb3VBSDRqRVEyMEVkUQpicXo4cmVTaE1TclFsb1Nhek9GRzhxeWxIbEcyR3BaYzl4M2RxVkkyK0NxcEttRENwN0JZWHRpU0JaalFSYUJzClcrV0JBbk1DZ1lBNXdKcmZPenBZb08vYkZHWXFPSkY5QmxOT0E4eG42T2w2RDBmZDY0bGZJTDJTUEg3MnNna3gKN1lHTHZGdThvNWhoYml4ejJUQ3ZSRUw0Z1NGWVRhcEcwN0haWEFtbkFqcDAyN3dSWlR4YXdWZ29RTE5DOU9TTwpydmhXSWZjSjMzQWdHWUFNSVVaWWpvL2RQLzRpOWF0VGFQdmI5bmNzaHVNVXkxTURXSnIxOEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

   etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3akNDQXRhZ0F3SUJBZ0lVY1RaMjVEWWNQcGU2MXBuWkZqeU5jVHNqRURRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0ZOb1pXNWFhR1Z1TVJFd0R3WURWUVFIRXdoVAphR1Z1V21obGJqRU1NQW9HQTFVRUNoTURhemh6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RXpBUkJnTlZCQU1UCkNtdDFZbVZ5Ym1WMFpYTXdIaGNOTVRjeE1ETXdNRGN4TWpBd1doY05NamN4TURJNE1EY3hNakF3V2pCaE1Rc3cKQ1FZRFZRUUdFd0pEVGpFUk1BOEdBMVVFQ0JNSVUyaGxibHBvWlc0eEVUQVBCZ05WQkFjVENGTm9aVzVhYUdWdQpNUXd3Q2dZRFZRUUtFd05yT0hNeER6QU5CZ05WQkFzVEJsTjVjM1JsYlRFTk1Bc0dBMVVFQXhNRVpYUmpaRENDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlk0eEVPcG5ZR3NPV0I2TTVXYmlhVlUKdDZFOVUvZlllYnRwaUtmQnU2WkFKL2xDSVNKQmR2dktNUGVxa1FReHZXWkZMeHNjSHE1bEpiM0ppdkVIZ2p4Zgp0Um1oY2tzSVU1Nzd0eWhKeEtzVEliTjU0TTB2UUsvQitRczl3b0RYQ1Ztby91M3RXVW1nQlBpUzBNU3VscjNXCmI4a1RNRk5yMjFWajlQYUZvVUl0cWtRU0hJRTF3OWpsdWNhNHgvaXU4UGtZSVE1ZHdqT29xemJmdHl5dFpLeVUKcWJ2bE1sSGczNDVwQW1iaDBhWU5SQWpBY2k1TEw2WGtIbXVqQzZndFR4MWxiQVlEbUdqbWpmWUlJUW1sZFEvTgp6c1FXZ0pIZHpXclljd29BeXFNZlRONTJHZjRaQ0pxbXltaW1YWjVWVG80M0lUY09ORG5iYmI5VHJUVzljdjBDCkF3RUFBYU9CbHpDQmxEQU9CZ05WSFE4QkFmOEVCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwT0JCWUVGSFFjQnRoTXRXcnlPaG5nSUQwZQpCTE4wR0M2Yk1COEdBMVVkSXdRWU1CYUFGSWw3M2JJQ1RqandkdFNtcGNSMlNOVDNPREpHTUJVR0ExVWRFUVFPCk1BeUhCSDhBQUFHSEJBcEE4ZlF3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUlVdlFyN0ZYNGtiTVM3VXU4b2wKSVlLV1JLZG5ZS0FzWTdybGlIaklOUTg1QTh4dDZ2aU9RS2t2bGRXRVBYUGxpcHJUZjFkVzdKbVhRbjZyUExNMQp4NzRhREZGUm5maHFoeWhMWjY3TGRBQzE3cGlGNHJVczNzQ280Ymd1RURyaGpWOWk0aFRCcU0xK0pucmRENVhKCmtoUExZdkdMS3UzODB1ZzhHU3VIamRFd2VGWHFtTkNoVDNiL2syTjBtalhLTGNZOGRsalpWT1NFZFhhSHZKM1gKQ3l1emE1YWJDbnVsZDhZMUJycEUvZXEvU29sMHBqa0c5NDZ6U1lwaEpsS0JadGRmbElYRGg0ZG91RlRKd2RxeAprMEl3NVA3Y21SZW5ieGJFMCs1YUV6MDFVWVJ1ZnlMK1dGU3VZTWU3cUovV3NPVUxxVWVkbUpocE9yN29NK0tmCnpxUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=

   etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR3akNDQXFxZ0F3SUJBZ0lVQXZJRGN1U1FBTnhxV1pZZC9XSGE3OER0OFdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0ZOb1pXNWFhR1Z1TVJFd0R3WURWUVFIRXdoVAphR1Z1V21obGJqRU1NQW9HQTFVRUNoTURhemh6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RXpBUkJnTlZCQU1UCkNtdDFZbVZ5Ym1WMFpYTXdIaGNOTVRjeE1ETXdNRGN3T0RBd1doY05Nakl4TURJNU1EY3dPREF3V2pCbk1Rc3cKQ1FZRFZRUUdFd0pEVGpFUk1BOEdBMVVFQ0JNSVUyaGxibHBvWlc0eEVUQVBCZ05WQkFjVENGTm9aVzVhYUdWdQpNUXd3Q2dZRFZRUUtFd05yT0hNeER6QU5CZ05WQkFzVEJsTjVjM1JsYlRFVE1CRUdBMVVFQXhNS2EzVmlaWEp1ClpYUmxjekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMejJxb0VTTGxTZjl3ZGsKb05DNnpYUDUyVzhHZ0p6TWFTQWVKNzgrbElvMG5YOWFyeWlheCtDOHg4dG1VcU1sMklzRzhmc01NY3JOM3VOeApRZjF5MXRBZGJHQ0lCR2Q0d1FNcnc3ZU1QNld6L285SFpKMW1lcmlPTHF6YlNqc2VSYmY3VloxZGZHYkxZTDN6CkI2WGZJam5MbjRlS2YrdHIxdkEvc3ppZmIvMjkyYW1jd3RZVER4NkJ1WFlaY1F0ZktBeExNY3psSVNKaEx2cWQKYkpWQkRnYWVPRTMzeTFYeG0vWkR3cnN2VjdIZUhjczY3OUdVdU1QVE9ZRkZQRnFNRGlSam9acWkzOWV6MU9kUgpHKzdIcTJOMDVzQVpTMkJ1eFE5RjZsZFVTTTRZNkdobkVWejRKZzVmVElHdlVWZ2FOR1F0aGpzZ2JvUnVYNXppCkZvSzI2RzhDQXdFQUFhTm1NR1F3RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOEMKQVFJd0hRWURWUjBPQkJZRUZJbDczYklDVGpqd2R0U21wY1IyU05UM09ESkdNQjhHQTFVZEl3UVlNQmFBRklsNwozYklDVGpqd2R0U21wY1IyU05UM09ESkdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUI5UWY3Z004K0pNakZECnBRYm9HM2g2NkNyMC9xOEZNbzlGaURjWlYxb1dpejQvVEdGK1phU1hSOFFxWmRTK0dNQU1xaGUydGdLbTZ3TngKK3VvS2w5TDEzUjkrSmVWWFdvSWpmV3V0SFNyNk5NbVdLMnJjVVNoTjg0cW1vMW81ZjBta3paUWxHZU5OT3V3cAp5ZzBtY2***TcxbDFVeVY0RzR5YWRyQVFHRHhvbDhud0ViZXBKdlkwU2FkVXFIRjVsYW4ySEd5VVV3UXpsdlMwCjRDbVFHQUJNclY5M1phbTBaZTNWbU1WK3BFeVhRbnNXS2lqTG5tSjd5WU5vYjZFd2VXcS93eFc4Y2dCVEpncW0KNnUzRDRVc3NWRXZhQXlsL3F3Zkx6Zkk1NTNxc0kySTg4T1BQd0M5SE1IUU9pWTFja3FIMnlWRm5Pd3htRUphVAo4YzJZbU95TgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==

 

             # Configure the IP Pool from which Pod IPs will be chosen.

             - name: CALICO_IPV4POOL_CIDR

              value: "10.233.0.0/16"

Apply the yaml files

[root@master-2347205 ~]# kubectl apply -f calico.yaml 

configmap "calico-config" created

secret "calico-etcd-secrets" created

daemonset "calico-node" created

deployment "calico-policy-controller" created

serviceaccount "calico-policy-controller" created

serviceaccount "calico-node" created

[root@master-2347205 ~]# kubectl apply -f rbac.yaml

clusterrole "calico-policy-controller" created

clusterrolebinding "calico-policy-controller" created

clusterrole "calico-node" created

clusterrolebinding "calico-node" created

[root@master-2347205 ~]# kubectl get pods -n kube-system

NAME                                      READY     STATUS    RESTARTS   AGE

calico-kube-controllers-f58c74bfd-xmwjk   1/1       Running   1          8h

calico-node-4b79k                         2/2       Running   4          8h

calico-node-9b68d                         2/2       Running   2          8h

calico-node-mgxjw                         2/2       Running   2          8h

Install Calicoctl

cd /usr/bin/

wget -c  https://github.com/projectcalico/calicoctl/releases/download/v1.3.0/calicoctl

chmod +x calicoctl

## Create the calicoctl config file

# The config file lives on machines where the calico network is installed

mkdir /etc/calico

vi /etc/calico/calicoctl.cfg

apiVersion: v1

kind: calicoApiConfig

metadata:

spec:

  datastoreType: "etcdv2"

  etcdEndpoints: "https://110.64.241.244:2379"

  etcdKeyFile: "/etc/kubernetes/ssl/etcd-key.pem"

  etcdCertFile: "/etc/kubernetes/ssl/etcd.pem"

  etcdCACertFile: "/etc/kubernetes/ssl/ca.pem"

 

 # Check the calico status

 [root@master-2347205 ~]#  calicoctl node status

Calico process is running.

IPv4 BGP status

+---------------+-------------------+-------+----------+-------------+

| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |

+---------------+-------------------+-------+----------+-------------+

| 10.147.190.81 | node-to-node mesh | up    | 05:50:54 | Established |

| 10.65.146.152 | node-to-node mesh | up    | 05:50:53 | Established |

+---------------+-------------------+-------+----------+-------------+

IPv6 BGP status

No IPv6 peers found.

Test the cluster

# Create an nginx deployment

apiVersion: extensions/v1beta1 

kind: Deployment 

metadata: 

  name: nginx-dm

spec: 

  replicas: 2

  template: 

    metadata: 

      labels: 

        name: nginx 

    spec: 

      containers: 

        - name: nginx 

          image: nginx:alpine 

          imagePullPolicy: IfNotPresent

          ports: 

            - containerPort: 80         

---

apiVersion: v1 

kind: Service

metadata: 

  name: nginx-svc 

spec: 

  ports: 

    - port: 80

      targetPort: 80

      protocol: TCP 

  selector: 

    name: nginx

    

[root@master-2347205 ~]# kubectl get pods -o wide

NAME                        READY     STATUS    RESTARTS   AGE       IP               NODE

nginx-dm-55b58f68b6-fdqlv   1/1       Running   1          7h        10.233.165.193   10.147.190.81

nginx-dm-55b58f68b6-mp9vj   1/1       Running   2          7h        10.233.103.9     10.64.241.244    

 # curl from the nodes

[root@master-2347205 ~]# curl http://10.254.49.180:80 -I

HTTP/1.1 200 OK

Server: nginx/1.13.6

Date: Tue, 31 Oct 2017 10:00:09 GMT

Content-Type: text/html

Content-Length: 612

Last-Modified: Fri, 27 Oct 2017 22:20:30 GMT

Connection: keep-alive

ETag: "59f3b12e-264"

Accept-Ranges: bytes

[root@node01-2646823 ~]# curl http://10.254.49.180:80 -I

HTTP/1.1 200 OK

Server: nginx/1.13.6

Date: Tue, 31 Oct 2017 09:59:48 GMT

Content-Type: text/html

Content-Length: 612

Last-Modified: Fri, 27 Oct 2017 22:20:30 GMT

Connection: keep-alive

ETag: "59f3b12e-264"

Accept-Ranges: bytes

Configure KubeDNS

Official yaml files on github: https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns

Download the images

# Official images

gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5

gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5

gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5

 

# My mirrored images

jicki/k8s-dns-sidecar-amd64:1.14.5

jicki/k8s-dns-kube-dns-amd64:1.14.5

jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.5

Download the yaml file

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/kube-dns.yaml.base

# Rename the file (drop the .base suffix)

mv kube-dns.yaml.base kube-dns.yaml

* The predefined system RoleBinding

The predefined RoleBinding system:kube-dns binds the kube-dns ServiceAccount in the kube-system namespace to the system:kube-dns Role, which has permission to access the DNS-related kube-apiserver APIs;

[root@master-2347205 ~]# kubectl get clusterrolebindings system:kube-dns -o yaml

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  creationTimestamp: 2017-10-30T08:25:06Z

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

  name: system:kube-dns

  resourceVersion: "78"

  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system%3Akube-dns

  uid: d5af7ba9-bd4b-11e7-85cf-74dbd18002e4

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:kube-dns

subjects:

- kind: ServiceAccount

  name: kube-dns

  namespace: kube-system

  

Modify kube-dns.yaml (a sed sketch follows the list below)

1. # Change clusterIP: __PILLAR__DNS__SERVER__ to the dns IP defined earlier, 10.254.0.2

2. # Change --domain=__PILLAR__DNS__DOMAIN__. to the domain chosen earlier: --domain=cluster.local.

3. # Change --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053 to use our domain: --server=/cluster.local./127.0.0.1#10053

4. # Change --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to our domain: --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,

5. # Change --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to our domain: --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,
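# The substitutions above can also be applied with sed (a sketch, using the 10.254.0.2 / cluster.local values chosen earlier in this guide):

sed -i 's/__PILLAR__DNS__SERVER__/10.254.0.2/g' kube-dns.yaml
sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' kube-dns.yaml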

 Apply the yaml files

# Replace all the images with the domestic mirrors

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Apply

[root@master-2347205 ~]# kubectl create -f .

service "kube-dns" created

serviceaccount "kube-dns" created

configmap "kube-dns" created

deployment "kube-dns" created

Check the kubedns service

[root@master-2347205 ~]# kubectl get all --namespace=kube-system

NAME             DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE

ds/calico-node   3         3         3         3            3            <none>          9h

NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

deploy/calico-kube-controllers    1         1         1            1           9h

deploy/calico-policy-controller   0         0         0            0           9h

deploy/kube-dns                   1         1         1            1           3h

NAME                                     DESIRED   CURRENT   READY     AGE

rs/calico-kube-controllers-f58c74bfd     1         1         1         9h

rs/calico-policy-controller-566dc8d645   0         0         0         9h

rs/kube-dns-ff55764f4                    1         1         1         3h

NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

deploy/calico-kube-controllers    1         1         1            1           9h

deploy/calico-policy-controller   0         0         0            0           9h

deploy/kube-dns                   1         1         1            1           3h

NAME             DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE

ds/calico-node   3         3         3         3            3            <none>          9h

NAME                                     DESIRED   CURRENT   READY     AGE

rs/calico-kube-controllers-f58c74bfd     1         1         1         9h

rs/calico-policy-controller-566dc8d645   0         0         0         9h

rs/kube-dns-ff55764f4                    1         1         1         3h

NAME                                         READY     STATUS    RESTARTS   AGE

po/calico-kube-controllers-f58c74bfd-xmwjk   1/1       Running   1          9h

po/calico-node-4b79k                         2/2       Running   4          9h

po/calico-node-9b68d                         2/2       Running   2          9h

po/calico-node-mgxjw                         2/2       Running   2          9h

po/kube-dns-ff55764f4-qprf4                  3/3       Running   0          3h

NAME                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE

svc/kube-dns               ClusterIP   10.254.0.2       <none>        53/UDP,53/TCP   3h

Verify the dns service

Before verifying dns: any pods and deployments created before dns was deployed must be deleted and re-created, otherwise their names will not resolve.
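# For example (a sketch; nginx-dm.yaml is an assumed filename for the nginx Deployment/Service manifest created earlier):

kubectl delete -f nginx-dm.yaml
kubectl apply -f nginx-dm.yaml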

[root@master-2347205 ~]# kubectl get pods -o wide

NAME                        READY     STATUS    RESTARTS   AGE       IP               NODE

alpine                      1/1       Running   1          7h        10.233.131.130   10.65.146.152

nginx-dm-55b58f68b6-fdqlv   1/1       Running   1          7h        10.233.165.193   10.147.190.81

nginx-dm-55b58f68b6-mp9vj   1/1       Running   2          7h        10.233.103.9     10.64.241.244

 

[root@master-2347205 ~]# kubectl get svc -o wide

NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE       SELECTOR

kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP   1d        <none>

nginx-svc    ClusterIP   10.254.49.180   <none>        80/TCP    7h        name=nginx

# Create a pod to test the nameserver

apiVersion: v1

kind: Pod

metadata:

  name: alpine

spec:

  containers:

  - name: alpine

    image: alpine

    command:

    - sh

    - -c

    - while true; do sleep 1; done

[root@master-2347205 ~]# kubectl get pods

NAME                        READY     STATUS    RESTARTS   AGE

alpine                      1/1       Running   1          7h

[root@master-2347205 ~]# kubectl exec -it alpine nslookup nginx-svc

nslookup: can't resolve '(null)': Name does not resolve

Name:      nginx-svc

Address 1: 10.254.49.180 nginx-svc.default.svc.cluster.local

Deploy Ingress and Dashboard

 Deploy dashboard && heapster

 Official dashboard github: https://github.com/kubernetes/dashboard

Download the dashboard image

# Official image

gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3

# Domestic mirror

jicki/kubernetes-dashboard-amd64:v1.6.3

 

Download the yaml files

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-controller.yaml

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-service.yaml

# Since RBAC is enabled, an RBAC binding needs to be created here

vi dashboard-rbac.yaml

apiVersion: v1

kind: ServiceAccount

metadata:

  name: dashboard

  namespace: kube-system

---

kind: ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1alpha1

metadata:

  name: dashboard

subjects:

  - kind: ServiceAccount

    name: dashboard

    namespace: kube-system

roleRef:

  kind: ClusterRole

  name: cluster-admin

  apiGroup: rbac.authorization.k8s.io

 

Apply the yaml

# Replace all the images with the domestic mirrors

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Add the rbac service account to dashboard-controller.yaml

# Add the following under the second spec

    spec:

      serviceAccountName: dashboard

# Apply the files

[root@master-2347205 dashboard]# kubectl apply -f .

deployment "kubernetes-dashboard" created

serviceaccount "dashboard" created

clusterrolebinding "dashboard" created

service "kubernetes-dashboard" created

# Check the svc and pod

[root@master-2347205 ~]# kubectl get svc -n kube-system

NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE

kube-dns               ClusterIP   10.254.0.2       <none>        53/UDP,53/TCP   4h

kubernetes-dashboard   ClusterIP   10.254.171.137   <none>        80/TCP          3h

* Deploy Nginx Ingress

Kubernetes currently exposes services in only three ways: LoadBalancer Service, NodePort Service and Ingress. What is an Ingress? An Ingress uses load-balancing tools such as Nginx or Haproxy to expose Kubernetes services.

Official Nginx Ingress github: https://github.com/kubernetes/ingress-nginx

# ingress can be deployed in several ways: 1.  deployment, freely scheduled via replicas

                     2.  daemonset, scheduled globally onto every node

#  With the deployment approach we need to constrain the controller to specific nodes, so those nodes must be labeled

# Defaults:

[root@master-2347205 ~]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.147.190.81   Ready     <none>    1d        v1.8.0

10.64.241.244   Ready     <none>    1d        v1.8.0

10.65.146.152   Ready     <none>    1d        v1.8.0

 

 # Apply the label

[root@master-2347205 ingress-nginx]# kubectl label nodes 10.64.241.244 ingress=proxy

node "10.64.241.244" labeled

# After labeling

[root@master-2347205 ingress-nginx]# kubectl get nodes --show-labels

NAME            STATUS    ROLES     AGE       VERSION   LABELS

10.147.190.81   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=10.147.190.81

10.64.241.244   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=10.64.241.244

10.65.146.152   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=10.65.146.152

 

# Download the images

# Official images

gcr.io/google_containers/defaultbackend:1.0

gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13

 

# Domestic mirrors

jicki/defaultbackend:1.0

jicki/nginx-ingress-controller:0.9.0-beta.13

 

* Deploy the Nginx backend; the Nginx backend forwards requests for unknown domains to a default page.

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml

# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Apply it directly; no modification is needed here

[root@master-2347205 ingress-nginx]# kubectl apply -f default-backend.yaml 

deployment "default-http-backend" created

service "default-http-backend" created

# Check the service

[root@master-2347205 ingress-nginx]# kubectl get deployment -n ingress-nginx default-http-backend

NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

default-http-backend   1         1         1            1           3h

# Deploy the Ingress RBAC resources

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml

# Apply the yaml file

[root@master-2347205 ingress-nginx]# kubectl apply -f rbac.yaml

namespace "nginx-ingress" created

serviceaccount "nginx-ingress-serviceaccount" created

clusterrole "nginx-ingress-clusterrole" created

role "nginx-ingress-role" created

rolebinding "nginx-ingress-role-nisa-binding" created

clusterrolebinding "nginx-ingress-clusterrole-nisa-binding" created

# Deploy the Ingress Controller component

# Download the yaml file

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml

# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Two nodes were labeled above, so configure replicas: 2

# Modify the yaml file: add the rbac serviceAccountName, hostNetwork and nodeSelector under the second spec.

spec:

  replicas: 2

  ....

    spec:

      hostNetwork: true

      serviceAccountName: nginx-ingress-serviceaccount

      nodeSelector:

        ingress: proxy

    ....

 # Apply the yaml file

[root@master-2347205 ingress-nginx]# kubectl apply -f with-rbac.yaml

deployment "nginx-ingress-controller" created

# Check the service (with multiple master nodes it will be scheduled onto different nodes)

[root@master-2347205 ingress-nginx]# kubectl get pods -n ingress-nginx -o wide

NAME                                        READY     STATUS    RESTARTS   AGE       IP              NODE

default-http-backend-845497d9ff-qcwxc       1/1       Running   0          3h        10.233.103.13   10.64.241.244

nginx-ingress-controller-5576895f6b-5ck4v   1/1       Running   0          3h        10.64.241.244   10.64.241.244

 

 # Check the existing svc

 [root@master-2347205 ingress-nginx]# kubectl get svc -n ingress-nginx

NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE

default-http-backend   ClusterIP   10.254.89.2   <none>        80/TCP    3h

# Create the yaml file

vi dashboard-ingress.yaml

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: dashboard-ingress

  namespace: kube-system

spec:

  rules:

  - host: master-2347205.lvs01.dev.ebayc3.com

    http:

      paths:

      - backend:

          serviceName: kubernetes-dashboard

          servicePort: 80

 

# Apply the yaml

[root@master-2347205 dashboard]# kubectl apply -f dashboard-ingress.yaml 

ingress "dashboard-ingress" created

# Check the ingress

[root@master-2347205 dashboard]# kubectl get ingress -n kube-system -o wide

NAME                HOSTS                                 ADDRESS         PORTS     AGE

dashboard-ingress   master-2347205.lvs01.dev.ebayc3.com   10.64.241.244   80        3h

# Test access

[root@master-2347205 dashboard]#  curl -I   master-2347205.lvs01.dev.ebayc3.com

HTTP/1.1 200 OK

Server: nginx/1.13.5

Date: Tue, 31 Oct 2017 10:23:36 GMT

Content-Type: text/html; charset=utf-8

Content-Length: 848

Connection: keep-alive

Accept-Ranges: bytes

Cache-Control: no-store

Last-Modified: Fri, 28 Jul 2017 12:38:51 GMT




Update Version

Master Or Node: update the binaries

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

systemctl stop kube-apiserver

systemctl stop kube-controller-manager

systemctl stop kube-scheduler

systemctl stop kubelet

systemctl stop kube-proxy

tar zxvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/bin/

systemctl start kube-apiserver

systemctl start kube-controller-manager

systemctl start kube-scheduler

systemctl start kubelet

systemctl start kube-proxy

systemctl status kube-apiserver

systemctl status kube-controller-manager

systemctl status kube-scheduler

systemctl status kubelet

systemctl status kube-proxy

cd ..

rm -rf kubernetes*

Node: update the binaries

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

systemctl stop kubelet

systemctl stop kube-proxy

tar zxvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kubectl,kube-proxy,kubelet} /usr/bin/

systemctl start kubelet

systemctl start kube-proxy

systemctl status kubelet

systemctl status kube-proxy

cd ..

rm -rf kubernetes*


Special env

# Some special env entries usable in yaml

    env:

    - name: MY_POD_NAME

      valueFrom:

        fieldRef:

          fieldPath: metadata.name

    - name: MY_POD_NAMESPACE

      valueFrom:

        fieldRef:

          fieldPath: metadata.namespace

    - name: MY_POD_IP

      valueFrom:

        fieldRef:

          fieldPath: status.podIP