Kubernetes deployment documentation and commands

Ubuntu 18.04 ----> root password: 123456
Main steps:

1. Rename the network interface to eth0:
root@ubuntu:vim /etc/default/grub
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
root@ubuntu:update-grub
root@ubuntu:reboot

2. Change the system IP address:
root@ubuntu:/home/jack# vim /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      dhcp4: no
      addresses: [192.168.100.112/24]
      gateway4: 192.168.100.2
      nameservers:
        addresses: [192.168.100.2]

3. Apply the IP configuration and verify (reboot to confirm it persists):
root@ubuntu:netplan  apply 
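A quick sanity check that the new address and gateway are live (values as configured above):
root@ubuntu:~# ip addr show eth0        # should show 192.168.100.112/24
root@ubuntu:~# ping -c 2 192.168.100.2  # the gateway should answer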


4. Change the hostname:
# cat /etc/hostname 
k8s-node1.example.com
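The same can be done with hostnamectl, which writes the file and applies the name immediately:
root@ubuntu:~# hostnamectl set-hostname k8s-node1.example.com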


5. Install commonly used packages:
apt-get update
apt-get purge ufw lxd lxd-client lxcfs lxc-common   # remove unneeded packages
apt-get install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip ipmitool

6. Install Docker:
root@k8s-node1:~# apt-get update
root@k8s-node1:~# apt-get -y install apt-transport-https ca-certificates curl software-properties-common
root@k8s-node1:~# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
root@k8s-node1:~# add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
root@k8s-node1:~# apt-get -y update && apt-get -y install docker-ce
root@k8s-node1:~# docker info

7. Take a VM snapshot, then set the time zone and time synchronization:

rm -rf /etc/localtime &&  ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate 172.20.1.211 &&  hwclock  -w
echo "*/30 * * * * ntpdate 172.20.1.211 &&  hwclock  -w" > /var/spool/cron/crontabs/root && systemctl  restart cron.service
 
 
8. Other configuration:
root@k8s-node1:~# grep "^[a-Z]" /etc/sysctl.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1
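The two bridge-nf parameters only take effect once the br_netfilter module is loaded; a minimal sketch of loading it and applying the settings:
root@k8s-node1:~# modprobe br_netfilter
root@k8s-node1:~# echo br_netfilter >> /etc/modules   # load on every boot
root@k8s-node1:~# sysctl -p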

Part 1: Server initialization and certificate creation:
yum install -y https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm
yum install -y https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm


Configure hostnames and the hosts file; synchronize time across all servers:
192.168.100.101 k8s-master1.example.com   k8s-master1
192.168.100.102 k8s-master2.example.com   k8s-master2
192.168.100.103 k8s-harbor1.example.com   k8s-harbor1
192.168.100.104 k8s-harbor2.example.com   k8s-harbor2
192.168.100.105 k8s-etcd1.example.com       k8s-etcd1
192.168.100.106 k8s-etcd2.example.com       k8s-etcd2
192.168.100.107 k8s-etcd3.example.com       k8s-etcd3
192.168.100.108 k8s-node1.example.com      k8s-node1
192.168.100.109 k8s-node2.example.com      k8s-node2
192.168.100.110 k8s-haproxy1.example.com k8s-haproxy1
192.168.100.111 k8s-haproxy2.example.com k8s-haproxy2

VIP:192.168.100.112

[root@k8s-master1 ~]# yum install sshpass -y
ssh-keygen
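A minimal sketch of pushing the key to all other hosts with sshpass, assuming the root password 123456 above and the address plan from the hosts table:
for ip in 192.168.100.{102..111}; do
  sshpass -p 123456 ssh-copy-id -o StrictHostKeyChecking=no root@${ip}
done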

 
Part 2: Install the Harbor servers:
 Harbor configuration (harbor.cfg):
 hostname = k8s-harbor1.example.com
 ui_url_protocol = https

ssl_cert = /usr/local/src/harbor/cert/server.crt
ssl_cert_key = /usr/local/src/harbor/cert/server.key
harbor_admin_password = 123456


mkdir  /usr/local/src/harbor/cert
openssl genrsa -out /usr/local/src/harbor/cert/server.key 2048  # generate the private key
openssl req -x509 -new -nodes -key /usr/local/src/harbor/cert/server.key  -subj "/CN=k8s-harbor1.example.com" -days 7120 -out /usr/local/src/harbor/cert/server.crt   # create a self-signed certificate with the given validity (on harbor1)

openssl req -x509 -new -nodes -key /usr/local/src/harbor/cert/server.key -subj "/CN=k8s-harbor2.example.com" -days 7120 -out /usr/local/src/harbor/cert/server.crt   # create a self-signed certificate with the given validity (on harbor2)


yum install python-pip -y
pip install docker-compose
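With docker-compose in place, Harbor itself is installed with the installer shipped in the offline package (assumed here to have been unpacked to /usr/local/src/harbor, where harbor.cfg and the cert directory above live):
cd /usr/local/src/harbor
vim harbor.cfg      # set hostname, ui_url_protocol, ssl_cert/ssl_cert_key, harbor_admin_password as above
./install.sh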

Configure clients to use Harbor:
mkdir /etc/docker/certs.d/k8s-harbor1.example.com -pv
mkdir /etc/docker/certs.d/k8s-harbor2.example.com -pv

[root@k8s-harbor1 harbor]# scp cert/server.crt  192.168.100.101:/etc/docker/certs.d/k8s-harbor1.example.com/
[root@k8s-harbor2 harbor]# scp cert/server.crt  192.168.100.101:/etc/docker/certs.d/k8s-harbor2.example.com/

# Test login
[root@k8s-master1 ~]# docker login k8s-harbor1.example.com
Username (admin):  
Password: 
Login Succeeded
[root@k8s-master1 ~]# docker login k8s-harbor2.example.com
Username (admin): 
Password: 
Login Succeeded


10.10.0.0/16  internal service network
10.20.0.0/16  pod (container) network

Part 3: Prepare the certificate environment
mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}  # on every machine

Batch-tune all servers and reboot

Prepare the certificate tooling (cfssl):
cd /usr/local/src
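If the cfssl binaries are not already in /usr/local/src, they can be fetched first (R1.2 release URLs; adjust if using a mirror):
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64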
 
[root@k8s-master1 src]# mv cfssl-certinfo_linux-amd64  /usr/bin/cfssl-certinfo
[root@k8s-master1 src]# mv cfssljson_linux-amd64  /usr/bin/cfssljson
[root@k8s-master1 src]# mv cfssl_linux-amd64  /usr/bin/cfssl
[root@k8s-master1 src]# chmod  a+x /usr/bin/cfssl*


[root@k8s-master1 ~]# cd /usr/local/src/ # initialize cfssl
[root@k8s-master1 src]#  cfssl print-defaults config > config.json
[root@k8s-master1 src]#  cfssl print-defaults csr > csr.json


Create the JSON config used to generate the CA:
[root@k8s-master1 src]# vim  ca-config.json
{
  "signing": {
    "default": {
      "expiry": "172800h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "172800h"
      }
    }
  }
}

Create the JSON file for the CA signing certificate CSR:
CN is the certificate owner's name, usually the site name or IP+port, e.g. www.baidu.com; OU = organizational unit, O = organization, L = city/locality, ST = state or province, C = country code
[root@k8s-master1 src]# cat  ca-csr.json

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the CA certificate (ca.pem) and key (ca-key.pem):
[root@k8s-master1 src]#  cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@k8s-master1 src]# ll *.pem
-rw------- 1 root root 1675 Jul 11 21:27 ca-key.pem
-rw-r--r-- 1 root root 1359 Jul 11 21:27 ca.pem
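Optionally inspect the CA before distributing it (either tool works):
[root@k8s-master1 src]# cfssl-certinfo -cert ca.pem
[root@k8s-master1 src]# openssl x509 -in ca.pem -noout -text | grep -A2 Validity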


# Distribute the certificates:
[root@k8s-master1 src]# cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl
[root@k8s-master1 src]# ll /opt/kubernetes/ssl/
total 16
-rw-r--r-- 1 root root  290 Jul 11 21:29 ca-config.json
-rw-r--r-- 1 root root 1001 Jul 11 21:29 ca.csr
-rw------- 1 root root 1675 Jul 11 21:29 ca-key.pem
-rw-r--r-- 1 root root 1359 Jul 11 21:29 ca.pem


[root@k8s-master1 src]# cat  /root/ssh.sh
#!/bin/bash
IP="
192.168.100.102
192.168.100.103
192.168.100.104
192.168.100.105
192.168.100.106
192.168.100.107
192.168.100.108
192.168.100.109
192.168.100.110
192.168.100.111
"

for node in ${IP};do
  #sshpass -p 123456 ssh-copy-id  -p22 ${node}  -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
    echo "${node} ssh key copied, starting environment initialization....."
#      ssh  -p22   ${node}  "test ! -d /etc/docker/certs.d/k8s-harbor1.example.com && mkdir /etc/docker/certs.d/k8s-harbor1.example.com -pv"
#      ssh  -p22   ${node}  "test ! -d /etc/docker/certs.d/k8s-harbor2.example.com && mkdir /etc/docker/certs.d/k8s-harbor2.example.com -pv"
#      echo "${node} Harbor certificate directories created!"
#      scp -P22 /etc/docker/certs.d/k8s-harbor1.example.com/server.crt ${node}:/etc/docker/certs.d/k8s-harbor1.example.com/server.crt
#      scp -P22 /etc/docker/certs.d/k8s-harbor2.example.com/server.crt ${node}:/etc/docker/certs.d/k8s-harbor2.example.com/server.crt
#      echo "${node} Harbor certificates copied!"
##      scp -P22 /etc/hosts ${node}:/etc/hosts
#      echo "${node} hosts file copied"
#      scp -P22 /etc/sysctl.conf  ${node}:/etc/sysctl.conf
#      echo "${node} sysctl.conf copied"
#      scp -P22 /etc/security/limits.conf  ${node}:/etc/security/limits.conf
#      echo "${node} limits.conf copied"
#      scp -r -P22  /root/.docker  ${node}:/root/
#      echo "${node} Harbor auth file copied!"
#      scp -r -P22  /etc/resolv.conf  ${node}:/etc/
#      sleep 2
#      ssh  -p22   ${node}  "reboot"
#      sleep 2
        scp -r -P22 /opt/kubernetes/ssl/*  ${node}:/opt/kubernetes/ssl
    else
    echo "${node} ssh-key copy error!"
    fi
done

Part 2: etcd cluster deployment:
# Download and unpack the etcd release on each etcd server:
[root@k8s-etcd1 src]# tar zxf etcd-v3.2.18-linux-amd64.tar.gz
[root@k8s-etcd1 src]# cd etcd-v3.2.18-linux-amd64

[root@k8s-etcd1 etcd-v3.2.18-linux-amd64]# cp etcdctl  etcd /opt/kubernetes/bin/
[root@k8s-etcd1 etcd-v3.2.18-linux-amd64]# scp  /opt/kubernetes/bin/etcd* 192.168.100.106:/opt/kubernetes/bin/
[root@k8s-etcd1 etcd-v3.2.18-linux-amd64]# scp  /opt/kubernetes/bin/etcd* 192.168.100.107:/opt/kubernetes/bin/


# On the master, create the etcd certificate signing request:
root@k8s-master1:/usr/local/src/ssl/etcd# nano etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
"192.168.100.105",
"192.168.100.106",
"192.168.100.107"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

# Generate the etcd certificate and private key:
root@k8s-master1:/usr/local/src/ssl/etcd# pwd
/usr/local/src/ssl/etcd
root@k8s-master1:/usr/local/src/ssl/etcd#  cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes etcd-csr.json | cfssljson -bare etcd


root@k8s-master1:/usr/local/src/ssl/etcd# ll
total 24
drwxr-xr-x 2 root root 4096 Jul 14 08:59 ./
drwxr-xr-x 4 root root 4096 Jul 14 08:56 ../
-rw-r--r-- 1 root root 1062 Jul 14 08:59 etcd.csr
-rw-r--r-- 1 root root  293 Jul 14 08:58 etcd-csr.json
-rw------- 1 root root 1679 Jul 14 08:59 etcd-key.pem
-rw-r--r-- 1 root root 1436 Jul 14 08:59 etcd.pem


Copy the certificates into /opt/kubernetes/ssl:
root@k8s-master1:/usr/local/src/ssl/etcd#  cp etcd*.pem /opt/kubernetes/ssl


# Copy the certificates to each etcd server node
root@k8s-master1:/usr/local/src/ssl/etcd# bash /root/ssh-dir/ssh.sh 


systemd unit file:
vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
Type=notify

[Install]
WantedBy=multi-user.target

# Configuration file:
root@k8s-etcd1:/usr/local/src/etcd-v3.2.18-linux-amd64# nano  /opt/kubernetes/cfg/etcd.conf

#[member]
ETCD_NAME="etcd-node1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.100.105:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.105:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.105:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.100.105:2380,etcd-node2=https://192.168.100.106:2380,etcd-node3=https://192.168.100.107:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.105:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"


[root@k8s-etcd1 src]# scp /opt/kubernetes/cfg/etcd.conf  192.168.100.106:/opt/kubernetes/cfg/
[root@k8s-etcd1 src]# scp /opt/kubernetes/cfg/etcd.conf  192.168.100.107:/opt/kubernetes/cfg/
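On etcd2 and etcd3 the copied etcd.conf must be edited so that ETCD_NAME and the listen/advertise URLs refer to the local node (ETCD_INITIAL_CLUSTER stays the same); a sketch for etcd2 (192.168.100.106), repeat analogously on etcd3:
sed -i \
  -e 's#^ETCD_NAME=.*#ETCD_NAME="etcd-node2"#' \
  -e 's#^ETCD_LISTEN_PEER_URLS=.*#ETCD_LISTEN_PEER_URLS="https://192.168.100.106:2380"#' \
  -e 's#^ETCD_LISTEN_CLIENT_URLS=.*#ETCD_LISTEN_CLIENT_URLS="https://192.168.100.106:2379,https://127.0.0.1:2379"#' \
  -e 's#^ETCD_INITIAL_ADVERTISE_PEER_URLS=.*#ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.106:2380"#' \
  -e 's#^ETCD_ADVERTISE_CLIENT_URLS=.*#ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.106:2379"#' \
  /opt/kubernetes/cfg/etcd.conf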
Create the data directory on each server:
 mkdir /var/lib/etcd
root@k8s-etcd1:/usr/local/src/etcd-v3.2.18-linux-amd64#  systemctl start etcd &&  systemctl enable etcd && systemctl  status etcd

# symlink etcdctl into the PATH
root@k8s-etcd1:/usr/local/src/etcd-v3.2.18-linux-amd64# ln -sv /opt/kubernetes/bin/etcdctl  /usr/bin/



  
Verify cluster health:
etcdctl --endpoints=https://192.168.100.105:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health

Verify the member list:
etcdctl --endpoints=https://192.168.100.105:2379   --ca-file=/opt/kubernetes/ssl/ca.pem   --cert-file=/opt/kubernetes/ssl/etcd.pem   --key-file=/opt/kubernetes/ssl/etcd-key.pem member list

Part 3: master deployment:
[root@k8s-master1 ~]# cd /usr/local/src/
[root@k8s-master1 src]# tar xvf kubernetes-1.11.0-client-linux-amd64.tar.gz
[root@k8s-master1 src]# tar xvf  kubernetes-1.11.0-node-linux-amd64.tar.gz
[root@k8s-master1 src]# tar xvf  kubernetes-1.11.0-server-linux-amd64.tar.gz
[root@k8s-master1 src]# tar xvf kubernetes-1.11.0.tar.gz


[root@k8s-master1 src]# cp kubernetes/server/bin/kube-apiserver /opt/kubernetes/bin/
[root@k8s-master1 src]# cp kubernetes/server/bin/kube-scheduler /usr/bin/
[root@k8s-master1 src]# cp kubernetes/server/bin/kube-controller-manager  /usr/bin/


Create the JSON file for the kubernetes CSR:
[root@k8s-master1 src]# vim kubernetes-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.10.0.1",
    "192.168.100.101",
    "192.168.100.102",
    "192.168.100.112",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the certificates:
[root@k8s-master1 src]#  cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem -ca-key=/opt/kubernetes/ssl/ca-key.pem -config=/opt/kubernetes/ssl/ca-config.json  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

[root@k8s-master1 src]# ll *.pem
-rw------- 1 root root 1675 Jul 11 22:24 kubernetes-key.pem
-rw-r--r-- 1 root root 1619 Jul 11 22:24 kubernetes.pem

# Copy the certificates to all servers
root@k8s-master1:/usr/local/src/ssl/master# cp kubernetes*.pem /opt/kubernetes/ssl/
[root@k8s-master1 src]# bash /root/ssh.sh


# Create the client token file used by the apiserver
root@k8s-master1:/usr/local/src/ssl/master#  head -c 16 /dev/urandom | od -An -t x | tr -d ' '
9077bdc74eaffb83f672fe4c530af0d6
[root@k8s-master2 ~]# vim /opt/kubernetes/ssl/bootstrap-token.csv # on every master server
9077bdc74eaffb83f672fe4c530af0d6,kubelet-bootstrap,10001,"system:kubelet-bootstrap"


Configure basic-auth usernames and passwords:
vim /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2

3.1: Deploy kube-apiserver:
kube-apiserver systemd unit:
[root@k8s-master1 src]# cat  /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=192.168.100.101 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.10.0.0/16 \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.100.105:2379,https://192.168.100.106:2379,https://192.168.100.107:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# Start and verify kube-apiserver:
[root@k8s-master1 src]# systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver && systemctl status  kube-apiserver

# Copy the unit file to master2, change --bind-address to master2's IP, then restart and verify kube-apiserver on master2
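A sketch of that step (assumes the kube-apiserver binary and the certificates are already in place on master2; the sed only rewrites the --bind-address line):
[root@k8s-master1 src]# scp /lib/systemd/system/kube-apiserver.service 192.168.100.102:/lib/systemd/system/
[root@k8s-master1 src]# ssh 192.168.100.102 "sed -i 's/--bind-address=192.168.100.101/--bind-address=192.168.100.102/' /lib/systemd/system/kube-apiserver.service && systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver"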


3.2: Deploy the Controller Manager service:
The Controller Manager is the internal management and control center of the Kubernetes cluster. It manages Nodes, Pod replicas, service endpoints (Endpoint), namespaces (Namespace), service accounts (ServiceAccount) and resource quotas (ResourceQuota). When a Node goes down, the Controller Manager detects it promptly and runs the automated repair workflow, keeping the cluster in its desired working state.

[root@k8s-master1 src]# cat /lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.10.0.0/16 \
  --cluster-cidr=10.20.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target


# Copy the binary
[root@k8s-master1 src]# cp kubernetes/server/bin/kube-controller-manager /opt/kubernetes/bin/

# scp the binary and the unit file to master2, then start and verify the service there
# Start and verify kube-controller-manager:
[root@k8s-master1 src]# systemctl restart kube-controller-manager &&  systemctl status  kube-controller-manager

3.3: Deploy the Kubernetes Scheduler
The Kubernetes Scheduler assigns Pods to specific worker (Node) nodes according to its scheduling algorithms.

[root@k8s-master1 src]# vim /lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

# Prepare the binary
[root@k8s-master1 src]# cp kubernetes/server/bin/kube-scheduler  /opt/kubernetes/bin/
[root@k8s-master1 src]# scp /opt/kubernetes/bin/kube-scheduler  192.168.100.102:/opt/kubernetes/bin/


# Start and verify the service:
systemctl enable kube-scheduler && systemctl restart kube-scheduler


3.4: Deploy the kubectl command-line tool
kubectl is the Kubernetes client tool for managing, creating and deleting resources, for example creating or deleting Pods and namespaces from YAML files.
[root@k8s-master1 src]# cp kubernetes/client/bin/kubectl  /opt/kubernetes/bin/
[root@k8s-master1 src]# scp /opt/kubernetes/bin/kubectl  192.168.100.102:/opt/kubernetes/bin/
[root@k8s-master1 src]# ln -sv /opt/kubernetes/bin/kubectl  /usr/bin/


Create the admin certificate signing request
vim admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}


Generate the admin certificate and private key:
[root@k8s-master1 src]#  cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem    -ca-key=/opt/kubernetes/ssl/ca-key.pem     -config=/opt/kubernetes/ssl/ca-config.json    -profile=kubernetes admin-csr.json | cfssljson -bare admin

[root@k8s-master1 src]# ll admin*
-rw-r--r-- 1 root root 1009 Jul 11 22:51 admin.csr
-rw-r--r-- 1 root root  229 Jul 11 22:50 admin-csr.json
-rw------- 1 root root 1679 Jul 11 22:51 admin-key.pem
-rw-r--r-- 1 root root 1399 Jul 11 22:51 admin.pem


[root@k8s-master1 src]# cp admin*.pem /opt/kubernetes/ssl/

Set the cluster parameters:
on master1:
kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.100.112:6443

Set the client authentication parameters:
[root@k8s-master1 src]# kubectl config set-credentials admin \
    --client-certificate=/opt/kubernetes/ssl/admin.pem \
    --embed-certs=true \
    --client-key=/opt/kubernetes/ssl/admin-key.pem
User "admin" set.


Set the context parameters:
[root@k8s-master1 src]#  kubectl config set-context kubernetes \
    --cluster=kubernetes  --user=admin
Context "kubernetes" created.

Set the default context:
[root@k8s-master2 src]# kubectl config use-context kubernetes
Switched to context "kubernetes".

[root@k8s-master1 src]# kubectl config use-context kubernetes
Switched to context "kubernetes".
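With the context in place, the control-plane components can be checked directly from kubectl (scheduler, controller-manager and the etcd members should all report Healthy):
[root@k8s-master1 src]# kubectl get cs
[root@k8s-master1 src]# kubectl cluster-info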

Part 4: node deployment:
Install base packages on each node:
apt-get install ipvsadm ipset conntrack

Prepare the binaries:
[root@k8s-master1 src]# scp kubernetes/server/bin/kube-proxy  kubernetes/server/bin/kubelet  192.168.100.108:/opt/kubernetes/bin/
[root@k8s-master1 src]# scp kubernetes/server/bin/kube-proxy kubernetes/server/bin/kubelet  192.168.100.109:/opt/kubernetes/bin/


Role binding:
[root@k8s-master1 src]#  kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Create the kubelet bootstrapping kubeconfig file and set the cluster parameters
[root@k8s-master1 src]# kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
    --server=https://192.168.100.112:6443 \
    --kubeconfig=bootstrap.kubeconfig
Cluster "kubernetes" set.

Set the client authentication parameters
 [root@k8s-master1 src]#  kubectl config set-credentials kubelet-bootstrap \
   --token=9077bdc74eaffb83f672fe4c530af0d6 \
   --kubeconfig=bootstrap.kubeconfig   

   
Set the context:
[root@k8s-master1 src]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
   
  
Select the default context
[root@k8s-master1 src]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
Switched to context "default"


[root@k8s-master1 src]# scp bootstrap.kubeconfig  192.168.100.108:/opt/kubernetes/cfg/  # the file generated above
[root@k8s-master1 src]# scp bootstrap.kubeconfig  192.168.100.109:/opt/kubernetes/cfg/


Deploy kubelet:
On startup, kubelet registers its node information with the API Server and periodically reports its status; the API Server writes this information into etcd.
1. Configure CNI support:
A CNI network plugin is a standalone executable invoked by the container management platform above it. The plugin only has two jobs: add a container to the network and remove a container from the network.
Workflow once Kubernetes uses a CNI network plugin:
Kubernetes first creates the pause container, which owns the pod's network namespace.
It then calls the network driver (since CNI is configured, the CNI code path is used).
The CNI driver calls the specific CNI plugin according to its configuration.
The CNI plugin configures the pause container's network; all other containers in the pod share the pause container's network.

On the node(s):
 mkdir -p /etc/cni/net.d
 [root@k8s-node1 ~]# cat  /etc/cni/net.d/10-default.conf
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}


mkdir /var/lib/kubelet

Create the kubelet service unit; this must be configured on every node:
[root@k8s-node2 ~]#  vim /lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=192.168.100.108 \
  --hostname-override=192.168.100.108 \
  --pod-infra-container-image=k8s-harbor1.example.com/baseimages/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.10.0.1 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target


systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet && systemctl  status kubelet


On the master, check the CSR requests:
[root@k8s-master2 src]#  kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-dD84BwWWLl43SeiB5G1PnUaee5Sv60RsoVZFkuuePg0   1m        kubelet-bootstrap   Pending
node-csr-vW4eBZb98z-DvAeG9q8hb9mOAUg0U9HSML9YRBscP8A   2m        kubelet-bootstrap   Pending


Approve the TLS bootstrap requests on the master:
After approval, the nodes show up in Ready state:
[root@k8s-master2 src]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io/node-csr-dD84BwWWLl43SeiB5G1PnUaee5Sv60RsoVZFkuuePg0 approved
certificatesigningrequest.certificates.k8s.io/node-csr-vW4eBZb98z-DvAeG9q8hb9mOAUg0U9HSML9YRBscP8A approved

[root@k8s-master2 src]# kubectl get nodes
NAME              STATUS    ROLES     AGE       VERSION
192.168.100.108   Ready     <none>    32s       v1.11.0
192.168.100.109   Ready     <none>    32s       v1.11.0

Deploy kube-proxy on the nodes
[root@k8s-node1 ~]#  yum install -y ipvsadm ipset conntrack
[root@k8s-node2 ~]#  yum install -y ipvsadm ipset conntrack

Create the kube-proxy certificate request
[root@k8s-master1 src]#  vim kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the certificate
[root@k8s-master1 src]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem    -ca-key=/opt/kubernetes/ssl/ca-key.pem    -config=/opt/kubernetes/ssl/ca-config.json    -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

Copy the certificates to each node:
[root@k8s-master1 src]# cp kube-proxy*.pem /opt/kubernetes/ssl/
[root@k8s-master1 src]# bash /root/ssh.sh

Create the kube-proxy kubeconfig
[root@k8s-master1 src]# kubectl config set-cluster kubernetes \
    --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.100.112:6443 \
   --kubeconfig=kube-proxy.kubeconfig
Cluster "kubernetes" set.


[root@k8s-master1 src]# kubectl config set-credentials kube-proxy \
    --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
    --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.

[root@k8s-master1 src]# kubectl config set-context default \
    --cluster=kubernetes \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig


[root@k8s-master1 src]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Switched to context "default".

Distribute the kubeconfig file:
[root@k8s-master1 src]# scp kube-proxy.kubeconfig  192.168.100.108:/opt/kubernetes/cfg/
[root@k8s-master1 src]# scp kube-proxy.kubeconfig  192.168.100.109:/opt/kubernetes/cfg/


Create the kube-proxy service unit
[root@k8s-node1 ~]#  mkdir /var/lib/kube-proxy
[root@k8s-node2 ~]#  mkdir /var/lib/kube-proxy

# cat /lib/systemd/system/kube-proxy.service 
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=192.168.100.109 \
  --hostname-override=192.168.100.109 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
   --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Restart and verify the service (requires the ipvsadm, ipset and conntrack packages installed above):
[root@k8s-node1 ~]#  systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy && systemctl status kube-proxy
[root@k8s-node2 ~]#  systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy && systemctl status kube-proxy


root@k8s-node1:/opt/kubernetes/cfg# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr
  -> 192.168.100.101:6443         Masq    1      0          0         
  -> 192.168.100.102:6443         Masq    1      0          0 


  
  
  

Part 5: Deploy flannel:
https://github.com/coreos/flannel

Deploy the flannel service on every master and node:

Generate the CSR file for the certificate request:
[root@k8s-master1 src]# vim flanneld-csr.json
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the certificate:
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem    -ca-key=/opt/kubernetes/ssl/ca-key.pem    -config=/opt/kubernetes/ssl/ca-config.json    -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

cp  flanneld*.pem /opt/kubernetes/ssl/
scp flanneld*.pem 192.168.100.108:/opt/kubernetes/ssl/
scp flanneld*.pem 192.168.100.109:/opt/kubernetes/ssl/


root@k8s-master1:/usr/local/src# tar xvf flannel-v0.10.0-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# scp -P22 flanneld  192.168.100.108:/opt/kubernetes/bin/
root@k8s-master1:/usr/local/src# scp -P22 flanneld  192.168.100.109:/opt/kubernetes/bin/


Copy the helper scripts into /opt/kubernetes/bin:
[root@k8s-master1 src]# cd /usr/local/src/kubernetes/cluster/centos/node/bin/
[root@k8s-master1 bin]# cp remove-docker0.sh /opt/kubernetes/bin/

root@k8s-master1:/usr/local/src/kubernetes/cluster/centos/node/bin# scp remove-docker0.sh  mk-docker-opts.sh  192.168.100.108:/opt/kubernetes/bin/
root@k8s-master1:/usr/local/src/kubernetes/cluster/centos/node/bin# scp remove-docker0.sh  mk-docker-opts.sh  192.168.100.109:/opt/kubernetes/bin/

Configure flannel
[root@k8s-master1 bin]# vim /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://192.168.100.105:2379,https://192.168.100.106:2379,https://192.168.100.107:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

Copy the configuration to the other nodes:
[root@k8s-master1 bin]# scp /opt/kubernetes/cfg/flannel 192.168.100.108:/opt/kubernetes/cfg/
[root@k8s-master1 bin]# scp /opt/kubernetes/cfg/flannel 192.168.100.109:/opt/kubernetes/cfg/


Set up the flannel systemd service
root@k8s-master1:/usr/local/src/ssl/flannel# vim /lib/systemd/system/flannel.service

[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service


Copy the unit file to the other nodes
# scp /lib/systemd/system/flannel.service 192.168.100.108:/lib/systemd/system/flannel.service
# scp /lib/systemd/system/flannel.service 192.168.100.109:/lib/systemd/system/flannel.service


Flannel CNI integration
https://github.com/containernetworking/plugins/releases

[root@k8s-master1 src]# mkdir /opt/kubernetes/bin/cni
[root@k8s-master1 src]# tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni
root@k8s-master1:/usr/local/src#  scp -r  /opt/kubernetes/bin/cni 192.168.100.108:/opt/kubernetes/bin/
root@k8s-master1:/usr/local/src#  scp -r  /opt/kubernetes/bin/cni 192.168.100.109:/opt/kubernetes/bin/


Create the network configuration in etcd:
Copy the certificates to the etcd servers in advance, or run this from a node:
root@k8s-master1:/usr/local/src/ssl/flannel#  /opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file    /opt/kubernetes/ssl/flanneld-key.pem  --no-sync -C https://192.168.100.105:2379,https://192.168.100.106:2379,https://192.168.100.107:2379  mk /kubernetes/network/config  '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' 


Verify the network configuration:
[root@k8s-etcd1 ~]# /opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem     --no-sync -C  https://192.168.100.107:2379 get  /kubernetes/network/config   # the following line is the returned value
{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}

Start flannel:
[root@k8s-node2 ~]# systemctl daemon-reload && systemctl enable flannel && chmod +x /opt/kubernetes/bin/* &&  systemctl start flannel &&  systemctl status  flannel

Verify on the node that a subnet was allocated:
root@k8s-node2:/opt/kubernetes/bin# cat /var/run/flannel/docker 
DOCKER_OPT_BIP="--bip=10.2.32.1/24"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_OPTS=" --bip=10.2.32.1/24 --mtu=1450 "


root@k8s-node2:/opt/kubernetes/bin# cat /var/run/flannel/subnet.env 
FLANNEL_NETWORK=10.2.0.0/16
FLANNEL_SUBNET=10.2.32.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
root@k8s-node2:/opt/kubernetes/bin# 

Verify the subnet:
root@k8s-node2:~# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.2.32.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::accd:61ff:fe52:8805  prefixlen 64  scopeid 0x20<link>
        ether ae:cd:61:52:88:05  txqueuelen 0  (Ethernet)
        RX packets 7  bytes 588 (588.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 7  bytes 588 (588.0 B)
        TX errors 0  dropped 12 overruns 0  carrier 0  collisions 0


Configure the Docker service to use flannel
[root@k8s-node1 ~]# vim /usr/lib/systemd/system/docker.service
[Unit] # under [Unit], modify After and add Requires
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service

[Service] # add EnvironmentFile=-/run/flannel/docker
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS

# The resulting unit file in full:
root@k8s-node2:~# cat  /lib/systemd/system/docker.service 
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd  $DOCKER_OPTS  -H fd://
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

# Distribute to each node:
[root@k8s-node1 ~]# scp /lib/systemd/system/docker.service   192.168.100.108:/lib/systemd/system/docker.service 
[root@k8s-node1 ~]# scp /lib/systemd/system/docker.service   192.168.100.109:/lib/systemd/system/docker.service 
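After distributing the unit file, reload and restart Docker on each node and confirm that docker0 picked up the flannel-assigned --bip subnet from /run/flannel/docker:
systemctl daemon-reload && systemctl restart docker
ip addr show docker0      # should show the --bip address, e.g. 10.2.32.1/24 on this node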
   
   
   
   
 Test by creating an application:
 root@k8s-master1:~# kubectl run net-test --image=alpine --replicas=2 sleep 360000

The image is pulled automatically; then verify the containers started successfully:
root@k8s-master1:~# kubectl get pods
NAME                      READY     STATUS    RESTARTS   AGE
net-test-ff75d6c6-5tj88   1/1       Running   0          10m
net-test-ff75d6c6-fsftw   1/1       Running   0          10m

# Enter the container from the master node:
root@k8s-master1:~# kubectl exec -it net-test-ff75d6c6-fsftw sh
/ #


# On the node, verify by entering the container directly:
 root@k8s-node1:~# docker exec -it 5418e1f1ae17 sh
/ # 
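To confirm flannel provides cross-host pod connectivity, list the pod IPs and ping one pod from the other (pod names and IPs below are whatever kubectl reports in your cluster):
root@k8s-master1:~# kubectl get pods -o wide                  # note each pod's IP and node
root@k8s-master1:~# kubectl exec -it net-test-ff75d6c6-fsftw -- ping -c 2 <IP of the pod on the other node>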


   
Part 6: Deploy CoreDNS:
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# docker tag gcr.io/google-containers/coredns:1.1.3  k8s-harbor1.example.com/baseimages/coredns:1.1.3
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# docker push k8s-harbor1.example.com/baseimages/coredns:1.1.3

cp coredns.yaml.base  coredns.yaml

vim coredns.yaml # change the image address and the clusterIP
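A hedged sed sketch of those edits, using the placeholder names and upstream image reference found in the 1.11 addons template (verify them against the local coredns.yaml before running; the clusterIP must match the kubelet --cluster-dns value):
sed -i -e 's#k8s.gcr.io/coredns:1.1.3#k8s-harbor1.example.com/baseimages/coredns:1.1.3#' \
       -e 's#__PILLAR__DNS__DOMAIN__#cluster.local#' \
       -e 's#__PILLAR__DNS__SERVER__#10.10.0.254#' coredns.yaml    # 10.10.0.254 is an example service IP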

kubectl create -f coredns.yaml

kubectl get pods --all-namespaces
kubectl get services -n kube-system
   
# DNS test: use the busybox image provided with the course material
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# docker tag gcr.io/google-containers/busybox  k8s-harbor1.example.com/baseimages/busybox
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# docker push k8s-harbor1.example.com/baseimages/busybox

   
   
# YAML file
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# cat busybox.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default  # DNS in the default namespace
spec:
  containers:
  - image: k8s-harbor1.example.com/baseimages/busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always

# Create and verify the pod
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# kubectl create -f busybox.yaml
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# kubectl  get pods


# Verify CoreDNS name resolution: services in another namespace must be resolved by their full domain name; within the same namespace the service name alone resolves
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# kubectl  get  services --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.1.0.1     <none>        443/TCP         4d
kube-system   kube-dns     ClusterIP   10.1.0.254   <none>        53/UDP,53/TCP   11m


DNS name format: <service name>.<namespace name>.<domain suffix>
   
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# kubectl exec busybox nslookup kubernetes
Server:    10.1.0.254
Address 1: 10.1.0.254 kube-dns.kube-system.svc.cluster.local

nslookup: can't resolve 'kubernetes'
command terminated with exit code 1


root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dns/coredns# kubectl exec busybox nslookup kubernetes.default.svc.cluster.local
Server:    10.1.0.254
Address 1: 10.1.0.254

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.1.0.1 kubernetes.default.svc.cluster.local

   
https://192.168.100.102:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy


kubectl exec busybox nslookup kubernetes.default.cluster.local

python-test-tomcat1-spec.python1.svc.cluster.local
python-test-tomcat2-spec.python2.svc.cluster.local

 Part 7: Deploy the Kubernetes dashboard (web UI):
Bind the admin account to the cluster-admin role to grant access:
kubectl create clusterrolebinding login-dashboard-admin --clusterrole=cluster-admin --user=admin

 
Install flannel on the masters, same steps as in Part 5
 
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dashboard# pwd
/usr/local/src/kubernetes/cluster/addons/dashboard-1.8.2

Change the image address in the yaml files
root@k8s-master1:/usr/local/src/kubernetes/cluster/addons/dashboard# kubectl  create  -f  .  # create the dashboard
serviceaccount/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.extensions/kubernetes-dashboard created
service/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created

 
Test access from a browser:
https://192.168.100.101:6443/api/v1/namespaces/kube-system/services/http:kubernetes-dashboard:/proxy/#!

 
 
Part 8: heapster
root@k8s-master1:/usr/local/src/kubernetes/heapster# docker load -i heapster-amd64_v1.5.1.tar
root@k8s-master1:/usr/local/src/kubernetes/heapster# docker tag gcr.io/google-containers/heapster-amd64:v1.5.1  k8s-harbor1.example.com/baseimages/heapster-amd64:v1.5.1
root@k8s-master1:/usr/local/src/kubernetes/heapster# docker push k8s-harbor1.example.com/baseimages/heapster-amd64:v1.5.1
root@k8s-master1:/usr/local/src/kubernetes/heapster# vim heapster.yaml # change the image address
 
 
 root@k8s-master1:/usr/local/src/kubernetes/heapster# docker load -i heapster-grafana-amd64-v4.4.3.tar
 root@k8s-master1:/usr/local/src/kubernetes/heapster# docker tag 8cb3de219af7 k8s-harbor1.example.com/baseimages/heapster-grafana-amd64:v4.4.3
 root@k8s-master1:/usr/local/src/kubernetes/heapster# docker push k8s-harbor1.example.com/baseimages/heapster-grafana-amd64:v4.4.3
 root@k8s-master1:/usr/local/src/kubernetes/heapster# vim heapster.yaml  
 
 
 root@k8s-master1:/usr/local/src/kubernetes/heapster# docker load -i heapster-influxdb-amd64_v1.3.3.tar 
 root@k8s-master1:/usr/local/src/kubernetes/heapster# docker tag gcr.io/google-containers/heapster-influxdb-amd64:v1.3.3 k8s-harbor1.example.com/baseimages/heapster-influxdb-amd64:v1.3.3
 root@k8s-master1:/usr/local/src/kubernetes/heapster# vim influxdb.yaml

 
 root@k8s-master1:/usr/local/src/kubernetes/heapster# kubectl  create -f .
deployment.extensions/monitoring-grafana created
service/monitoring-grafana created
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
deployment.extensions/heapster created
service/heapster created
deployment.extensions/monitoring-influxdb created
service/monitoring-influxdb created


https://192.168.100.101:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy

k8s-harbor1.example.com/baseimages/heapster-amd64:v1.5.1

k8s-harbor1.example.com/baseimages/heapster-grafana-amd64:v4.4.3

k8s-harbor1.example.com/baseimages/heapster-influxdb-amd64:v1.3.3

root@k8s-master1:~# kubectl exec busybox nslookup kubernetes.default.svc.cluster.local  -n kube-system
Server:    10.1.0.254
Address 1: 10.1.0.254 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.1.0.1 kubernetes.default.svc.cluster.local

Part 9: kubernetes-dashboard multi-tenancy:
Tenant Pods are isolated from each other via namespaces.
9.1: Import the image:
# docker load -i kubernetes-dashboard-amd64_v1.8.3.tar.gz
# docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.8.3 k8s-harbor1.example.com/baseimages/kubernetes-dashboard-amd64:v1.8.3
# docker push k8s-harbor1.example.com/baseimages/kubernetes-dashboard-amd64:v1.8.3

9.2: Update the configuration:
image: k8s-harbor1.example.com/baseimages/kubernetes-dashboard-amd64:v1.8.3
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
          - --token-ttl=43200 # custom token expiry time (seconds)

9.3: Create the dashboard:
# kubectl  apply -f .
  
9.4: Log in to the web UI:

9.5: Get the login token:
# kubectl -n kube-system describe secret  $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

 eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZiaDc1Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlM2Y1OGUyOS05YmJmLTExZTgtYjcyMC01MjU0MDBiM2NhN2YiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.kzttUTIScAg9SRfSn7uLYOf9JgIeZac9LGqbW8ai-NmirMiKMxFRgxa8Chdvdfz83uyNgCgjjtXOsRCMeGL-c9VsDN9xiM-vKCo8IF3DEvg2NRTQEtY2K3o2GqmcEcUJ11fX6MOjD1eocfhgJxAqHuAZq3Xd5VoaSiyP3NLx2AOKuFmeeA3VBYCddLoCoMqUrIDh7WioyLP_euDuKiqbcv0xNnRg_nv9PLme6xiRELQwaVXqvK8WpIenkasugG7MCPn-YLkaPsJ-zbVri2JvzT3mzl7Qx8eX2_JtMwWnyaelAYh70La7hjGgWpqbtgt3rQYxT-xg_CAiANjOVYXcrg

9.6: Test logging in with the token:


9.7: Using the token:

k8s day 3 preview and planned class content:
1. Kubernetes upgrades and downgrades
2. Upgrading the dashboard to 1.8.3 and implementing multi-tenant isolation
3. Login authentication based on tokens and kubeconfig files
4. Replacing CoreDNS with kube-dns

5. Preparing Docker images for the production environment
6. Highly available nginx + tomcat services on Kubernetes in practice
7. Implementing host affinity in k8s
8. Mounting multiple NAS volumes in k8s containers to separate static and dynamic content:
        volumeMounts:
        - name: nfs-img-103
          mountPath: /usr/local/nginx/html/webapp/img
          readOnly: false
          

          
      volumes:
      - name: nfs-img-103
        nfs:
          server: 192.168.100.102
          path: /nfs/img
          

export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin
export CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar          
          

yum install -y vim wget tree  pcre pcre-devel gcc gcc-c++  zlib zlib-devel openssl openssl-devel net-tools iotop unzip zip iproute ntpdate nfs-utils tcpdump telnet traceroute

k8s-harbor1.example.com/baseimages/kubernetes-dashboard-amd64:v1.8.3


k8s day 4 preview and planned class content:
1. Log collection and display with k8s plus ELK
2. Clustered Ceph deployment
3. Monitoring components: Prometheus + Grafana
4. Automated deployment and rollback with k8s plus GitLab and Jenkins
5. Manual and automated scaling of k8s containers
6. Replacing the network plugin with Calico

Part 9: Writing YAML files:
Nginx:


Tomcat:

 
Part 10: Monitoring component: Prometheus:

https://prometheus.io/download/
 
 

 
 https://help.ubuntu.com/community/UbuntuBonding?_ga=2.247657987.581117643.1533008004-450968605.1519631337#Descriptions_of_bonding_modes
 

ELK:

export JAVA_HOME=/usr/local/jdk
export CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
export PATH=/usr/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin

Standalone (single machine)

frame

10.20.3.203  172.18.21.57
10.20.3.204  172.18.21.58

10.12.51.202
10.12.51.203
10.12.51.204
10.12.51.205
10.12.51.206
10.12.51.207
10.12.51.208
10.12.51.209


   
