Installing a Kubernetes v1.10.0 Cluster with kubeadm: Detailed Guide

I. Preparation

1. Base environment

OS: CentOS Linux release 7.4.1708 (GUI)
Docker version: 17.03.2-ce
socat version: 1.7.3.2-2.el7.x86_64
kubelet version: 1.10.0-0.x86_64
kubernetes-cni version: 0.6.0-0.x86_64
kubectl version: 1.10.0-0.x86_64
kubeadm version: 1.10.0-0.x86_64
Browser: Firefox
Memory: 16 GB or more
CPU: 8 cores
Network: 1 Gbps or faster

2. Host roles

Hostname    IP address      Role              Notes
k8scy01     172.27.10.101   master and etcd
k8scy02     172.27.10.102   master and etcd
k8scy03     172.27.10.103   master and etcd
k8scy04     172.27.10.104   node
VirtualIP   172.27.10.105   VIP

3. Configure the hosts file

Append the following entries to /etc/hosts on every host:

# cat << EOF >> /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.27.10.101 k8scy01
172.27.10.102 k8scy02
172.27.10.103 k8scy03
172.27.10.104 k8scy04
EOF

4. Filesystem layout

Directory      Recommended size   Notes
/              100 GB or more     root filesystem
/var           100 GB or more     logs
/data/sdv1     1 TB or more       data
/data/sdv..n   1 TB or more       data

II. Operating system configuration (all nodes)

1. Configure passwordless SSH (run on k8scy01)

# ssh-keygen  # press Enter at every prompt to accept the defaults
# ssh-copy-id k8scy02  # answer yes and enter the host's password
# ssh-copy-id k8scy03  # answer yes and enter the host's password
# ssh-copy-id k8scy04  # answer yes and enter the host's password
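
Optionally verify that passwordless login works before continuing; a minimal check over this document's node list:

# for host in k8scy02 k8scy03 k8scy04; do ssh -o BatchMode=yes $host hostname; done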

2. Configure kernel parameters

Set general system kernel parameters:

# cp /etc/sysctl.conf /etc/sysctl.conf.bak
# cat << EOF > /etc/sysctl.conf

## Kernel defaults
kernel.sysrq = 0
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
## Max open files (20*1024*1024)
fs.file-max= 20971520
## Web server tuning
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_fin_timeout=30
net.ipv4.tcp_keepalive_time=1200
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_rmem=4096 87380 8388608
net.ipv4.tcp_wmem=4096 87380 8388608
net.ipv4.tcp_max_syn_backlog=8192
net.ipv4.tcp_max_tw_buckets = 5000
## Additional TCP parameters
## Kubernetes pod networking requires IP forwarding
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 262144
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
## Disable IPv6
net.ipv6.conf.all.disable_ipv6 =1
net.ipv6.conf.default.disable_ipv6 =1
## Minimize swap usage
vm.swappiness=0
EOF

Apply the settings:

# sysctl -p

Set Kubernetes-specific kernel parameters:

# modprobe br_netfilter

# cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

Apply the settings:

# sysctl -p /etc/sysctl.d/k8s.conf
# ls /proc/sys/net/bridge
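
Note that br_netfilter is not loaded automatically after a reboot. One way to make it persistent on CentOS 7 (the file name under /etc/modules-load.d/ is arbitrary):

# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf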

3. Configure open file limits

# echo "* soft nofile 65536" >> /etc/security/limits.conf
# echo "* hard nofile 65536" >> /etc/security/limits.conf
# echo "* soft nproc 65536"  >> /etc/security/limits.conf
# echo "* hard nproc 65536"  >> /etc/security/limits.conf
# echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
# echo "* hard memlock  unlimited"  >> /etc/security/limits.conf

4. Disable the firewall and SELinux

Disable the firewall:

# systemctl stop firewalld
# systemctl disable firewalld

Disable SELinux:

# setenforce  0 
# sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux 
# sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 
# sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux 
# sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

5. Time synchronization

# /usr/sbin/ntpdate -us 172.27.5.123;hwclock -w;
# systemctl enable ntpdate.service
# echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
# crontab /tmp/crontab2.tmp
# systemctl start ntpdate.service

6. Disable transparent huge pages

# cat << EOF >> /etc/rc.d/rc.local
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo never > /sys/kernel/mm/transparent_hugepage/enabled
EOF
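
On CentOS 7, /etc/rc.d/rc.local is not executable by default, so the lines above will not run at boot unless it is made executable. To make it executable and apply the change immediately:

# chmod +x /etc/rc.d/rc.local
# echo never > /sys/kernel/mm/transparent_hugepage/defrag
# echo never > /sys/kernel/mm/transparent_hugepage/enabled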

7. System default locale

# env | grep LANG

If LANG is not en_US.UTF-8, set LANG to "en_US.UTF-8" in /etc/locale.conf.

8. Disable swap

# swapoff -a
# sed -i 's/.*swap.*/#&/' /etc/fstab
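
Verify that swap is now off:

# free -m | grep -i swap   # the Swap line should show 0 total
# swapon -s                # should print nothing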

9. Install dependencies
Configure the Kubernetes yum repository:

# cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install (or upload) epel-release:

# yum install -y epel-release

Check the yum repositories:

# yum clean all
# yum repolist

Install the dependency packages:

# yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim  ntpdate libseccomp libtool-ltdl tree bash-completion

After completing the configuration above, rebooting all nodes is recommended.

III. Install and configure keepalived (all 3 master nodes)
1. Install keepalived

# yum install -y keepalived
# systemctl enable keepalived

2. Configure keepalived.conf
keepalived.conf on k8scy01:

# cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://172.27.10.105:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 172.27.10.101
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        172.27.10.102
        172.27.10.103
    }
    virtual_ipaddress {
        172.27.10.105/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

keepalived.conf on k8scy02:

# cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://172.27.10.105:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 61
    priority 90
    advert_int 1
    mcast_src_ip 172.27.10.102
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        172.27.10.101
        172.27.10.103
    }
    virtual_ipaddress {
        172.27.10.105/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

keepalived.conf on k8scy03:

# cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://172.27.10.105:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 61
    priority 80
    advert_int 1
    mcast_src_ip 172.27.10.103
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        172.27.10.101
        172.27.10.102
    }
    virtual_ipaddress {
        172.27.10.105/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

3. Start keepalived

# systemctl restart keepalived
# ip addr  # on k8scy01 the VIP should now be bound
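
A quick sanity check: the VIP should be bound on exactly one master at a time, for example:

# for host in k8scy01 k8scy02 k8scy03; do ssh $host "ip addr | grep 172.27.10.105"; done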

IV. Install and configure etcd (all 3 master nodes)
1. Install etcd

# yum install etcd -y
# mkdir -p /var/lib/etcd

2. Configure etcd.service
etcd.service on k8scy01:

# cat << EOF > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name k8scy01 \
  --initial-advertise-peer-urls http://172.27.10.101:2380 \
  --listen-peer-urls http://172.27.10.101:2380 \
  --listen-client-urls http://172.27.10.101:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://172.27.10.101:2379 \
  --initial-cluster-token etcd-cluster-0 \
--initial-cluster k8scy01=http://172.27.10.101:2380,k8scy02=http://172.27.10.102:2380,k8scy03=http://172.27.10.103:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

etcd.service on k8scy02:

# cat << EOF > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name k8scy02 \
  --initial-advertise-peer-urls http://172.27.10.102:2380 \
  --listen-peer-urls http://172.27.10.102:2380 \
  --listen-client-urls http://172.27.10.102:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://172.27.10.102:2379 \
  --initial-cluster-token etcd-cluster-0 \
--initial-cluster k8scy01=http://172.27.10.101:2380,k8scy02=http://172.27.10.102:2380,k8scy03=http://172.27.10.103:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

etcd.service on k8scy03:

# cat << EOF > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name k8scy03 \
  --initial-advertise-peer-urls http://172.27.10.103:2380 \
  --listen-peer-urls http://172.27.10.103:2380 \
  --listen-client-urls http://172.27.10.103:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://172.27.10.103:2379 \
  --initial-cluster-token etcd-cluster-0 \
--initial-cluster k8scy01=http://172.27.10.101:2380,k8scy02=http://172.27.10.102:2380,k8scy03=http://172.27.10.103:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

3. Enable the service at boot
Overwrite the packaged etcd.service with the unit file above and start etcd:

# mv /etc/systemd/system/etcd.service /usr/lib/systemd/system/
# systemctl daemon-reload
# systemctl enable etcd
# systemctl start etcd
# systemctl status etcd

4. Check cluster status

# etcdctl cluster-health
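
etcdctl talks to http://127.0.0.1:2379 by default, which every node listens on, so no extra flags are needed here. Two additional checks worth running:

# etcdctl member list
# etcdctl --endpoints=http://172.27.10.101:2379,http://172.27.10.102:2379,http://172.27.10.103:2379 cluster-health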

V. Install and configure Docker (all nodes)
1. Install Docker

# yum install https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm  -y
# yum install https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm  -y

2. Configure docker.service
Modify the ExecStart line:

# vi /usr/lib/systemd/system/docker.service
    ExecStart=/usr/bin/dockerd   -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock  --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com

3. Start Docker

# systemctl daemon-reload
# systemctl enable docker
# systemctl restart docker
# systemctl status docker
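
Before moving on, confirm that Docker reports the cgroupfs cgroup driver, since the kubelet will be configured with --cgroup-driver=cgroupfs in the next section and the two must match:

# docker version
# docker info 2>/dev/null | grep -i "cgroup driver"   # expect: Cgroup Driver: cgroupfs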

VI. Install and configure kubeadm (all nodes)
1. Install kubelet, kubeadm, and kubectl

# yum install -y kubelet-1.10.0 kubeadm-1.10.0 kubectl-1.10.0  # pin to 1.10.0 so the repo does not pull in a newer version
# systemctl enable kubelet

2. Modify the kubelet configuration

# vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Change the cgroup driver argument:
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
# Add the following line:
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"

# systemctl daemon-reload
# systemctl enable kubelet
# echo "source <(kubectl completion bash)" >> ~/.bashrc

VII. Initialize the cluster
1. Create the cluster init configuration file (on all 3 master nodes)

# cat << EOF > config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
etcd:
  endpoints:
  - http://172.27.10.101:2379
  - http://172.27.10.102:2379
  - http://172.27.10.103:2379
networking:
  podSubnet: 10.244.0.0/16
kubernetesVersion: 1.10.0
api:
  advertiseAddress: "172.27.10.105"
apiServerCertSANs:
- k8scy01
- k8scy02
- k8scy03
- 172.27.10.101
- 172.27.10.102
- 172.27.10.103
- 172.27.10.105
featureGates:
  CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF

2. Run the initialization (only on k8scy01 for now)
Note:
The configuration file sets the pod network to 10.244.0.0/16.
As kubeadm init --help shows, the default service subnet is 10.96.0.0/12.
The default cluster DNS address in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf is cluster-dns=10.96.0.10.

# kubeadm init --config config.yaml

# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

Note:
Save the join command printed at the end of the output ("You can now join any number of machines by running the following on each node as root"); it is needed later when adding nodes:
kubeadm join 172.27.10.105:6443 --token shxnvk.1n8kob3wwtm9refh --discovery-token-ca-cert-hash sha256:0d9bae3c825157ed995e27721f1aaccf70ade0466a7ec31e73935257bc80edf1
If the initialization fails here, run:

# kubeadm reset

or run:

# rm -rf /etc/kubernetes/*.conf
# rm -rf /etc/kubernetes/manifests/*.yaml
# docker ps -a |awk '{print $1}' |xargs docker rm -f
# systemctl stop kubelet
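
The token printed by kubeadm init is valid for 24 hours by default. If it has expired by the time you add a node, a fresh join command can be generated on a master; recent kubeadm releases support:

# kubeadm token create --print-join-command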

3. Distribute the certificates (run on k8scy01)

# scp -r /etc/kubernetes/pki  k8scy02:/etc/kubernetes/
# scp -r /etc/kubernetes/pki  k8scy03:/etc/kubernetes/

4. Deploy the flannel network
flannel version: quay.io/coreos/flannel:v0.10.0-amd64

# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Change the image in kube-flannel.yml to:
image: registry.cn-hangzhou.aliyuncs.com/kubernetes_containers/flannel:v0.10.0-amd64
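
A one-line way to make this replacement, assuming the manifest still references the default quay.io/coreos/flannel:v0.10.0-amd64 image:

# sed -i 's#quay.io/coreos/flannel:v0.10.0-amd64#registry.cn-hangzhou.aliyuncs.com/kubernetes_containers/flannel:v0.10.0-amd64#g' kube-flannel.yml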

# kubectl create -f  kube-flannel.yml

View the status of all nodes:

# kubectl   get node

View the status of all pods:

# kubectl   get pods --all-namespaces

View node details:

# kubectl describe node k8scy01

View pod details:

# kubectl --namespace=kube-system describe pod coredns-7997f8864c-9tnjr

Delete the resources defined by a manifest:

# kubectl delete -f kube-flannel.yml

Delete a pod:

# kubectl --namespace=kube-system delete pod coredns-7997f8864c-9tnjr

Delete all pods in kube-system:

# kubectl delete -n kube-system pods --all

5. Install the dashboard

# cat << EOF > kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f 

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

# kubectl create -f kubernetes-dashboard.yaml

Get the token used to log in:

# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

Access the dashboard with Firefox and enter the token to log in:

https://172.27.10.101:30000/#!/login

6. Install heapster
Create the directories for the heapster manifests:

# mkdir -p kube-heapster
# mkdir -p kube-heapster/influxdb
# mkdir -p kube-heapster/rbac

grafana.yaml:

# cat << EOF > kube-heapster/influxdb/grafana.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-grafana-amd64:v4.4.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
          # The following env variables are required to make Grafana accessible via
          # the kubernetes api-server proxy. On production clusters, we recommend
          # removing these env variables, setup auth for grafana, and expose the grafana
          # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
EOF

heapster.yaml:

# cat << EOF > kube-heapster/influxdb/heapster.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: heapster
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-amd64:v1.4.2
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://kubernetes.default
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
EOF

influxdb.yaml:

# cat << EOF > kube-heapster/influxdb/influxdb.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: influxdb
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-influxdb-amd64:v1.3.3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb
EOF

heapster-rbac.yaml:

# cat << EOF > kube-heapster/rbac/heapster-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF

Install heapster:

# kubectl create -f kube-heapster/influxdb/

# kubectl create -f kube-heapster/rbac/

7. Run the initialization (on k8scy02 and k8scy03, respectively)

# kubeadm init --config config.yaml
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

8. View node information

#  kubectl get nodes

# kubectl get pods --all-namespaces -o wide

9. Allow the masters to run pods (run on k8scy01)
If monitoring-grafana, monitoring-influxdb, and heapster are stuck in Pending, allowing the masters to run pods brings them up.

# kubectl taint nodes --all node-role.kubernetes.io/master-
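
Verify that the master taint has been removed:

# kubectl describe node k8scy01 | grep -i taints   # expect: Taints: <none>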

VIII. Add worker nodes
1. Add a node
Run the following on k8scy04 to join it to the cluster:

# kubeadm join 172.27.10.105:6443 --token shxnvk.1n8kob3wwtm9refh --discovery-token-ca-cert-hash sha256:0d9bae3c825157ed995e27721f1aaccf70ade0466a7ec31e73935257bc80edf1

On a master node, check the status of the newly added node:

# kubectl get nodes
