Installing Kubernetes 1.15.0 Offline with Kubeadm

1 Host List

This walkthrough uses four hosts: three as Master nodes and one as a worker Node.

Node IP          OS Version  Kernel Version  Hostname  Docker Version  Role
192.168.137.129  CentOS 7.4  3.10.0-693      centos    18.09.2         Master
192.168.137.130  CentOS 7.4  3.10.0-693      centos1   18.09.2         Master
192.168.137.131  CentOS 7.4  3.10.0-693      centos2   18.09.2         Master
192.168.137.132  CentOS 7.4  3.10.0-693      centos3   18.09.2         Node

Note:
SSH mutual trust (passwordless login) must be configured between the Master nodes
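
Passwordless SSH is not set up anywhere else in this guide; a minimal sketch, assuming root access and the hostnames from the table above (resolvable via /etc/hosts):

# Run on each Master node
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for h in centos centos1 centos2; do
  ssh-copy-id root@${h}
done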

2 Download the Offline Packages

2.1 Obtain the Container Images Required by Kubernetes

  1. Download kubernetes-server-linux-amd64.tar.gz (it contains the container images Kubernetes needs)
    kubernetes-server-linux-amd64.tar.gz
  2. Extract kubernetes-server-linux-amd64.tar.gz
  3. Load the container images
docker load -i cloud-controller-manager.tar
docker load -i kube-controller-manager.tar
docker load -i kube-apiserver.tar
docker load -i kube-proxy.tar
docker load -i kube-scheduler.tar
  4. Tag and push the images to the local registry
docker tag k8s.gcr.io/kube-apiserver:v1.15.0 192.168.137.129/k8s.gcr.io/kube-apiserver:v1.15.0
docker push 192.168.137.129/k8s.gcr.io/kube-apiserver:v1.15.0
docker tag k8s.gcr.io/cloud-controller-manager:v1.15.0 192.168.137.129/k8s.gcr.io/cloud-controller-manager:v1.15.0
docker push 192.168.137.129/k8s.gcr.io/cloud-controller-manager:v1.15.0
docker tag k8s.gcr.io/kube-controller-manager:v1.15.0 192.168.137.129/k8s.gcr.io/kube-controller-manager:v1.15.0
docker push 192.168.137.129/k8s.gcr.io/kube-controller-manager:v1.15.0
docker tag k8s.gcr.io/kube-proxy:v1.15.0 192.168.137.129/k8s.gcr.io/kube-proxy:v1.15.0
docker push 192.168.137.129/k8s.gcr.io/kube-proxy:v1.15.0
docker tag k8s.gcr.io/kube-scheduler:v1.15.0 192.168.137.129/k8s.gcr.io/kube-scheduler:v1.15.0
docker push 192.168.137.129/k8s.gcr.io/kube-scheduler:v1.15.0
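
The five tag/push pairs can be collapsed into a loop; a minimal sketch using the registry address from this setup. Note that kubeadm will also pull pause:3.1, etcd:3.3.10 and coredns:1.3.1 from the same repository; those are not in the server tarball and must be mirrored separately.

# Image list matches the tars loaded above
registry=192.168.137.129
for img in kube-apiserver cloud-controller-manager kube-controller-manager kube-proxy kube-scheduler; do
  docker tag k8s.gcr.io/${img}:v1.15.0 ${registry}/k8s.gcr.io/${img}:v1.15.0
  docker push ${registry}/k8s.gcr.io/${img}:v1.15.0
done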

2.2 Obtain the Kubernetes RPM Packages

  1. Configure the Aliyun Kubernetes yum mirror and download the RPM packages
mkdir /var/www/html/kubernetes1.15.0
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install kubeadm-1.15.0 kubelet-1.15.0 kubectl-1.15.0 --downloadonly --downloaddir=/var/www/html/kubernetes1.15.0
  2. Generate the repodata
cd /var/www/html/kubernetes1.15.0
createrepo ./
  3. Configure the local Kubernetes yum repository
    This repo must be configured on all hosts
cat <<EOF > /etc/yum.repos.d/kubernetes1.15.0.repo
[kubernetes1.15.0]
name=Kubernetes1.15.0
baseurl=http://192.168.137.129:18080/kubernetes1.15.0/
enabled=1
gpgcheck=0
EOF
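
The baseurl above assumes an HTTP server exporting /var/www/html on port 18080, which is not set up anywhere else in this guide. One minimal option, assuming Apache httpd is available offline:

yum install -y httpd
sed -i 's/^Listen 80$/Listen 18080/' /etc/httpd/conf/httpd.conf
systemctl enable --now httpd
# Sanity check from any host:
curl http://192.168.137.129:18080/kubernetes1.15.0/repodata/repomd.xml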

3 Host Configuration Changes

The following must be applied on all hosts

  1. Disable the firewall and SELinux
systemctl stop firewalld.service
systemctl disable firewalld.service
sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config
setenforce 0
  2. Disable the swap partition
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
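A quick sanity check that swap is really gone (not part of the original steps):
free -m                            # the Swap line should read all zeros
grep swap /etc/fstab || echo "ok"  # no swap entry should remain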
  3. Configure sysctl.conf
cat << EOF > /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_max_tw_buckets = 262144
net.ipv4.conf.all.rp_filter = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.core.somaxconn = 65535
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_sack = 0
net.ipv4.tcp_window_scaling = 0
net.core.rmem_default=4194304
net.core.rmem_max=4194304
net.core.wmem_default=262144
net.core.wmem_max=1048586
fs.suid_dumpable = 1
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
vm.swappiness = 0
net.ipv4.ip_local_port_range = 9500 61000
EOF
sysctl -p
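The two net.bridge.bridge-nf-call-* keys above only exist once the br_netfilter kernel module is loaded; loading it is a common companion step the original text omits:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # also load on boot
sysctl -p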
  4. Enable IPVS
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    modprobe -- \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
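kube-proxy's IPVS mode additionally requires the ipset userspace package on every node (ipvsadm is optional but useful for inspecting rules); both RPMs would need to be mirrored into the local repo for a fully offline install:
yum install -y ipset ipvsadm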
  5. Switch the Docker cgroup driver to systemd
mkdir -p /etc/docker && tee /etc/docker/daemon.json <<- EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["192.168.137.129"]
}
EOF
systemctl daemon-reload
systemctl restart docker
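To confirm the driver switch took effect:
docker info 2>/dev/null | grep -i 'cgroup driver'   # should report: systemd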

4 Install and Configure Kubernetes

  1. Install Kubernetes
yum install -y kubeadm-1.15.0 kubelet-1.15.0 kubectl-1.15.0
systemctl enable --now kubelet
  2. Configure Kubernetes
cat << EOF > /etc/default/kubelet
KUBELET_EXTRA_ARGS=--cgroup-driver=systemd   ### must match the Docker cgroup driver
EOF
systemctl daemon-reload
systemctl restart kubelet
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
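
At this point kubelet restarts in a loop because no cluster configuration exists yet; this is expected and resolves itself after kubeadm init/join:

systemctl status kubelet    # "activating (auto-restart)" is normal before kubeadm init
journalctl -u kubelet -f    # follow the log if anything else looks wrong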

5 Configure the controlPlaneEndpoint

  1. Write haproxy.cfg
mkdir -p /data
cat << EOF > /data/haproxy.cfg
global
  daemon
  log 127.0.0.1 local0
  spread-checks 5
  max-spread-checks 15000
  maxconn 50000
  stats timeout 30s

defaults
  log                  global
  retries                   3
  backlog               10000
  maxconn               10000
  timeout connect          3s
  timeout client          30s
  timeout server          30s
  timeout tunnel        3600s
  timeout http-keep-alive  1s
  timeout http-request    15s
  timeout queue           30s
  timeout tarpit          60s
  option            dontlognull
  option            http-server-close
  option            redispatch
  option            forwardfor
  
listen stats
  bind 0.0.0.0:1080
  mode http
  stats enable
  option httplog
  log 127.0.0.1 local0 err
  stats refresh 30s
  maxconn 10
  stats uri /
  stats realm Haproxy\ Statistics
  stats auth admin:123456
  stats hide-version
  stats admin if TRUE
  monitor-uri /_haproxy_health_check


frontend kube-api
  bind *:8443
  mode tcp
  use_backend kube-master

backend kube-master
  balance source
  mode tcp
  server   192_168_137_129_6443    192.168.137.129:6443    check inter 2000 fall 3
  server   192_168_137_130_6443    192.168.137.130:6443    check inter 2000 fall 3
  server   192_168_137_131_6443    192.168.137.131:6443    check inter 2000 fall 3
EOF
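Before wiring it into systemd, the configuration can be syntax-checked with the same image used below (the local-registry copy of haproxy is an assumption of this setup):
docker run --rm -v /data/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
  192.168.137.129/public/haproxy:1.9.7 haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg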
  2. Write the haproxy container start script
cat << EOF > /data/start_haproxy.sh
#!/bin/bash
if netstat -anp | grep 1080; then
  echo "****************** port 1080 is already used ******************"
else
  /usr/bin/docker run -d --net=host -p 1080:1080 -p 8443:8443 -v /data/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg 192.168.137.129/public/haproxy:1.9.7
fi
EOF
  3. Write a systemd unit that starts the haproxy container on boot
cat << EOF > /usr/lib/systemd/system/docker-haproxy.service
[Unit]
Description=haproxy manager
After=docker.service

[Service]
WorkingDirectory=/data
# the start script exits right after 'docker run -d', so keep the unit marked active
Type=oneshot
RemainAfterExit=yes
ExecStart=/bin/bash /data/start_haproxy.sh

[Install]
WantedBy=multi-user.target
EOF
systemctl start docker-haproxy
systemctl enable docker-haproxy
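
A quick liveness probe against the monitor-uri defined in haproxy.cfg:

curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:1080/_haproxy_health_check   # expect 200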

Note:
haproxy combined with keepalived (a floating VIP in front of the masters) can be used to achieve full HA

6 Configure the kubeadm init Parameters

  1. Print the default configuration template:
kubeadm config print init-defaults --component-configs KubeProxyConfiguration

output:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: centos1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
  qps: 5
clusterCIDR: ""
configSyncPeriod: 15m0s
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: ""
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms
winkernel:
  enableDSR: false
  networkName: ""
  sourceVip: ""
  2. Write the kubeadm init config.yaml
cat << EOF > /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: k8s-test-local
kubernetesVersion: v1.15.0
controlPlaneEndpoint: "192.168.137.129:8443"
imageRepository: 192.168.137.129/k8s.gcr.io
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
    serverCertSANs:
      - 192.168.137.129
      - 192.168.137.130
      - 192.168.137.131    
      - 127.0.0.1
      - centos
      - centos1
      - centos2
      - localhost
    peerCertSANs:
      - 192.168.137.129
      - 192.168.137.130
      - 192.168.137.131   
      - 127.0.0.1
      - centos
      - centos1
      - centos2
      - localhost
apiServer:
  certSANs:
      - 192.168.137.129
      - 192.168.137.130
      - 192.168.137.131
      - 192.168.137.132     
      - 127.0.0.1
      - centos
      - centos1
      - centos2
      - centos3
      - localhost
  extraArgs:
    service-node-port-range: 1025-65535
networking:
  dnsDomain: k8s.test.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
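
A useful dry run before initializing: have kubeadm list and pre-pull everything it needs from the local registry, which surfaces missing images (pause, etcd, coredns) early:

kubeadm config images list --config /etc/kubernetes/kubeadm-config.yaml
kubeadm config images pull --config /etc/kubernetes/kubeadm-config.yaml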

7 Initialize the Cluster

kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --upload-certs

output:

....

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.137.129:8443 --token t977ih.3xjsm4f5vm3xewtk \
    --discovery-token-ca-cert-hash sha256:01a3b35eaca17e2ba36f3e309205d2a16bdd0a9c008a50499e8420382d488f8d \
    --experimental-control-plane --certificate-key c19e7914f1aab84d1dd05ea35a500a8dc85cc3c10d84cd022aebc7ad7084d553

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.137.129:8443 --token t977ih.3xjsm4f5vm3xewtk \
    --discovery-token-ca-cert-hash sha256:01a3b35eaca17e2ba36f3e309205d2a16bdd0a9c008a50499e8420382d488f8d

8 Install the Network Plugin
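
For an offline install, kube-flannel.yml and the flannel image must be fetched in advance on a connected machine and repointed at the local registry. A sketch under that assumption (v0.11.0-amd64 is the usual tag for this era; verify against your yaml):

docker load -i flannel_v0.11.0-amd64.tar
docker tag quay.io/coreos/flannel:v0.11.0-amd64 192.168.137.129/quay.io/coreos/flannel:v0.11.0-amd64
docker push 192.168.137.129/quay.io/coreos/flannel:v0.11.0-amd64
sed -i 's#quay.io/coreos/flannel#192.168.137.129/quay.io/coreos/flannel#g' kube-flannel.yml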

kubectl apply -f kube-flannel.yml

9 Install and Configure the Dashboard

  1. Install the dashboard
kubectl apply -f kubernetes-dashboard.yaml
  2. Configure dashboard access
    Access the Dashboard through the API Server
Create a ServiceAccount
> kubectl create serviceaccount dashboard-admin -n kube-system

Bind the relevant role
> kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
--clusterrole: the cluster role to use; Kubernetes ships many by default
--serviceaccount: the ServiceAccount to authenticate as, created in the first step

Get the Secret used by the ServiceAccount
> kubectl describe sa dashboard-admin -n kube-system
Name:                dashboard-admin
Namespace:           kube-system
Labels:              <none>
Annotations:         <none>
Image pull secrets:  <none>
Mountable secrets:   dashboard-admin-token-477hc
Tokens:              dashboard-admin-token-477hc
Events:              <none>

> kubectl describe secret dashboard-admin-token-477hc -n kube-system
Name:         dashboard-admin-token-477hc
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: b2fd5e6e-41e6-481a-9e33-5b4130383b8f

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNDc3aGMiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiYjJmZDVlNmUtNDFlNi00ODFhLTllMzMtNWI0MTMwMzgzYjhmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.h6cd6H_GXiTpd-4M50yHU5zW2wFd98GOHvw5ZaLDfGj-eoLuiRIF80xw1XvrSx_lQ0eRsdiHAxRhW-pcIIYn800FlG8WhEQx7g_wRMg6QxaRzubk-DjafCQhem5h6bUS67VnPyrj-D_g3mbBZvaj9Vwf4BL6QA2jJXWr27mfaduLSBYKUDSxRFOi-PcQAJKc3y8UaB3Wy14QfKtihb2e1LB73ON8UXxV5V6ugyLxNXM71UnjJD9ZvtKMmuFTJU0q1MMNFAsCCfjaYO7THa8JAxU82pvWBByavs1HghwkNYeC_unhEwWLvaJrfJ209Vs-WOWZhTsESR0W4nntSasJXg
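
With that token, the Dashboard can be opened through the API server proxy and the token pasted at the login screen (the path assumes the default https kubernetes-dashboard service in kube-system):

https://192.168.137.129:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/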


10 Join the Master Nodes

kubeadm join 192.168.137.129:8443 --token t977ih.3xjsm4f5vm3xewtk \
    --discovery-token-ca-cert-hash sha256:01a3b35eaca17e2ba36f3e309205d2a16bdd0a9c008a50499e8420382d488f8d \
    --experimental-control-plane --certificate-key c19e7914f1aab84d1dd05ea35a500a8dc85cc3c10d84cd022aebc7ad7084d553

Note:
Regenerate the join command with:
kubeadm token create --print-join-command
Generate a new certificate-key with:
kubeadm init phase upload-certs --upload-certs

11 Join the Worker Nodes

kubeadm join 192.168.137.129:8443 --token t977ih.3xjsm4f5vm3xewtk \
    --discovery-token-ca-cert-hash sha256:01a3b35eaca17e2ba36f3e309205d2a16bdd0a9c008a50499e8420382d488f8d

12 Check Cluster Status

kubectl get nodes,pods -A

output:

NAME           STATUS   ROLES    AGE   VERSION
node/centos1   Ready    master   12m   v1.15.0
node/centos2   Ready    master   13m   v1.15.0

NAMESPACE     NAME                                        READY   STATUS    RESTARTS   AGE
kube-system   pod/coredns-6c45f5bc48-ml9xb                1/1     Running   0          13m
kube-system   pod/coredns-6c45f5bc48-sx2cz                1/1     Running   0          13m
kube-system   pod/etcd-centos1                            1/1     Running   0          12m
kube-system   pod/etcd-centos2                            1/1     Running   0          12m
kube-system   pod/kube-apiserver-centos1                  1/1     Running   0          12m
kube-system   pod/kube-apiserver-centos2                  1/1     Running   0          12m
kube-system   pod/kube-controller-manager-centos1         1/1     Running   0          12m
kube-system   pod/kube-controller-manager-centos2         1/1     Running   1          12m
kube-system   pod/kube-flannel-ds-amd64-g2x6m             1/1     Running   0          11m
kube-system   pod/kube-flannel-ds-amd64-rskbf             1/1     Running   0          11m
kube-system   pod/kube-proxy-5d7zw                        1/1     Running   0          13m
kube-system   pod/kube-proxy-f7l55                        1/1     Running   0          12m
kube-system   pod/kube-scheduler-centos1                  1/1     Running   0          12m
kube-system   pod/kube-scheduler-centos2                  1/1     Running   1          12m
kube-system   pod/kubernetes-dashboard-777fdff474-6sjb7   1/1     Running   0          3m22s

13 Check Certificate Validity

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep ' Not '
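
kubeadm 1.15 also ships a built-in overview covering all certificates at once:

kubeadm alpha certs check-expiration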

14 Check etcd Cluster Health

docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes 192.168.137.129/k8s.gcr.io/etcd:3.3.10 etcdctl \
--cert-file /etc/kubernetes/pki/etcd/peer.crt \
--key-file /etc/kubernetes/pki/etcd/peer.key \
--ca-file /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://192.168.137.129:2379 cluster-health
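
The flags above use the etcdctl v2 API (this image's default); the v3 equivalent under the same assumptions:

docker run --rm -it --net host -e ETCDCTL_API=3 \
  -v /etc/kubernetes:/etc/kubernetes 192.168.137.129/k8s.gcr.io/etcd:3.3.10 etcdctl \
  --cert /etc/kubernetes/pki/etcd/peer.crt \
  --key /etc/kubernetes/pki/etcd/peer.key \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --endpoints https://192.168.137.129:2379 endpoint health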
