Kubernetes 14 -- kubeadm in Detail

I ran into quite a few problems installing a Kubernetes cluster with kubeadm, and later, when I tried to extend the cluster with CoreDNS, metrics-server, and similar add-ons, I realized I did not really understand its internals. So this post takes a closer look at how kubeadm works.

Common kubeadm commands:

(Figure 1: table of common kubeadm commands)

This post focuses mainly on the init process.

The general steps for deploying a cluster with kubeadm are:

kubeadm init
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl apply -f <network-of-choice.yaml>
kubeadm join --token <token> <master-ip>:<master-port>
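
A concrete run of these steps might look like the sketch below. The pod network CIDR matches the --cluster-cidr seen later in the controller-manager manifest, the flannel manifest URL is just one common choice of network add-on, and the token and hash placeholders are values printed by kubeadm init, not values from this cluster:

# on the master
kubeadm init --pod-network-cidr=10.244.0.0/16
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# on each worker node, using the values printed by kubeadm init
kubeadm join 192.168.1.15:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>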

Default configuration files

/etc/kubernetes/ is the main configuration directory for the cluster.

/etc/kubernetes/manifests holds the static Pod manifest (YAML) files for the control plane components:

etcd, apiserver, controller-manager, scheduler

etcd.yaml holds the configuration for the etcd database:

[root@Ac-private-1 manifests]# cat etcd.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://127.0.0.1:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --initial-advertise-peer-urls=https://127.0.0.1:2380
    - --initial-cluster=ac-private-1=https://127.0.0.1:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379
    - --listen-peer-urls=https://127.0.0.1:2380
    - --name=ac-private-1
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.2.24
    imagePullPolicy: IfNotPresent
    livenessProbe:
      exec:
        command:
        - /bin/sh
        - -ec
        - ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
          --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
          get foo
      failureThreshold: 8
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
status: {}
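
To verify that the static etcd Pod is healthy, you can run the same command the liveness probe above uses (certificate paths taken directly from the manifest); run it inside the etcd container, or on the host if an etcdctl that supports the v3 API is installed there:

ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key \
  get foo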

The kube-apiserver manifest:

[root@Ac-private-1 manifests]# cat kube-apiserver.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --authorization-mode=Node,RBAC
    - --advertise-address=192.168.1.15
    - --allow-privileged=true
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: k8s.gcr.io/kube-apiserver:v1.12.0-rc.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 192.168.1.15
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
status: {}

The kube-controller-manager manifest:

[root@Ac-private-1 manifests]# cat kube-controller-manager.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --address=127.0.0.1
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.244.0.0/16
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-cidr-mask-size=24
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --use-service-account-credentials=true
    image: k8s.gcr.io/kube-controller-manager:v1.12.0-rc.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    volumeMounts:
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
status: {}

The kube-scheduler manifest:

[root@Ac-private-1 manifests]# cat kube-scheduler.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    image: k8s.gcr.io/kube-scheduler:v1.12.0-rc.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}

/etc/kubernetes also holds the kubeconfig files for these components, used mainly for authentication against the API server:

admin.conf: for the cluster admin and for kubeadm itself

kubelet.conf (bootstrap-kubelet.conf during TLS bootstrap): for the kubelet

controller-manager.conf: for the controller manager

scheduler.conf: for the scheduler

/etc/kubernetes/pki holds the certificates and private keys.

The kubeadm init workflow

1.Preflight checks

Before installing anything, kubeadm runs a series of checks; individual checks can be skipped with --ignore-preflight-errors (a few example commands for dealing with common failures follow the list). The checks are:

Kubernetes version vs. kubeadm version:

  • [warning] if the Kubernetes version to use (specified with the --kubernetes-version flag) is at least one minor version higher than the kubeadm CLI version
  • Kubernetes system requirements:
    • if running on Linux:
      • kernel version check: [error] if not kernel 3.10+ or 4+ with specific KernelSpec
      • control group support: [error] if the required cgroups subsystems aren't set up
    • if using Docker:
      • [warning/error] if the Docker service does not exist, is disabled, or is not active
      • Docker environment: [error] if the Docker endpoint does not exist or does not work
      • [warning] if the Docker version is > 17.03
    • if using another CRI engine:
      • [error] if the crictl socket does not answer
  • The operator must be root:
    • [error] if the user is not root
  • [error] if the machine hostname is not a valid DNS subdomain
  • [warning] if the hostname cannot be reached via network lookup
  • kubelet and kubeadm versions must match:
    • [error] if the kubelet version is lower than the minimum kubelet version supported by kubeadm (current minor -1)
    • [error] if the kubelet version is at least one minor version higher than the required control plane version (unsupported version skew)
    • [warning] if the kubelet service does not exist or is disabled
  • [warning] if firewalld is active
  • [error] if the API server bindPort or ports 10250/10251/10252 are already in use
  • Configuration file paths:
    • [error] if the /etc/kubernetes/manifests folder already exists and is not empty
  • Network traffic forwarding:
    • [error] if /proc/sys/net/bridge/bridge-nf-call-iptables does not exist or does not contain 1
    • [error] if the advertise address is IPv6 and /proc/sys/net/bridge/bridge-nf-call-ip6tables does not exist or does not contain 1
  • Swap must be off:
    • [error] if swap is on
  • [error] if the ip, iptables, mount, nsenter commands are not present in the command path
  • [warning] if the ebtables, ethtool, socat, tc, touch, crictl commands are not present in the command path
  • [warning] if extra arg flags for the API server, controller manager, or scheduler contain invalid options
  • [warning] if the connection to https://API.AdvertiseAddress:API.BindPort goes through a proxy
  • [warning] if the connection to the services subnet goes through a proxy (only the first address is checked)
  • [warning] if the connection to the Pods subnet goes through a proxy (only the first address is checked)
  • etcd checks:
    • if external etcd is provided:
      • [error] if the etcd version is less than 3.0.14
      • [error] if etcd certificates or keys are specified but not provided
    • if external etcd is NOT provided (and thus local etcd will be installed):
      • [error] if port 2379 is already in use
      • [error] if the Etcd.DataDir folder already exists and is not empty
  • if the authorization mode is ABAC:
    • [error] if abac_policy.json does not exist
  • if the authorization mode is Webhook:
    • [error] if webhook_authz.conf does not exist
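
For reference, the most common failures can be resolved (or, as a last resort, skipped) before running init; these are ordinary Linux commands plus the --ignore-preflight-errors flag mentioned above, not anything kubeadm runs for you:

# swap must be off
swapoff -a

# bridged traffic must be visible to iptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables

# or, to proceed despite a specific failing check (here: the swap check)
kubeadm init --ignore-preflight-errors=Swap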

2.Generate the necessary certificates

Generates the cluster certificates and keys and stores them in /etc/kubernetes/pki (the directory can be changed with --cert-dir):

Cluster CA certificate and key: ca.crt, ca.key

API server serving certificate and key: apiserver.crt, apiserver.key

Client certificate and key for the API server to connect to the kubelets: apiserver-kubelet-client.crt, apiserver-kubelet-client.key

Key pair for signing ServiceAccount tokens: sa.pub, sa.key

Front proxy CA certificate and key: front-proxy-ca.crt, front-proxy-ca.key

Client certificate and key for the front proxy client: front-proxy-client.crt, front-proxy-client.key
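
To see what was generated, any of these certificates can be inspected with openssl; shown here for the API server certificate (adjust the path for the others):

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -subject -issuer -dates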

3.Generate kubeconfig files for control plane components

A kubeconfig file for kubelet to use, /etc/kubernetes/kubelet.conf

A kubeconfig file for controller-manager, /etc/kubernetes/controller-manager.conf

A kubeconfig file for scheduler, /etc/kubernetes/scheduler.conf
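
These kubeconfig files embed client certificates signed by the cluster CA. The admin.conf mentioned earlier sits alongside them and can be used directly with kubectl to check that the control plane answers:

kubectl --kubeconfig /etc/kubernetes/admin.conf config view
kubectl --kubeconfig /etc/kubernetes/admin.conf get componentstatuses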

4.Generate static Pod manifests for control plane components

Generates the static Pod manifests for the apiserver, controller-manager, and scheduler.

kubeadm writes the static Pod manifest files for the control plane components to /etc/kubernetes/manifests.

Notable API server settings (a sketch of driving these flags from a kubeadm configuration file follows the list below):

apiserver-advertise-address and apiserver-bind-port set the address and port the API server is reachable on

--insecure-port=0 to avoid insecure connections to the API server

--enable-bootstrap-token-auth=true to enable the BootstrapTokenAuthenticator authentication module

--allow-privileged=true (required e.g. by kube-proxy)

--requestheader-client-ca-file set to front-proxy-ca.crt

--enable-admission-plugins=NodeRestriction (as in the manifest above)

Flags that enable API aggregation:

--requestheader-username-headers=X-Remote-User

--requestheader-group-headers=X-Remote-Group

--requestheader-extra-headers-prefix=X-Remote-Extra-

--requestheader-allowed-names=front-proxy-client
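
Instead of passing everything on the command line, the same settings can be driven from a kubeadm configuration file. A minimal sketch, assuming the v1alpha3 config API shipped with kubeadm v1.12 (field names differ in newer releases, so treat this as illustrative only):

cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
kubernetesVersion: v1.12.0
apiServerExtraArgs:
  enable-admission-plugins: NodeRestriction
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
EOF

kubeadm init --config kubeadm-config.yaml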

5.Generate static Pod manifest for local etcd

Generates the etcd static Pod manifest (the etcd.yaml shown earlier).

6.Optional Dynamic Kubelet Configuration

The kubelet runs on every node, keeps the Pods assigned to that node running, and provides the Kubernetes runtime environment. Its main responsibilities are:

  • watch the Pods assigned to its node
  • mount the volumes those Pods need
  • download the Pods' Secrets
  • run the Pods' containers via docker/rkt
  • periodically execute the liveness probes defined for the containers
  • report Pod status to the rest of the system
  • report the node's status

If this feature is enabled, kubeadm writes the initial kubelet configuration to /var/lib/kubelet/config/init/kubelet

and the kubelet's systemd drop-in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf points it at a dynamic configuration directory via --dynamic-config-dir=/var/lib/kubelet/config/dynamic
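
To see which drop-ins and flags the kubelet is actually started with on a given node, the standard systemd tooling is enough (nothing kubeadm-specific):

systemctl cat kubelet
systemctl status kubelet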


7.Wait for the control plane to come up

kubeadm relies on the kubelet to pull the control plane images and run them properly as static Pods (image download plus Pod startup).

kubeadm then waits until localhost:6443/healthz returns ok; this step can take a while.
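
You can watch the same endpoint kubeadm polls; it fails until the kubelet has pulled the images and started the static Pods, then returns ok:

curl -k https://localhost:6443/healthz
docker ps | grep kube-apiserver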

8.Write base kubelet configuration

If kubeadm is invoked with --feature-gates=DynamicKubeletConfig:

Write the kubelet base configuration into the kubelet-base-config-v1.9 ConfigMap in the kube-system namespace

Creates RBAC rules for granting read access to that ConfigMap to all bootstrap tokens and all kubelet instances (that is system:bootstrappers:kubeadm:default-node-token and system:nodes groups)

Enable the dynamic kubelet configuration feature for the initial control-plane node by pointing Node.spec.configSource to the newly-created ConfigMap

In short: if this option is enabled, the kubelet configuration is recorded in a ConfigMap, RBAC rules are created so that it can be read, and dynamic reconfiguration of the kubelet is enabled for the node.
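
If the feature gate was enabled, the resulting ConfigMap and the node's configSource reference can be inspected like any other object (the ConfigMap name is the one given above; <node-name> is a placeholder):

kubectl -n kube-system get configmap kubelet-base-config-v1.9 -o yaml
kubectl get node <node-name> -o jsonpath='{.spec.configSource}'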

9.Save the kubeadm ClusterConfiguration in a ConfigMap for later reference

This ensures that kubeadm actions executed in the future (e.g. kubeadm upgrade) can determine the actual/current cluster state and make new decisions based on that data.
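
The saved configuration lands in the kubeadm-config ConfigMap in the kube-system namespace (the name used by this generation of kubeadm) and can be read back at any time:

kubectl -n kube-system get configmap kubeadm-config -o yaml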

10.Mark master

As soon as the control plane is available, kubeadm executes following actions:

  • Label the master with node-role.kubernetes.io/master=""
  • Taints the master with node-role.kubernetes.io/master:NoSchedule

That is, the master node is labeled as a master and tainted so that ordinary Pods are not scheduled onto it.
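
Both the label and the taint are visible on the node object; on a single-node test cluster the taint can be removed so that regular workloads schedule onto the master (<master-node> is a placeholder for the node name):

kubectl describe node <master-node> | grep -E 'Roles|Taints'

# single-node clusters only: allow ordinary Pods on the master
kubectl taint nodes --all node-role.kubernetes.io/master-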

11.Configure TLS-Bootstrapping for node joining

Create a bootstrap token

Allow joining nodes to call CSR API

Setup auto approval for new bootstrap tokens

Setup nodes certificate rotation with auto approval

Create the public cluster-info ConfigMap
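
The bootstrap token and the public cluster-info ConfigMap (which lives in the kube-public namespace) are what kubeadm join consumes; both can be listed after init, and recent kubeadm versions can print a ready-made join command:

kubeadm token list
kubeadm token create --print-join-command
kubectl -n kube-public get configmap cluster-info -o yaml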

12.Install addons

Installs the kube-proxy add-on and the DNS add-on (CoreDNS by default in this release).
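
Once this step finishes, the add-ons show up in kube-system as a DaemonSet and a Deployment respectively (the coredns Deployment name assumes CoreDNS is the selected DNS add-on):

kubectl -n kube-system get daemonset kube-proxy
kubectl -n kube-system get deployment coredns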

13.Optional self-hosting

Images required by kubeadm: the k8s.gcr.io registry may be unreachable, in which case you can pull equivalent images from another registry and retag them with docker tag.

(Figure 3: container images required by kubeadm init)
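
kubeadm can print the exact image names and tags it expects; if k8s.gcr.io is blocked, pull each image from a mirror you can reach and retag it. The <mirror-registry> below is only a placeholder, and the tag is the one used in the manifests above:

kubeadm config images list

docker pull <mirror-registry>/kube-apiserver:v1.12.0-rc.1
docker tag  <mirror-registry>/kube-apiserver:v1.12.0-rc.1 k8s.gcr.io/kube-apiserver:v1.12.0-rc.1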

The kubeadm init flow is quite involved; this is only a rough first pass, and each step deserves a closer look later on.

References:

https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/
