kubeadm安装k8s

1.安装harbor、haproxy和keepalived

  1. 安装docker,上传脚本docker-install.sh,执行脚本

    root@ha1:~# bash docker-install.sh
    root@ha1:~# systemctl start docker
    root@ha1:~# systemctl enable docker
    root@ha1:~# apt install docker-compose
    查看脚本内容
    root@ha1:~# vim docker-install.sh 
    #!/bin/bash
    # Install Docker CE on Ubuntu using the Aliyun mirror repository.
    # Abort on any failed command, unset variable, or failed pipeline stage,
    # so a mid-script failure (e.g. GPG key download) is not silently ignored.
    set -euo pipefail
    # Step 1: install prerequisites for apt-over-HTTPS.
    sudo apt-get update
    sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
    # Step 2: import the repository GPG key.
    curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
    # Step 3: add the apt source for the current Ubuntu release ($(lsb_release -cs)).
    sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
    # Step 4: refresh package indexes and install Docker CE.
    sudo apt-get -y update
    sudo apt-get -y install docker-ce docker-ce-cli
    上传harbor包
    
    
  1. 安装haproxy和keepalived

    root@ha1:~# apt install keepalived haproxy
    配置keepalived
    root@ha1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
    root@ha1:~# vim /etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        garp_master_delay 10
        smtp_alert
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.31.3.248 dev eth0 label eth0:1
        }
    }
    root@ha1:~# systemctl restart keepalived
    root@ha1:~# ifconfig 
    docker0: flags=4099  mtu 1500
            inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
            ether 02:42:34:0b:91:d0  txqueuelen 0  (Ethernet)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    eth0: flags=4163  mtu 1500
            inet 172.31.3.104  netmask 255.255.255.0  broadcast 172.31.3.255
            inet6 fe80::20c:29ff:fe41:3e54  prefixlen 64  scopeid 0x20
            ether 00:0c:29:41:3e:54  txqueuelen 1000  (Ethernet)
            RX packets 74864  bytes 105728317 (105.7 MB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 9363  bytes 939953 (939.9 KB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    eth0:1: flags=4163  mtu 1500
            inet 172.31.3.248  netmask 255.255.255.255  broadcast 0.0.0.0
            ether 00:0c:29:41:3e:54  txqueuelen 1000  (Ethernet)
    
    lo: flags=73  mtu 65536
            inet 127.0.0.1  netmask 255.0.0.0
            inet6 ::1  prefixlen 128  scopeid 0x10
            loop  txqueuelen 1000  (Local Loopback)
            RX packets 270  bytes 26950 (26.9 KB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 270  bytes 26950 (26.9 KB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
     配置haproxy在最后添加
     root@ha1:~# vim /etc/haproxy/haproxy.cfg
     listen k8s-api-6443
      bind 172.31.3.248:6443
      mode tcp
      server master1 172.31.3.101:6443 check inter 3s fall 3 rise 5
    root@ha1:~# systemctl restart haproxy
    

2.在所有master节点安装指定版本的kubeadm、kubelet、kubectl、docker

在github上查看k8s版本对应支持的docker版本,版本不一致有可能有bug

安装docker
root@ha1:~# bash docker-install.sh
修改cgroup-driver驱动:kubelet与docker的cgroup driver不一致会导致初始化集群失败
root@master1:~# mkdir -p /etc/docker
root@master1:~# vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}

root@ha1:~# systemctl start docker
root@ha1:~# systemctl enable docker
安装阿里的k8s镜像源
root@master1:~# apt-get update && apt-get install -y apt-transport-https
root@master1:~# curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
root@master1:~# vim /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
root@master1:~# apt-get update
查看版本和安装指定版本
root@master1:~# apt-cache madison kubeadm
root@master1:~# apt install kubeadm=1.17.2-00 kubectl=1.17.2-00 kubelet=1.17.2-00
启动kubelet
root@master1:~# systemctl start kubelet && systemctl enable kubelet && systemctl status kubelet

3.在所有node节点安装指定版本的kubeadm、kubelet、kubectl、docker,在node节点kubectl为可选安装

同2

4.master节点运行kubeadm init初始化命令

在三台 master 中任意一台 master 进行集群初始化,而且集群初始化只需要初始化一次。

kubeadm 命令使用:

Available Commands:
 alpha #kubeadm 处于测试阶段的命令
 completion #bash 命令补全,需要安装 bash-completion
#mkdir /data/scripts -p
#kubeadm completion bash > /data/scripts/kubeadm_completion.sh
#source /data/scripts/kubeadm_completion.sh
#vim /etc/profile
source /data/scripts/kubeadm_completion.sh
root@master1:~# source /etc/profile
 config #管理 kubeadm 集群的配置,该配置保留在集群的 ConfigMap 中
#kubeadm config print init-defaults
 help Help about any command
 init #启动一个 Kubernetes 主节点
 join #将节点加入到已经存在的 k8s master
 reset 还原使用 kubeadm init 或者 kubeadm join 对系统产生的环境变化
 token #管理 token
 upgrade #升级 k8s 版本
 version #查看版本信息

kubeadm init 命令简介:

root@master1:~# kubeadm init --help
--apiserver-advertise-address string #K8S API Server 将要监听的监听的本机 IP
--apiserver-bind-port int32 #API Server 绑定的端口,默认为 6443
--apiserver-cert-extra-sans stringSlice #可选的证书额外信息,用于指定 API Server 的服务器证
书。可以是 IP 地址也可以是 DNS 名称。
--cert-dir string #证书的存储路径,缺省路径为 /etc/kubernetes/pki
--certificate-key string #定义一个用于加密 kubeadm-certs Secret 中的控制平台证书的密钥
--config string #kubeadm #配置文件的路径
--control-plane-endpoint string #为控制平台指定一个稳定的 IP 地址或 DNS 名称,即配置一
个可以长期使用且是高可用的 VIP 或者域名,k8s 多 master 高可用基于此参数实现
--cri-socket string #要连接的 CRI(容器运行时接口,Container Runtime Interface, 简称 CRI)套
接字的路径,如果为空,则 kubeadm 将尝试自动检测此值,"仅当安装了多个 CRI 或具有非
标准 CRI 插槽时,才使用此选项"
--dry-run #不要应用任何更改,只是输出将要执行的操作,其实就是测试运行。
--experimental-kustomize string #用于存储 kustomize 为静态 pod 清单所提供的补丁的路径。
--feature-gates string #一组用来描述各种功能特性的键值(key=value)对,选项是:
IPv6DualStack=true|false (ALPHA - default=false)
--ignore-preflight-errors strings #可以忽略检查过程 中出现的错误信息,比如忽略 swap,如
果为 all 就忽略所有
--image-repository string #设置一个镜像仓库,默认为 k8s.gcr.io
--kubernetes-version string #指定安装 k8s 版本,默认为 stable-1
--node-name string #指定 node 节点名称
--pod-network-cidr #设置 pod ip 地址范围
--service-cidr #设置 service 网络地址范围
--service-dns-domain string #设置 k8s 内部域名,默认为 cluster.local,会有相应的 DNS 服务
(kube-dns/coredns)解析生成的域名记录。
--skip-certificate-key-print #不打印用于加密的 key 信息
--skip-phases strings #要跳过哪些阶段
--skip-token-print #跳过打印 token 信息
--token #指定 token
--token-ttl #指定 token 过期时间,默认为 24 小时,0 为永不过期
--upload-certs #将控制平面证书上传到 kubeadm-certs Secret,供其他 master 节点加入时使用
#全局可选项:
--add-dir-header #如果为 true,在日志头部添加日志目录
--log-file string #如果不为空,将使用此日志文件
--log-file-max-size uint #设置日志文件的最大大小,单位为兆,默认为 1800 兆,0 为没有限
制
--rootfs #宿主机的根路径,也就是绝对路径
--skip-headers #如果为 true,在 log 日志里面不显示标题前缀
--skip-log-headers #如果为 true,在 log 日志里里不显示标题

准备镜像

查看安装指定版本 k8s 需要的镜像有哪些

root@master1:~# kubeadm config images list  --kubernetes-version v1.17.2
W1027 01:16:48.064640   27626 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1027 01:16:48.064746   27626 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.2
k8s.gcr.io/kube-controller-manager:v1.17.2
k8s.gcr.io/kube-scheduler:v1.17.2
k8s.gcr.io/kube-proxy:v1.17.2
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
先手动下载镜像,集群初始化会很快
master 节点镜像下载:
推荐提前在 master 节点下载镜像以减少安装等待时间,但是镜像默认使用 Google 的镜像仓
库,所以国内无法直接下载,但是可以通过阿里云的镜像仓库把镜像先提前下载下来,可以
避免后期因镜像下载异常而导致 k8s 部署异常。
root@master1:~# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.2
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.2
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.2
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.2
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
root@master1:~#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5

单节点 master 初始化:

 root@master1:~# kubeadm init --apiserver-advertise-address=172.31.3.101 --apiserver-bind-port=6443 --kubernetes-version=v1.17.2 --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dns-domain=test.com --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers --ignore-preflight-errors=swap

基于命令初始化高可用 master 方式:

注:以下是kubeadm初始化集群常用参数,其中--apiserver-advertise-address:master节点自身ip,         --control-plane-endpoint:集群对外提供的VIP
如果报如下错  [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
修改:root@master1:~# mkdir -p /etc/docker
root@master1:~# vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
清除原先操作
root@master1:~# kubeadm reset
重新初始化集群
root@master1:~# kubeadm init --apiserver-advertise-address=172.31.3.101 --control-plane-endpoint=172.31.3.248 --apiserver-bind-port=6443 --kubernetes-version=v1.17.2 --pod-network-cidr=10.10.0.0/16 --service-cidr=192.168.5.0/24 --service-dns-domain=test.com --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers --ignore-preflight-errors=swap

出现以下信息表示成功

基于文件初始化高可用 master 方式

输出默认初始化配置
root@master1:~# kubeadm config print init-defaults
root@master1:~# kubeadm config print init-defaults >kubeadm-init.yaml
root@master1:~# vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.31.3.101     #需要修改
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 172.31.3.248     #添加
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers #修改
kind: ClusterConfiguration
kubernetesVersion: v1.17.2      #修改
networking:
  dnsDomain: test.com
  podSubnet: 10.10.0.0/16          #添加
  serviceSubnet: 192.168.5.0/24    #修改
scheduler: {}
初始化集群
root@master1:~# kubeadm init --config kubeadm-init.yaml
 mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 172.31.3.248:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:a8b042c3c585cb744b5074426d550c7ed66f09011f6fdad41cf954dae3d4e909 \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.3.248:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:a8b042c3c585cb744b5074426d550c7ed66f09011f6fdad41cf954dae3d4e909 
在初始化节点生成新的证书用于添加新的控制节点
root@master1:~# kubeadm init phase upload-certs --upload-certs
I1027 02:39:00.506129   57652 version.go:251] remote version is much newer: v1.19.3; falling back to: stable-1.17
W1027 02:39:01.387401   57652 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1027 02:39:01.387438   57652 validation.go:28] Cannot validate kubelet config - no validator is available
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
e7278dab9ef4739b319a2dd16cbe238900b0f22a53052449c8fd61114ac0bbf4
添加其他master节点(在其他master上执行)
root@master2:~# kubeadm join 172.31.3.248:6443 --token h5fd2t.8b2qukmk42uzk2ep   --discovery-token-ca-cert-hash sha256:ae7bfa4644261c1019197d5a4e44a3701025d45b2c8f0f6a16c8764e6d002165 --control-plane  --certificate-key e7278dab9ef4739b319a2dd16cbe238900b0f22a53052449c8fd61114ac0bbf4
部署网络组件 flannel:
https://github.com/coreos/flannel/
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
需要翻墙;无法直接访问时,可先将 kube-flannel.yml 下载到本地,修改后再 kubectl apply,如下
root@master1:~# vim flannel.yml   只需要修改pod的网段,要与初始化的pod网段一样

5.验证master节点状态

root@master1:~# kubectl get nodes
NAME               STATUS   ROLES    AGE   VERSION
master1.test.com   Ready    master   48m   v1.17.2
master2.test.com   Ready    master   21m   v1.17.2
master3.test.com   Ready    master   19m   v1.17.2
验证 k8s 集群状态:
root@master1:~# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
当前 csr 证书状态:
root@master1:~# kubectl get csr
NAME        AGE   REQUESTOR                      CONDITION
csr-9dnvd   50m   system:node:master1.test.com   Approved,Issued
csr-hqpcv   22m   system:bootstrap:h5fd2t        Approved,Issued
csr-wlxvd   23m   system:bootstrap:h5fd2t        Approved,Issued

6.在node节点使用kubeadm命令将自己加入k8s master

root@node1:~# kubeadm join 172.31.3.248:6443 --token h5fd2t.8b2qukmk42uzk2ep --discovery-token-ca-cert-hash sha256:ae7bfa4644261c1019197d5a4e44a3701025d45b2c8f0f6a16c8764e6d002165
注:Node 节点会自动加入到 master 节点,下载镜像并启动 flannel,直到最终在 master 看 到 node 处于 Ready 状态。

7.验证node节点状态

root@master1:~# kubectl get nodes
NAME               STATUS   ROLES    AGE    VERSION
master1.test.com   Ready    master   55m    v1.17.2
master2.test.com   Ready    master   28m    v1.17.2
master3.test.com   Ready    master   27m    v1.17.2
node1.test.com     Ready    <none>   105s   v1.17.2
node2.test.com     Ready    <none>   80s    v1.17.2

8.创建pod并测试网络

root@master1:~#  kubectl run net-test1 --image=alpine --replicas=2 sleep 360000
root@master1:~# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP         NODE             NOMINATED NODE   READINESS GATES
net-test1-5fcc69db59-dvbbj   1/1     Running   0          49s   10.5.4.2   node2.test.com              
net-test1-5fcc69db59-xp98t   1/1     Running   0          49s   10.5.3.2   node1.test.com              
root@master1:~# kubectl exec -it net-test1-5fcc69db59-dvbbj sh
/ # ping 10.5.3.2
PING 10.5.3.2 (10.5.3.2): 56 data bytes
64 bytes from 10.5.3.2: seq=0 ttl=62 time=1.032 ms
64 bytes from 10.5.3.2: seq=1 ttl=62 time=0.567 ms
64 bytes from 10.5.3.2: seq=2 ttl=62 time=0.546 ms

9.部署web服务Dashboard

  1. 查看dashboard对应的k8s版本,官方没测试过的版本有可能存在bug

​ https://github.com/kubernetes/dashboard

  1. 部署 dashboard 2.0.0-rc6

    具体yml文件
    root@master1:/usr/local/src/dashboard-yaml# kubectl apply -f dash_board-2.0.0-rc6.yml -f admin-user.yml
    查看token
    root@master1:/usr/local/src/dashboard-yaml# kubectl describe secret  admin-user-token-4v8sm -n kubernetes-dashboard
    使用token登录dashboard
    

10.k8s集群升级

​ 升级 k8s 集群必须 先升级 kubeadm 版本到目的 k8s 版本,也就是说 kubeadm 是 k8s 升级的 “准升证”。

  1. 升级k8s服务

    在 k8s 的所有 master 进行升级,将管理端服务 kube-controller-manager、kube-apiserver、kube-scheduler、kube-proxy

  2. 验证当前k8s版本

    root@master1:~# kubeadm version
    kubeadm version: &version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.2", GitCommit:"59603c6e503c87169aea6106f57b9f242f64df89", GitTreeState:"clean", BuildDate:"2020-01-18T23:27:49Z", GoVersion:"go1.13.5", Compiler:"gc", Platform:"linux/amd64"}
    
  3. 各 master 安装指定新版本 kubeadm

    root@master1:~# apt-get install kubeadm=1.17.4-00
    查看新kubeadm版本
    root@master1:~# kubeadm version
    kubeadm version: &version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.4", GitCommit:"8d8aa39598534325ad77120c120a22b3a990b5ea", GitTreeState:"clean", BuildDate:"2020-03-12T21:01:11Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}
    
  1. kubeadm升级命令使用帮助

    root@master1:~# kubeadm upgrade --help
    Upgrade your cluster smoothly to a newer version with this command
    
    Usage:
      kubeadm upgrade [flags]
      kubeadm upgrade [command]
    
    Available Commands:
      apply       Upgrade your Kubernetes cluster to the specified version
      diff        Show what differences would be applied to existing static pod manifests. See also: kubeadm upgrade apply --dry-run
      node        Upgrade commands for a node in the cluster
      plan        Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter
    
    Flags:
      -h, --help   help for upgrade
    
    Global Flags:
          --add-dir-header           If true, adds the file directory to the header
          --log-file string          If non-empty, use this log file
          --log-file-max-size uint   Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
          --rootfs string            [EXPERIMENTAL] The path to the 'real' host root filesystem.
          --skip-headers             If true, avoid header prefixes in the log messages
          --skip-log-headers         If true, avoid headers when opening log files
      -v, --v Level                  number for the log level verbosity
    
    
  1. 升级计划

你可能感兴趣的:(kubeadm安装k8s)