Deploying a multi-master Kubernetes cluster with kubeadm

Table of Contents

    • Machine initialization and related parameters
    • Install the cfssl tools on the master nodes
      • Configure the CA root certificate
      • Generate the client certificate
      • Generate the peer and server certificates (used by etcd)
      • Notes on certificates
    • Run etcd via systemd
    • Configure kubelet
    • Initialize node master1
    • Install the Kubernetes cluster
      • Steps after cluster initialization
    • Install the flannel network
    • Initialize the master nodes [2,3]
    • Use keepalived, or an SLB, to proxy the api-server port 6443
      • MASTER keepalived configuration
      • BACKUP [1,2] keepalived configuration

If you are interested in Kubernetes, you can join QQ group 885763297 to discuss it together.

Machine initialization and related parameters

keepalived   k8s roles               ip               hostname
MASTER       master + etcd           192.168.30.94    k8s-master1
BACKUP       master + etcd + node    192.168.30.182   k8s-slave1
BACKUP       master + etcd + node    192.168.30.187   k8s-slave2
Component                                Version
etcd etcd-v3.2.12
k8s.gcr.io/kube-apiserver-amd64 v1.10.5
k8s.gcr.io/kube-controller-manager-amd64 v1.10.5
k8s.gcr.io/kube-scheduler-amd64 v1.10.5
k8s.gcr.io/kube-proxy-amd64 v1.10.5
quay.io/coreos/flannel v0.10.0-amd64
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64 1.14.8
k8s.gcr.io/k8s-dns-sidecar-amd64 1.14.8
k8s.gcr.io/k8s-dns-kube-dns-amd64 1.14.8
k8s.gcr.io/pause-amd64 3.1
hostnamectl --static set-hostname k8s-master1   # use k8s-slave1 / k8s-slave2 on the other two nodes

cat <<EOF > /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.30.94 k8s-master1
192.168.30.182 k8s-slave1
192.168.30.187 k8s-slave2
EOF

ssh-keygen  # press Enter at every prompt
ssh-copy-id  k8s-slave1
ssh-copy-id  k8s-slave2

yum -y remove firewalld
sed -i  's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i "s/PasswordAuthentication no/PasswordAuthentication yes/g" /etc/ssh/sshd_config
sed -i 's/.*swap.*/#&/' /etc/fstab

setenforce 0
swapoff -a

yum -y install yum-utils ntpdate
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Kernel parameters required by Kubernetes (the kubernetes yum repo is added right after this block; see the sketch below):
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
net.ipv4.ip_forward= 1
EOF
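A Kubernetes yum repo also needs to be added here so the kubeadm/kubelet/kubectl packages below can be installed; the repo body is a sketch assuming the Aliyun mirror (matching the docker-ce mirror configured above), followed by applying the new kernel parameters:

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# load the bridge netfilter module and apply /etc/sysctl.d/k8s.conf
modprobe br_netfilter
sysctl --system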

echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536"  >> /etc/security/limits.conf
echo "* hard nproc 65536"  >> /etc/security/limits.conf
echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
echo "* hard memlock  unlimited"  >> /etc/security/limits.conf


yum makecache fast
yum install -y --setopt=obsoletes=0 docker-ce-17.03.2.ce-1.el7.centos docker-ce-selinux-17.03.2.ce-1.el7.centos
systemctl enable docker && systemctl start docker

yum --showduplicates list kubeadm
yum install -y --setopt=obsoletes=0 kubeadm-1.10.5-0.x86_64 kubelet-1.10.5-0.x86_64 kubectl-1.10.5-0.x86_64 

# yum install -y kubelet kubeadm kubectl   # alternative: installs the latest versions instead of the pinned 1.10.5 packages above
sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet

ntpdate time.pool.aliyun.com

mv /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce.repo.bak

Install the cfssl tools on the master nodes

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
sudo mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

export PATH=/usr/local/bin:$PATH
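A quick sanity check that the tools are on the PATH:

cfssl version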

Configure the CA root certificate

# CA signing configuration
cat > ca-config.json << EOF
 {
     "signing": {
         "default": {
             "expiry": "43800h"
         },
         "profiles": {
             "server": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             },
             "client": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "client auth"
                 ]
             },
             "peer": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
 }
EOF

# Create the CA certificate signing request (CSR)
cat > ca-csr.json << EOF
 {
     "CN": "etcd",
     "key": {
         "algo": "rsa",
         "size": 2048
     },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "k8s",
        "OU": "System"
      }
    ]
 }
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca
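cfssljson -bare ca writes ca.pem, ca-key.pem and ca.csr to the current directory; the certinfo tool installed above can be used to inspect the new root certificate:

cfssl-certinfo -cert ca.pem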

Generate the client certificate

Create the client CSR and sign it with the client profile defined in ca-config.json.
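A minimal sketch following the standard cfssl client example (the CN and key parameters here are assumptions; adjust them if needed):

cat > client.json <<EOF
{
    "CN": "client",
    "key": {
        "algo": "ecdsa",
        "size": 256
    }
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client
# produces client.pem, client-key.pem and client.csr, referenced later by the kubeadm config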

Generate the peer and server certificates (used by etcd)

# Each etcd node needs its own CSR; remove the inline # comments below before running cfssl (they are annotations only)
cfssl print-defaults csr > config.json
# then edit config.json for the node, e.g.:

{
    "CN": "master1",  #机器名
    "hosts": [
        "10.10.0.220", #机器ip
        "master1"
    ],
    "key": {
        "algo": "ecdsa",
        "size": 256
    },
    "names": [
        {
            "C": "US",
            "L": "CA",
            "ST": "San Francisco"
        }
    ]
}


 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | cfssljson -bare server
 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer config.json | cfssljson -bare peer

Notes on certificates

  • The certificates used by etcd can all be placed under /etc/kubernetes/etcd/pki/ (a placement sketch follows after this list)
  • The certificates used by the Kubernetes cluster can all be placed under /etc/kubernetes/pki/
  • A kubeadm reset only cleans out the contents of /etc/kubernetes/pki/
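A minimal placement sketch, assuming the certificates were generated in the current directory and then copied to each node; the client-certificate paths match what the kubeadm configuration below expects:

# on every etcd node: server/peer certificates
mkdir -p /etc/kubernetes/etcd/pki
cp ca.pem server.pem server-key.pem peer.pem peer-key.pem /etc/kubernetes/etcd/pki/

# on every master: client certificates used by the apiserver to reach etcd
mkdir -p /etc/kubernetes/pki/etcd
cp ca.pem client.pem client-key.pem /etc/kubernetes/pki/etcd/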

Run etcd via systemd

  • Install (the version table above lists etcd-v3.2.12):
  • export ETCD_VERSION=v3.2.12
    curl -sSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xzv --strip-components=1 -C /usr/local/bin/
  • Write the systemd unit to /etc/systemd/system/etcd.service, then enable and start etcd on every node.
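A minimal etcd.service sketch for k8s-master1 (192.168.30.94), assuming the /etc/kubernetes/etcd/pki/ certificate layout above and the three node IPs from the machine table; adjust --name, the IPs and the certificate paths on each node:

cat > /etc/systemd/system/etcd.service <<'EOF'
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
  --name k8s-master1 \
  --data-dir /var/lib/etcd \
  --cert-file=/etc/kubernetes/etcd/pki/server.pem \
  --key-file=/etc/kubernetes/etcd/pki/server-key.pem \
  --peer-cert-file=/etc/kubernetes/etcd/pki/peer.pem \
  --peer-key-file=/etc/kubernetes/etcd/pki/peer-key.pem \
  --trusted-ca-file=/etc/kubernetes/etcd/pki/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/etcd/pki/ca.pem \
  --client-cert-auth=true \
  --peer-client-cert-auth=true \
  --listen-client-urls https://192.168.30.94:2379 \
  --advertise-client-urls https://192.168.30.94:2379 \
  --listen-peer-urls https://192.168.30.94:2380 \
  --initial-advertise-peer-urls https://192.168.30.94:2380 \
  --initial-cluster k8s-master1=https://192.168.30.94:2380,k8s-slave1=https://192.168.30.182:2380,k8s-slave2=https://192.168.30.187:2380 \
  --initial-cluster-token etcd-k8s-cluster \
  --initial-cluster-state new
Restart=always
RestartSec=10s
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd && systemctl start etcd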

Configure kubelet

sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Enable kubelet API authentication: later, when Prometheus pulls monitoring data through the kubelet, it creates the corresponding ClusterRole and fetches the data through this API (a drop-in sketch follows after the restart commands below).
# Environment="KUBELET_EXTRA_ARGS=--authentication-token-webhook"
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet
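If you enable that flag, one option is a systemd drop-in (a sketch; the drop-in file name 20-kubelet-extra-args.conf is arbitrary):

cat > /etc/systemd/system/kubelet.service.d/20-kubelet-extra-args.conf <<'EOF'
[Service]
Environment="KUBELET_EXTRA_ARGS=--authentication-token-webhook"
EOF
systemctl daemon-reload && systemctl restart kubelet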

Initialize node master1

cat <<EOF > /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: 1.10.5
api:
  advertiseAddress: "192.168.30.94"   #本节点ip
etcd:
  endpoints:
  # etcd endpoints
  - https://192.168.30.94:2379
  - https://192.168.30.182:2379
  - https://192.168.30.187:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/client.pem
  keyFile: /etc/kubernetes/pki/etcd/client-key.pem
networking:
  # pod network CIDR
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
  # addresses (SANs) allowed to access the apiserver
- 192.168.30.189   # load-balanced apiserver IP (keepalived VIP or SLB IP)
- k8s-master1
- k8s-slave1
- k8s-slave2
- 192.168.30.94
- 192.168.30.182
- 192.168.30.187
#apiServerExtraArgs:
#  apiserver-count: "3"
#  endpoint-reconciler-type: lease
EOF

Install the Kubernetes cluster

kubeadm init --config=kubeadm-config.yaml


# If the init fails, reset with:
kubeadm reset
# or clean up manually:
rm -rf /etc/kubernetes/*.conf
rm -rf /etc/kubernetes/manifests/*.yaml
docker ps -a |awk '{print $1}' |xargs docker rm -f
systemctl  stop kubelet

Steps after cluster initialization

After the initialization completes, the newly generated certificates need to be copied to the other nodes; a sketch is shown below.
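A minimal sketch, assuming the root SSH keys distributed during machine initialization and the standard kubeadm layout:

# on k8s-master1: make kubectl usable (standard kubeadm post-init step)
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

# copy the cluster certificates to the other masters
for host in k8s-slave1 k8s-slave2; do
  ssh $host "mkdir -p /etc/kubernetes/pki"
  scp -r /etc/kubernetes/pki/* $host:/etc/kubernetes/pki/
done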

Install the flannel network

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f  kube-flannel.yml


[root@k8s-master1 kubernetes]# kubectl get pod -n kube-system  -o wide
NAME                                  READY     STATUS    RESTARTS   AGE       IP               NODE
kube-apiserver-k8s-master1            1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-controller-manager-k8s-master1   1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-dns-86f4d74b45-bwm2h             3/3       Running   0          19h       10.244.0.13      k8s-master1
kube-flannel-ds-rrwrg                 1/1       Running   0          18h       192.168.30.94    k8s-master1
kube-proxy-4zsnt                      1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-scheduler-k8s-master1            1/1       Running   0          19h       192.168.30.94    k8s-master1
[root@k8s-master1 kubernetes]# kubectl get node
NAME          STATUS    ROLES     AGE       VERSION
k8s-master1   Ready     master    8m        v1.10.5

Initialize the master nodes [2,3]

cat <<EOF > /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: 1.10.5
api:
  advertiseAddress: "192.168.30.182"
etcd:
  endpoints:
  - https://192.168.30.94:2379
  - https://192.168.30.182:2379
  - https://192.168.30.187:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/client.pem
  keyFile: /etc/kubernetes/pki/etcd/client-key.pem
networking:
  podSubnet: 10.244.0.0/16
# The token below is the join token printed when the master1 initialization finished
token: "1gddb4.cs1chtdrk5r9aa0i"
tokenTTL: "0s"
apiServerCertSANs:
- 192.168.30.189
- k8s-master1
- k8s-slave1
- k8s-slave2
- 192.168.30.94
- 192.168.30.182
- 192.168.30.187
#apiServerExtraArgs:
#  apiserver-count: "3"
#  endpoint-reconciler-type: lease
EOF
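Then, with the certificates from master1 already copied over (see above), initialize each of these masters the same way as master1:

kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml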

Use keepalived, or an SLB, to proxy the api-server port 6443

  • If you use nginx as the load balancer, the TLS on 6443 uses the apiserver's ca.crt and ca.key under /etc/kubernetes/pki (a minimal nginx sketch follows below). When the primary server goes down, the virtual IP automatically fails over to a backup server.
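A minimal nginx stream (TCP pass-through) sketch for balancing the three apiservers; it assumes nginx runs on a separate load-balancer host, or on a different port, so it does not collide with a local apiserver:

stream {
    upstream kube_apiserver {
        server 192.168.30.94:6443;
        server 192.168.30.182:6443;
        server 192.168.30.187:6443;
    }
    server {
        listen 6443;            # change the port if nginx shares a node with an apiserver
        proxy_pass kube_apiserver;
    }
}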

MASTER keepalived configuration

global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.30.189:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192              # NIC name
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 192.168.30.94    # this node's IP
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.30.182
        192.168.30.187
    }
    virtual_ipaddress {
        192.168.30.189/24         # virtual IP (VIP)
    }
    track_script {
        CheckK8sMaster
    }

}

BACKUP [1,2] keepalived configuration

global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.30.189:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface eno16777984  # NIC name
    virtual_router_id 61
    priority 90    # [backup 1: 90, backup 2: 80]
    advert_int 1
    mcast_src_ip 192.168.30.182  # this node's IP
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.30.94  # the other nodes' IPs
        192.168.30.187
    }
    virtual_ipaddress {
        192.168.30.189/24  # virtual IP
    }
    track_script {
        CheckK8sMaster
    }

}
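After starting keepalived on all three nodes, check that the VIP lands on exactly one node and that the apiserver answers through it:

systemctl enable keepalived && systemctl restart keepalived
ip addr | grep 192.168.30.189          # the VIP should appear on only one node at a time
curl -k https://192.168.30.189:6443    # same probe the vrrp_script above uses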
