Manually building Kubernetes 1.13.1 with kubeadm (keepalived-based master HA)

1. Prepare the base environment

# Temporarily disable SELinux
setenforce 0

# Permanently disable it by editing /etc/sysconfig/selinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux

# Temporarily turn off swap
# To turn it off permanently, comment out the swap line in /etc/fstab (see the sketch below)

swapoff -a
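To make this survive reboots, the swap entry in /etc/fstab has to be commented out; a minimal sketch, assuming a standard fstab layout (back the file up and review it afterwards):

cp /etc/fstab /etc/fstab.bak
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab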

# Enable forwarding
# Docker has adjusted its default firewall rules since version 1.13:
# it sets the FORWARD chain of the iptables filter table to DROP,
# which breaks cross-node pod-to-pod traffic in a Kubernetes cluster

iptables -P FORWARD ACCEPT

# Configure the forwarding-related kernel parameters, otherwise later steps may fail

cat <<EOF > /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

vm.swappiness=0

EOF
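On some kernels the two bridge-nf keys above only become available once the br_netfilter module is loaded; if sysctl --system complains that they are unknown, loading the module first should help:

modprobe br_netfilter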

sysctl --system

# Load the kernel modules required by IPVS
# They are lost on reboot and must be reloaded (a persistent approach is sketched below)

modprobe ip_vs

modprobe ip_vs_rr

modprobe ip_vs_wrr

modprobe ip_vs_sh

modprobe nf_conntrack_ipv4

lsmod | grep ip_vs
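To avoid reloading these modules by hand after every reboot, one option is a systemd modules-load drop-in (a sketch; the file name is arbitrary):

cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF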

2. Install Docker

yum install -y yum-utils

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum makecache fast

yum install -y docker-ce-18.03.1.ce-1.el7.centos.x86_64

systemctl enable docker && systemctl restart docker

3. Install kubeadm, kubelet, and kubectl

cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/

enabled=1

gpgcheck=0

repo_gpgcheck=0

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

yum install -y kubelet-1.13.1 kubeadm-1.13.1 kubectl-1.13.1 ipvsadm
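kubeadm expects the kubelet service to be enabled so that it comes up automatically once kubeadm init has written its configuration; enabling it now avoids a preflight warning later:

systemctl enable kubelet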

4. Install etcd

(4.1) Create the certificate directory

mkdir -pv $HOME/ssl && cd $HOME/ssl
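The certificate steps below use cfssl and cfssljson. If they are not installed yet, one way to get them is to download the static binaries (the R1.2 URLs below are the commonly used ones and may need adjusting):

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson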

Generate the JSON files that the certificates depend on:

cat > ca-config.json << EOF

{

  "signing": {

    "default": {

      "expiry": "87600h"

    },

    "profiles": {

      "kubernetes": {

        "usages": [

            "signing",

            "key encipherment",

            "server auth",

            "client auth"

        ],

        "expiry": "87600h"

      }

    }

  }

}

EOF

cat > etcd-ca-csr.json << EOF

{

  "CN": "etcd",

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shenzhen",

      "L": "Shenzhen",

      "O": "etcd",

      "OU": "Etcd Security"

    }

  ]

}

EOF

cat > etcd-csr.json << EOF

{

    "CN": "etcd",

    "hosts": [

      "127.0.0.1",

      "10.148.0.4",

      "10.148.0.5",

      "10.148.0.6"

    ],

    "key": {

        "algo": "rsa",

        "size": 2048

    },

    "names": [

        {

            "C": "CN",

            "ST": "Shenzhen",

            "L": "Shenzhen",

            "O": "etcd",

            "OU": "Etcd Security"

        }

    ]

}

EOF

Run the following commands to generate the certificates:

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca

cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

mkdir -pv /etc/etcd/ssl

cp etcd*.pem /etc/etcd/ssl

(4.2) Install etcd (on all three nodes)

yum install -y etcd

cat << EOF > /etc/etcd/etcd.conf

#[Member]

#ETCD_CORS=""

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

#ETCD_WAL_DIR=""

ETCD_LISTEN_PEER_URLS="https://10.148.0.4:2380"

ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://10.148.0.4:2379"

#ETCD_MAX_SNAPSHOTS="5"

#ETCD_MAX_WALS="5"

ETCD_NAME="etcd1"

#ETCD_SNAPSHOT_COUNT="100000"

#ETCD_HEARTBEAT_INTERVAL="100"

#ETCD_ELECTION_TIMEOUT="1000"

#ETCD_QUOTA_BACKEND_BYTES="0"

#ETCD_MAX_REQUEST_BYTES="1572864"

#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"

#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"

#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.148.0.4:2380"

ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://10.148.0.4:2379"

#ETCD_DISCOVERY=""

#ETCD_DISCOVERY_FALLBACK="proxy"

#ETCD_DISCOVERY_PROXY=""

#ETCD_DISCOVERY_SRV=""

ETCD_INITIAL_CLUSTER="etcd1=https://10.148.0.4:2380,etcd2=https://10.148.0.5:2380,etcd3=https://10.148.0.6:2380"

ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"

#ETCD_INITIAL_CLUSTER_STATE="new"

#ETCD_STRICT_RECONFIG_CHECK="true"

#ETCD_ENABLE_V2="true"

#[Proxy]

#ETCD_PROXY="off"

#ETCD_PROXY_FAILURE_WAIT="5000"

#ETCD_PROXY_REFRESH_INTERVAL="30000"

#ETCD_PROXY_DIAL_TIMEOUT="1000"

#ETCD_PROXY_WRITE_TIMEOUT="5000"

#ETCD_PROXY_READ_TIMEOUT="0"

#[Security]

ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"

ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"

#ETCD_CLIENT_CERT_AUTH="false"

ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"

#ETCD_AUTO_TLS="false"

ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"

ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"

#ETCD_PEER_CLIENT_CERT_AUTH="false"

ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"

#ETCD_PEER_AUTO_TLS="false"

#[Logging]

#ETCD_DEBUG="false"

#ETCD_LOG_PACKAGE_LEVELS=""

#ETCD_LOG_OUTPUT="default"

#[Unsafe]

#ETCD_FORCE_NEW_CLUSTER="false"

#[Version]

#ETCD_VERSION="false"

#ETCD_AUTO_COMPACTION_RETENTION="0"

#[Profiling]

#ETCD_ENABLE_PPROF="false"

#ETCD_METRICS="basic"

#[Auth]

#ETCD_AUTH_TOKEN="simple"

EOF

(4.3) Bundle the etcd certificates and distribute them to the other nodes

cd /etc && tar -cvzf etcd.tar.gz etcd/

Copy the bundle to the other nodes and adjust etcd.conf on each of them (ETCD_NAME and the node's own IP), for example as sketched below.
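A sketch of the distribution step, assuming root SSH access and the node IPs used throughout this article:

scp /etc/etcd.tar.gz root@10.148.0.5:/etc/
scp /etc/etcd.tar.gz root@10.148.0.6:/etc/

# On each target node: unpack, then edit /etc/etcd/etcd.conf
cd /etc && tar xzf etcd.tar.gz
# Change ETCD_NAME to etcd2/etcd3 and replace 10.148.0.4 with the node's own IP
# in the ETCD_LISTEN_* and ETCD_*_ADVERTISE_* settings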

(4.4) Start the etcd cluster

systemctl start etcd

etcd1 will not become healthy until etcd2 and etcd3 have also been started, so start them as well.

(4.5) Check the cluster

etcdctl --endpoints "https://127.0.0.1:2379" --ca-file=/etc/etcd/ssl/etcd-ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health

etcdctl --endpoints "https://127.0.0.1:2379" --ca-file=/etc/etcd/ssl/etcd-ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem member list

5. Install keepalived

yum install -y keepalived

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {

   notification_email {

[email protected]

[email protected]

[email protected]

   }

   notification_email_from [email protected]

   smtp_server 192.168.200.1

   smtp_connect_timeout 30

   router_id LVS_DEVEL

   vrrp_skip_check_adv_addr

   vrrp_garp_interval 0

   vrrp_gna_interval 0

}

vrrp_instance VI_1 {

    state MASTER

    interface eth0

    virtual_router_id 51

    priority 100

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

        10.148.0.10

    }

}

systemctl enable keepalived && systemctl start keepalived

Check that the virtual IP has been assigned:

ip a
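The file above is the MASTER configuration for the node that should normally hold the VIP 10.148.0.10. On master2 and master3 the same file can be reused with only the role and priority lowered (a sketch; also make sure interface matches the actual NIC name on each host):

    state BACKUP
    priority 90    # for example 90 on master2, 80 on master3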

6. Install HAProxy

yum install -y haproxy

vim /etc/haproxy/haproxy.cfg

global

    log         127.0.0.1 local2

    chroot      /var/lib/haproxy

    pidfile     /var/run/haproxy.pid

    maxconn     4000

    user        haproxy

    group       haproxy

    daemon

defaults

    mode                    tcp

    log                     global

    retries                 3

    timeout connect         10s

    timeout client          1m

    timeout server          1m

frontend kubernetes

    bind *:8443

    mode tcp

    default_backend kubernetes-master

backend kubernetes-master

    balance roundrobin

    server master1  10.148.0.4:6443 check maxconn 2000
    server master2  10.148.0.5:6443 check maxconn 2000
    server master3  10.148.0.6:6443 check maxconn 2000

systemctl enable haproxy && systemctl start haproxy
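A quick check that HAProxy is listening on each master (ss comes with the iproute package on CentOS 7):

ss -lnt | grep 8443

Note that the kubeadm configuration in the next section uses controlPlaneEndpoint 10.148.0.10:6443, which reaches the apiserver directly on whichever node holds the VIP; to route API traffic through HAProxy instead, point controlPlaneEndpoint (and the join command) at port 8443.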

7. Initialize master1

vim kubeadm-master1.yaml

apiVersion: kubeadm.k8s.io/v1beta1

kind: InitConfiguration

localAPIEndpoint:

  advertiseAddress: 10.148.0.4

  bindPort: 6443

---

apiVersion: kubeadm.k8s.io/v1beta1

kind: ClusterConfiguration

kubernetesVersion: v1.13.1

imageRepository: k8s.gcr.io

apiServer:

  certSANs:

  - "master1"

  - "master2"

  - "master3"

  - "10.148.0.6"

  - "10.148.0.5"

  - "10.148.0.4"

  - "10.148.0.10"

  - "127.0.0.1"

controlPlaneEndpoint: "10.148.0.10:6443"

etcd:

  external:

    endpoints:

    - https://10.148.0.4:2379

    - https://10.148.0.5:2379

    - https://10.148.0.6:2379

    caFile: /etc/etcd/ssl/etcd-ca.pem

    certFile: /etc/etcd/ssl/etcd.pem

    keyFile: /etc/etcd/ssl/etcd-key.pem

networking:

  podSubnet: "10.244.0.0/16"

kubeadm init --config kubeadm-master1.yaml --ignore-preflight-errors=all

8. Configure kubectl

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

At this point the node shows up as NotReady, and the coredns pods in the kube-system namespace stay in Pending.

This is because the master node carries a taint; remove it with the command below.

Note: taints and other advanced topics will be covered in a separate article.

kubectl taint nodes --all node-role.kubernetes.io/master-

After running that command, coredns moves to ContainerCreating; a network add-on still needs to be installed.

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl apply -f kube-flannel.yml

After waiting a while, master1 is done.
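A couple of plain kubectl checks to confirm that the node turns Ready and the kube-system pods (coredns and flannel included) settle:

kubectl get nodes
kubectl get pods -n kube-system -o wide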

9. Copy the certificates on master1 and distribute them to the other nodes

cd /etc/kubernetes && tar cvzf k8s-key.tgz admin.conf pki/ca.* pki/sa.* pki/front-proxy-ca.*

Running the command above produces the archive k8s-key.tgz.
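A sketch of copying the archive to the other masters, assuming root SSH access:

scp /etc/kubernetes/k8s-key.tgz root@10.148.0.5:~/
scp /etc/kubernetes/k8s-key.tgz root@10.148.0.6:~/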

On the other nodes, extract it with:

tar xf k8s-key.tgz -C /etc/kubernetes/

10. Initialize master2 by running the following commands in order; they use kubeadm-master2.yaml, shown at the end of this section.

# Generate the certificates
kubeadm init phase certs all --config kubeadm-master2.yaml

# Generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm-master2.yaml

# Start the kubelet
kubeadm init phase kubelet-start --config kubeadm-master2.yaml

# Configure kubectl

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Bring up kube-apiserver, kube-controller-manager, and kube-scheduler

kubeadm init phase kubeconfig all --config kubeadm-master2.yaml

kubeadm init phase control-plane all --config kubeadm-master2.yaml

# Mark the node as a master
kubeadm init phase mark-control-plane --config kubeadm-master2.yaml

The kubeadm-master2.yaml used by the commands above:

apiVersion: kubeadm.k8s.io/v1beta1

kind: InitConfiguration

localAPIEndpoint:

  advertiseAddress: 10.148.0.5

  bindPort: 6443

---

apiVersion: kubeadm.k8s.io/v1beta1

kind: ClusterConfiguration

kubernetesVersion: v1.13.1

imageRepository: k8s.gcr.io

apiServer:

  certSANs:

  - "master1"

  - "master2"

  - "master3"

  - "10.148.0.6"

  - "10.148.0.5"

  - "10.148.0.4"

  - "10.148.0.10"

  - "127.0.0.1"

controlPlaneEndpoint: "10.148.0.10:6443"

etcd:

  external:

    endpoints:

    - https://10.148.0.4:2379

    - https://10.148.0.5:2379

    - https://10.148.0.6:2379

    caFile: /etc/etcd/ssl/etcd-ca.pem

    certFile: /etc/etcd/ssl/etcd.pem

    keyFile: /etc/etcd/ssl/etcd-key.pem

networking:

  podSubnet: "10.244.0.0/16"

11. Initialize master3 (10.148.0.6) in the same way.

12. Join the worker nodes. When master1's initialization finished, kubeadm printed a join command like the one below; run it on each worker node.

kubeadm join 10.148.0.10:6443 --token 35msw9.r7l2l6cqcbr2i9td --discovery-token-ca-cert-hash sha256:bceaf744be1eb207c684cda8d1171a64b6d27c156dd63974ad512ff667081e5c
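The bootstrap token in this command expires after 24 hours by default; if it has been lost or has expired, a fresh join command can be printed on any master with the standard kubeadm subcommand:

kubeadm token create --print-join-command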

Appendix: Docker image pull script

#!/bin/bash

# Images required by Kubernetes 1.13.1 (plus flannel), pulled from the Aliyun mirror
images=(kube-proxy:v1.13.1 kube-scheduler:v1.13.1 kube-controller-manager:v1.13.1 kube-apiserver:v1.13.1 etcd:3.2.24 pause:3.1 coredns:1.2.6 flannel:v0.10.0-amd64)

for imageName in "${images[@]}" ; do
  # Pull from the mirror, then re-tag with the k8s.gcr.io name that kubeadm expects
  docker pull registry.aliyuncs.com/google_containers/$imageName
  docker tag registry.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
done

For more content, follow my Zhihu account: https://www.zhihu.com/people/dengjiabo/activities
