Installing a highly available Kubernetes 1.13 cluster with kubeadm

Table of contents

  • Configure the yum repositories
  • Install docker-18.09
  • System parameter adjustments
  • Create the SLB (this deployment uses haproxy + keepalived)
    • haproxy deployment
    • keepalived deployment
  • Configure the k8s 1.13 cluster with kubeadm-config.yaml
    • Deploy on the other master nodes
  • coredns startup error


Machine     IP              Notes
master-01   192.168.30.77   192.168.30.87 is the virtual IP; runs coredns etcd apiserver controller proxy scheduler weave-net
master-02   192.168.30.67   keepalived haproxy etcd apiserver controller proxy scheduler weave-net
master-03   192.168.30.57   keepalived haproxy etcd apiserver controller proxy scheduler weave-net

Configure the yum repositories

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup 

curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo


Install docker-18.09

daemon.json
# Note: to use the overlay2 storage driver the backing filesystem should be ext4; with xfs the
# filesystem must be formatted with ftype=1, e.g. mkfs.xfs -n ftype=1 /path/to/your/device
# (JSON does not allow comments, so this note is kept outside the file.)
{
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "10"
    },
    "bip": "10.5.5.1/24",
    "oom-score-adjust": -1000,
    "registry-mirrors": ["https://pqbap4ya.mirror.aliyuncs.com"],
    "storage-driver": "overlay2",
    "storage-opts": ["overlay2.override_kernel_check=true"]
}
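
The section only lists daemon.json, so here is a minimal install sketch (assuming the Aliyun docker-ce mirror; the exact 18.09 package version may differ in your repository):

yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.09.0 docker-ce-cli-18.09.0 containerd.io
mkdir -p /etc/docker                  # then write the daemon.json above into /etc/docker/daemon.json
systemctl enable docker && systemctl restart docker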

System parameter adjustments

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

# Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable kubelet && systemctl start kubelet


cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
EOF
sysctl --system
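
One small addition that is not in the original steps: the net.bridge.bridge-nf-call-* sysctls above only take effect when the br_netfilter kernel module is loaded, so it may be worth loading it explicitly and making it persistent:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # reload the module on boot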

Create the SLB (this deployment uses haproxy + keepalived)

haproxy deployment

haproxy stats page: http://192.168.30.87:1080/admin?stats

global
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
frontend  apiserver-4
    bind *:7443
    mode tcp
    option tcplog
    log global
    default_backend apiserver-cluster
backend apiserver-cluster
    mode tcp
    balance     roundrobin
    server  apiserver1  192.168.30.77:6443 check
    server  apiserver2  192.168.30.67:6443 check
    server  apiserver3  192.168.30.57:6443 check
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
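
A minimal sketch for getting this configuration running (assuming the stock CentOS haproxy package and that the config above is saved as /etc/haproxy/haproxy.cfg):

yum install -y haproxy
haproxy -c -f /etc/haproxy/haproxy.cfg    # validate the configuration
systemctl enable haproxy && systemctl restart haproxy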

keepalived deployment

# keepalived.conf (on both the master and the backup node)
! Configuration File for keepalived

global_defs {
   router_id LVS_k8s_1
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.30.87:7443" #虚拟ip:端口
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER  # use BACKUP on the standby node
    interface eth0
    virtual_router_id 51
    priority 100   # the BACKUP node can use 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.30.87/23 # virtual IP
    }
    track_script {
        CheckK8sMaster
    }

}
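
A minimal sketch for bringing keepalived up on master-02 and master-03 (assuming the config above is saved as /etc/keepalived/keepalived.conf, with state and priority adjusted on the BACKUP node):

yum install -y keepalived
systemctl enable keepalived && systemctl restart keepalived
ip addr show eth0 | grep 192.168.30.87    # the virtual IP should appear on the current MASTER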

Configure the k8s 1.13 cluster with kubeadm-config.yaml

# Run on master-01:
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "LOAD_BALANCER_DNS"  #证书支持使用slb,填写一个域名
controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT"   #添加集群的时候使用的ip+端口
#weave插件不需要下面的网段,calico需要指定
networking:
  podSubnet: "10.20.0.0/16"
imageRepository: "registry.aliyuncs.com/google_containers"

#kubernetesVersion should be set to the Kubernetes version to use; this example uses stable
#controlPlaneEndpoint should match the address or DNS name and port of the load balancer
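
As an illustration only: the join command further below uses cluster.kube.com:7443, so one way to fill in the placeholders is to point that name at the virtual IP on every node and reference it in the config (adjust the domain to your own environment):

echo "192.168.30.87 cluster.kube.com" >> /etc/hosts   # on every node, unless real DNS is available

cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "cluster.kube.com"
controlPlaneEndpoint: "cluster.kube.com:7443"
networking:
  podSubnet: "10.20.0.0/16"
imageRepository: "registry.aliyuncs.com/google_containers"
EOF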


#Initialize the k8s cluster
 kubeadm init --config=kubeadm-config.yaml
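
kubeadm init prints the usual kubeconfig steps when it finishes; they are needed before the kubectl apply commands below can talk to the cluster:

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config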
 
#Use the flannel CNI plugin
kubectl apply -f  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#Or use the weave CNI plugin
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
#Or use the calico CNI plugin
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
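
A quick sanity check after applying one of the CNI plugins (pod names and counts will differ per cluster):

kubectl get nodes
kubectl get pods -n kube-system -o wide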


# Copy the first master's certificates to the other control-plane nodes (master-02/03 then join with these certificates in place; see the join command below)
USER=root
CONTROL_PLANE_IPS="192.168.30.57 192.168.30.67"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    scp /etc/kubernetes/admin.conf "${USER}"@$host:
done

# On each of the other master nodes (master-02, master-03), move the copied files into place
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /${USER}/ca.crt /etc/kubernetes/pki/
mv /${USER}/ca.key /etc/kubernetes/pki/
mv /${USER}/sa.pub /etc/kubernetes/pki/
mv /${USER}/sa.key /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /${USER}/admin.conf /etc/kubernetes/admin.conf

Deploy on the other master nodes

# Run this command on master-02 and master-03 to join them to the cluster as control-plane nodes
 kubeadm join cluster.kube.com:7443 --token 4j9vsm.p9qmmr04a2jw3jvq --discovery-token-ca-cert-hash sha256:d16b66cf22193f430ca0a37acd3f7c138c51087e3be7c7555f5dbe1f1dd6e703  --experimental-control-plane
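
If the token shown above has expired or the --discovery-token-ca-cert-hash value is not at hand, both can be regenerated on master-01 with standard kubeadm/openssl commands (append --experimental-control-plane to the printed command for control-plane nodes):

kubeadm token create --print-join-command
# or compute the CA cert hash by hand:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'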


#docker image backup command
#docker save -o k8s-13-image.tar k8s.gcr.io/kube-apiserver:v1.13.0 k8s.gcr.io/kube-controller-manager:v1.13.0 k8s.gcr.io/kube-scheduler:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0 k8s.gcr.io/pause:3.1 k8s.gcr.io/etcd:3.2.24 k8s.gcr.io/coredns:1.2.6 weaveworks/weave-npc:2.5.0 weaveworks/weave-kube:2.5.0
#for i in ${CONTROL_PLANE_IPS};do ssh -p8022  $USER@$i docker load -i  $FILE_PATH/k8s-13-image.tar ;done

coredns startup error

 #Failed create pod sandbox: rpc error: code = Unknown desc = [failed to set up sandbox container "957541888b8a0e5b9ad65da932f688eb02cc182808e10d1a89a6e8db2132c253" network for pod "coredns-7655b945bc-6hgj9": NetworkPlugin cni failed to set up pod "coredns-7655b945bc-6hgj9_kube-system" network: failed to find plugin "loopback" in path [/opt/cni/bin], failed to clean up sandbox container "957541888b8a0e5b9ad65da932f688eb02cc182808e10d1a89a6e8db2132c253" network for pod "coredns-7655b945bc-6hgj9": NetworkPlugin cni failed to teardown pod "coredns-7655b945bc-6hgj9_kube-system" network: failed to find plugin "portmap" in path [/opt/cni/bin]]
 
 
 https://kubernetes.io/docs/setup/independent/troubleshooting-kubeadm/#coredns-pods-have-crashloopbackoff-or-error-state
If your network provider does not support the portmap CNI plugin, you may need to use the NodePort feature of Services or use HostNetwork=true.
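
For the specific error above, the loopback and portmap binaries are simply missing from /opt/cni/bin. A hedged fix sketch (assuming an RPM-based install, where these binaries normally come from the kubernetes-cni package pulled in alongside kubeadm/kubelet):

ls /opt/cni/bin                   # check which CNI plugin binaries are present
yum reinstall -y kubernetes-cni   # restore the standard plugins (loopback, portmap, ...)
systemctl restart kubelet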

