Kubernetes Series: Building a k8s Cluster with kubeadm

Building a k8s Cluster with kubeadm

Note: every article in this Kubernetes series uses Kubernetes 1.15+.

1 Master Node Installation

1.1 System Environment Configuration

1.1.1 Set the Host Names (run the matching command on each node)

hostnamectl set-hostname kmaster-01
hostnamectl set-hostname knode-01
hostnamectl set-hostname knode-02

vi /etc/hosts

192.168.190.163 knode-01
192.168.190.164 knode-02
192.168.190.165 kmaster-01
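
To make sure the names resolve as expected, a quick connectivity check can be run from any of the nodes; a minimal sketch (host names and IPs assumed from the table above):

# Verify that every entry in /etc/hosts resolves and responds
for h in kmaster-01 knode-01 knode-02; do
    ping -c 1 -W 1 "$h" >/dev/null && echo "$h reachable" || echo "$h unreachable"
done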

1.1.2 Disable the Firewall

# Stop the firewall and disable it on boot
systemctl stop firewalld && systemctl disable firewalld

# Check the firewall status
systemctl status firewalld

# The status output looks like this
[root@kmaster-01 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)

Aug 05 15:21:17 localhost.localdomain systemd[1]: Starting firewalld - dynamic firewall daemon...
Aug 05 15:21:18 localhost.localdomain systemd[1]: Started firewalld - dynamic firewall daemon.
Aug 12 14:22:20 kmaster-01 systemd[1]: Stopping firewalld - dynamic firewall daemon...
Aug 12 14:22:21 kmaster-01 systemd[1]: Stopped firewalld - dynamic firewall daemon.

1.1.3 Disable SELinux

# Temporary: disables SELinux immediately, but the change is lost after a reboot
setenforce 0

# Permanent: change SELINUX=enforcing to SELINUX=disabled
vi /etc/selinux/config

SELINUX=disabled
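
If you prefer a non-interactive edit, the same permanent change can be made with sed; a minimal sketch (check /etc/selinux/config afterwards):

# Change SELINUX=enforcing to SELINUX=disabled in place (takes effect after the next reboot)
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config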

# Check the SELinux status
/usr/sbin/sestatus

# The SELinux status output looks like this
[root@kmaster-01 ~]# /usr/sbin/sestatus
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   permissive
Mode from config file:          disabled
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Max kernel policy version:      31

1.1.4 Comment Out the Automatic SWAP Mount

vi  /etc/fstab
 #
 # /etc/fstab
 # Created by anaconda on Mon Jan 21 19:19:41 2019
 #
 # Accessible filesystems, by reference, are maintained under '/dev/disk'
 # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
 #
 /dev/mapper/centos-root /                       xfs     defaults        0 0
 UUID=214b916c-ad23-4762-b916-65b53fce1920 /boot                   xfs     defaults        0     0
 #/dev/mapper/centos-swap swap                    swap    defaults        0 0
 

1.1.5 Create the k8s.conf File

# Turn off swap so that kubelet runs correctly
swapoff -a

# Create the k8s.conf file
vi /etc/sysctl.d/k8s.conf

# File contents
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0 

# Load the br_netfilter module and apply the settings
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
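
To confirm that swap is off and the kernel parameters are active, a quick check (assuming the commands above completed without errors):

# The Swap line should show 0 used / 0 total
free -h

# All three values should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward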

1.1.6 Prerequisites for Enabling IPVS in kube-proxy

# Make sure the required kernel modules are loaded automatically after a reboot
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make the script executable, run it, and check that the modules loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# Check that the required kernel modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# Install the ipset package
yum -y install ipset

# Install ipvsadm to make it easier to inspect the IPVS proxy rules
yum -y install ipvsadm

TIP: if these prerequisites are not met, kube-proxy falls back to iptables mode even when its configuration enables ipvs mode.
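
Once the cluster is up, you can check whether kube-proxy really runs in ipvs mode; a rough sketch (the k8s-app=kube-proxy label is assumed from the default kubeadm DaemonSet):

# ipvs mode creates virtual servers that ipvsadm can list; empty output usually means iptables mode
ipvsadm -Ln

# The kube-proxy logs also state which proxier was chosen
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=50 | grep -i ipvs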

1.1.7 Synchronize the Time

1. Install the ntpdate tool

yum -y install ntp ntpdate

2. Sync the system time with a network time server

ntpdate cn.pool.ntp.org

3. Write the system time to the hardware clock

hwclock --systohc
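
To confirm the clock is correct, and optionally keep it in sync with a daily cron job; a sketch (the schedule and NTP server below are just examples, assumes crond is running):

# Show the current system time and hardware clock
date && hwclock --show

# Re-sync once a day at 03:00
(crontab -l 2>/dev/null; echo "0 3 * * * /usr/sbin/ntpdate cn.pool.ntp.org && /usr/sbin/hwclock --systohc") | crontab -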

1.2 Install and Configure Docker

1.2.1 Install Docker

Remove old versions

$ sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-selinux \
                  docker-engine-selinux \
                  docker-engine

Install the required system tools

sudo yum install -y yum-utils device-mapper-persistent-data lvm2

Add the repository information

sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Refresh the yum cache

sudo yum makecache fast

Install Docker CE

sudo yum -y install docker-ce

1.2.2 Configure Docker

# Edit the file
vim /etc/docker/daemon.json

# Minimal configuration
{
  "registry-mirrors": ["https://1bbsr4jc.mirror.aliyuncs.com","https://registry.docker-cn.com"],
  "insecure-registries": ["192.168.190.164:5000"]
}



# More advanced configuration
{
  "graph": "/data/docker",
  "storage-driver": "overlay",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}

Notes:

1. The bip value should follow the host IP. For example, if the host is 10.4.7.128, the Docker bridge network can be configured as 172.7.128.1/24.

2. insecure-registries: the address of your private image registry goes here.
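
After writing daemon.json, Docker must be (re)started for the settings to take effect; a minimal sketch:

# Enable Docker on boot, restart it, and confirm the mirrors / cgroup driver were picked up
systemctl enable docker
systemctl daemon-reload
systemctl restart docker
docker info | grep -iE 'cgroup|mirror|insecure'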

Confirm that the default policy of the FORWARD chain in the iptables filter table is ACCEPT.

# Confirm the FORWARD chain's default policy is ACCEPT
iptables -nvL

Chain INPUT (policy ACCEPT 9 packets, 760 bytes)
 pkts bytes target     prot opt in     out     source            destination         
 
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination
 
# If it is not ACCEPT, change it
iptables  -P FORWARD  ACCEPT

1.3 Install and Configure kubeadm

1.3.1 Install kubeadm

1. Create the repo file

# Create the file; the Aliyun mirror is used here, but other mirrors also work
vim /etc/yum.repos.d/kubernetes.repo

# File contents
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
enabled=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
	https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

2. Install kubelet, kubeadm, and kubectl

# Build the metadata cache and install kubelet, kubeadm, and kubectl

# Refresh the yum cache
yum -y makecache fast

# If no version is specified, the latest version is installed (currently 1.15.2). To pin version 1.14.3:
yum install -y kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3

# To pin version 1.15.2 (the version used in this article):
yum install -y kubelet-1.15.2 kubeadm-1.15.2 kubectl-1.15.2

# Or install the latest version
yum install -y kubelet kubeadm kubectl

3. Enable kubelet on boot

# Enable kubelet on boot and start it
systemctl enable kubelet && systemctl start kubelet
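
To confirm that the three tools were installed and their versions match, a quick check:

# kubeadm, kubelet, and kubectl should all report the same version
kubeadm version -o short
kubelet --version
kubectl version --client --short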

1.3.2 Configure kubeadm

Installing Kubernetes essentially means installing its component images, and kubeadm already knows which basic images a Kubernetes cluster needs. Because of network restrictions in mainland China, however, these images cannot be pulled directly when setting up the environment; switching to the image repository provided by Aliyun solves the problem.

1. Generate the configuration
# Export the default configuration to a file
kubeadm config print init-defaults > kubeadm.yml
2. Edit the configuration
[root@k8s-master-01 kubernates]# vim kubeadm.yml 

apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Change this to the server's IP address
  advertiseAddress: 10.4.7.128
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master-01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# Image repository: use the Aliyun mirror
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# Set this to the installed kubeadm / kubectl / kubelet version
kubernetesVersion: v1.15.2
networking:
  dnsDomain: cluster.local
  # Pod network CIDR (must match the CNI plugin's configuration)
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Add this section to configure kube-proxy to use ipvs mode for Pod-to-Pod traffic
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

3. List / pull the images
# Change to the directory containing kubeadm.yml
cd /usr/local/kubernetes

# List the required images
kubeadm config images list --config kubeadm.yml

# Pull the images
kubeadm config images pull --config kubeadm.yml
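
To confirm the images were pulled from the Aliyun repository, list them locally:

# The required images should now be tagged with the registry.aliyuncs.com/google_containers prefix
docker images | grep registry.aliyuncs.com/google_containers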

1.4 Initialize the Master Node

Run the following command to initialize the master node. It points kubeadm at the configuration file created above; the --experimental-upload-certs flag uploads the certificates so they can be distributed automatically when additional nodes are joined later, and the appended tee kubeadm-init.log saves the output to a log file.

1.4.1 Install and Issue Certificates

1. Initialize
kubeadm init --config=kubeadm.yml --experimental-upload-certs | tee kubeadm-init.log
2. Initialization output
[root@k8s-master-01 kubernates]# kubeadm init --config=kubeadm.yml --experimental-upload-certs | tee kubeadm-init.log

## Initialization output
[init] Using Kubernetes version: v1.14.1
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.4.7.128 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.4.7.128 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.4.7.128]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 33.503334 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
37b163df1ac944283189da3b1dc294b46d8cb20ae62786b033857a88a9818594
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03
3. Configure kubectl
mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. Verify that it worked
kubectl get node

# If node information is printed, the initialization succeeded
NAME                STATUS     ROLES    AGE     VERSION
kubernetes-master   NotReady   master   8m40s   v1.14.1
5. What kubeadm init does
  • init: starts initialization with the specified version
  • preflight: runs pre-flight checks and downloads the required Docker images
  • kubelet-start: generates the kubelet configuration file /var/lib/kubelet/config.yaml; without this file the kubelet cannot start, which is why the kubelet started before initialization does not actually run successfully
  • certificates: generates the certificates used by Kubernetes and stores them under /etc/kubernetes/pki
  • kubeconfig: generates the kubeconfig files under /etc/kubernetes, which the components use to talk to each other
  • control-plane: installs the master components from the YAML manifests under /etc/kubernetes/manifests
  • etcd: installs the etcd service from /etc/kubernetes/manifests/etcd.yaml
  • wait-control-plane: waits for the master components deployed by control-plane to start as static Pods
  • apiclient: checks the health of the master components
  • uploadconfig: uploads the configuration that was used
  • kubelet: configures the kubelet via a ConfigMap
  • patchnode: records CNI information on the Node using annotations
  • mark-control-plane: labels the current node with the master role and taints it as unschedulable, so Pods are not scheduled to the master node by default
  • bootstrap-token: generates a token; write it down, it is needed later when adding nodes with kubeadm join
  • addons: installs the CoreDNS and kube-proxy add-ons

2 Worker Node Installation

2.1 Basic Configuration

Adding a worker (slave) node to the cluster is straightforward: install the three tools kubeadm, kubectl, and kubelet on the node, then join it with the kubeadm join command. The preparation steps are:

  • Set the host name
  • Disable the firewall, SELinux, and swap
  • Configure k8s.conf
  • Enable ipvs
  • Install and configure Docker
  • Install the three tools

These steps were covered in the previous sections and are not repeated here.

2.2 Join the Slave Node to the Cluster

# Join command
kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03
    
# Join output
[root@node-01 ~]# kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03


[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

Notes:

  • token
    • It appears in the log produced when installing the master.
    • kubeadm token list prints the current token information.
    • If the token has expired, create a new one with kubeadm token create (see the sketch after this list).
  • discovery-token-ca-cert-hash
    • The sha256 value also appears in the master installation log.
    • It can be recomputed with openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
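
If you would rather not assemble the token and hash by hand, kubeadm can print a ready-to-use join command; run it on the master (a minimal sketch):

# Creates a fresh token and prints the complete kubeadm join command, including the CA cert hash
kubeadm token create --print-join-command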

2.3 Verify the Join

2.3.1 Check the Node Status

[root@k8s-master-01 kubernates]# kubectl get nodes
NAME         STATUS     ROLES    AGE   VERSION
kmaster-01   NotReady   master   26m   v1.15.2
knode-01     NotReady   <none>   79s   v1.15.2
knode-02     NotReady   <none>   13s   v1.15.2

2.3.2 Recovering from a Failed Join

If a slave node fails to join the master because of a configuration problem, run kubeadm reset on that node to wipe the configuration and then rerun kubeadm join. To remove a node from the cluster, run kubectl delete node <node-name> on the master.
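
A minimal sketch of both operations (the node name knode-01 and the join parameters are taken from the examples above):

# On the misconfigured worker node: wipe the kubeadm state, then join again
kubeadm reset
kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03

# On the master: remove the node from the cluster
kubectl delete node knode-01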

2.3.3 Check the Pod Status

kubectl get pod -n kube-system -o wide

The output shows that coredns is not running yet; a network plugin still needs to be installed.

3 Configure the Cluster Network

3.1 CNI

3.1.1 Container Networking

Container networking is the mechanism by which a container connects to other containers, to the host, and to external networks. The container runtime offers several network modes, each with a different behavior. For example, Docker can configure the following networks for a container by default:

  • none: attaches the container to an isolated, container-only network stack with no external connectivity.

  • host: attaches the container to the host's network stack, with no isolation.

  • default bridge: the default network mode; containers can reach each other by IP address.

  • custom bridge: a user-defined bridge that offers more flexibility, isolation, and other convenience features.

3.1.2 What Is CNI

CNI (Container Network Interface) is a standard, general-purpose interface. There are many container platforms (Docker, Kubernetes, Mesos) and many container network solutions (flannel, calico, weave). As long as a single standard interface is defined, any network solution that implements it can provide networking for every container platform that supports the same protocol, and CNI is exactly that standard interface protocol.

3.1.3 CNI Plugins in Kubernetes

CNI was conceived as a framework for dynamically configuring the appropriate network settings and resources when containers are created or destroyed. A plugin is responsible for configuring and managing IP addresses on the interface, and typically provides functionality for IP management, per-container IP allocation, and multi-host connectivity. The container runtime calls the network plugin to allocate an IP address and configure networking when a container starts, and calls it again to clean up those resources when the container is deleted.

The runtime or orchestrator decides which network a container should join and which plugin it needs to call. The plugin then adds an interface into the container's network namespace as one side of a veth pair, and makes changes on the host, such as attaching the other end of the veth pair to a bridge. After that, it allocates an IP address and sets up routes by calling a separate IPAM (IP Address Management) plugin.

In Kubernetes, the kubelet invokes the plugins it finds at the appropriate time to configure networking automatically for the pods it starts.

The CNI plugins available for Kubernetes include:

  • Flannel
  • Calico
  • Canal
  • Weave

3.2 Calico

3.2.1 What Is Calico

Calico provides a secure networking solution for containers and virtual machines, proven at scale in production (in public clouds and across clusters with thousands of nodes), and integrates with Kubernetes, OpenShift, Docker, Mesos, DC/OS, and OpenStack.

Calico also enforces network security rules dynamically. Using Calico's simple policy language, you can implement fine-grained control over communication between containers, virtual machine workloads, and bare-metal host endpoints.

3.2.2 Install Calico

Install it following the official documentation: https://docs.projectcalico.org/v3.7/getting-started/kubernetes/

# Run on the Master node only
kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/calico.yaml

# The installation prints output like this
[root@k8s-master-01 kubernates]# kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
deployment.extensions/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

Confirm that the installation succeeded

watch kubectl get pods --all-namespaces

# Wait until every pod is Running; this can take a while, roughly 3-5 minutes
Every 2.0s: kubectl get pods --all-namespaces                                                                                                    kubernetes-master: Fri May 10 18:16:51 2019

NAMESPACE     NAME                                        READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-8646dd497f-g2lln    1/1     Running   0          50m
kube-system   calico-node-8jrtp                           1/1     Running   0          50m
kube-system   coredns-8686dcc4fd-mhwfn                    1/1     Running   0          51m
kube-system   coredns-8686dcc4fd-xsxwk                    1/1     Running   0          51m
kube-system   etcd-kubernetes-master                      1/1     Running   0          50m
kube-system   kube-apiserver-kubernetes-master            1/1     Running   0          51m
kube-system   kube-controller-manager-kubernetes-master   1/1     Running   0          51m
kube-system   kube-proxy-p8mdw                            1/1     Running   0          51m
kube-system   kube-scheduler-kubernetes-master            1/1     Running   0          51m

The basic environment is now fully deployed.

4 Run the First Kubernetes Container

4.1 Check Component Status

kubectl get cs

# Output
NAME                 STATUS    MESSAGE             ERROR
# Scheduler: assigns Pods to Nodes
scheduler            Healthy   ok
# Controller manager: runs the control loops that bring the cluster back to its desired state, e.g. after a Node failure
controller-manager   Healthy   ok
# etcd: the cluster's key-value store, which backs all cluster state
etcd-0               Healthy   {"health":"true"}

4.2 Check the Master Status

kubectl cluster-info

# Output
# Master node status
Kubernetes master is running at https://192.168.190.165:6443

# DNS status
KubeDNS is running at https://192.168.190.165:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

4.3 Check Node Status

kubectl get nodes

# Output; STATUS Ready means the node is healthy
[root@kmaster-01 calico]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
kmaster-01   Ready    master   22m   v1.15.2
knode-01     Ready    <none>   11m   v1.15.2

4.4 Run the First Container Instance

# Use kubectl to create two Nginx Pods (the smallest unit in which Kubernetes runs containers) listening on port 80
kubectl run nginx --image=nginx --replicas=2 --port=80

# Output
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created

4.5 Check the Status of All Pods

kubectl get pods

# Output; after a short wait, STATUS Running means the Pods are running successfully
NAME                     READY   STATUS    RESTARTS   AGE
nginx-755464dd6c-qnmwp   1/1     Running   0          90m
nginx-755464dd6c-shqrp   1/1     Running   0          90m

4.6 Check the Deployment

kubectl get deployment

# Output
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   2/2     2            2           91m

4.7 Expose the Service for User Access

kubectl expose deployment nginx --port=80 --type=LoadBalancer

# Output
service/nginx exposed

4.8 Check the Exposed Service

kubectl get services

# Output
NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1        <none>        443/TCP        44h
# The Nginx service has been exposed and port 80 is mapped to NodePort 31738 (EXTERNAL-IP stays <pending> because there is no cloud load balancer)
nginx        LoadBalancer   10.108.121.244   <pending>     80:31738/TCP   88m

4.9 Check the Service Details

kubectl describe service nginx

# Output
Name:                     nginx
Namespace:                default
Labels:                   run=nginx
Annotations:              <none>
Selector:                 run=nginx
Type:                     LoadBalancer
IP:                       10.108.121.244
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31738/TCP
Endpoints:                192.168.17.5:80,192.168.8.134:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

4.10 Verify the Deployment

Access the Master server from a browser:

http://192.168.190.165:31738/

Kubernetes load-balances requests across the deployed Nginx Pods; seeing the Nginx welcome page means the deployment works. The containers actually run on the worker nodes, so accessing a worker node's IP:Port works as well.
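
The same check can be done from the command line; a sketch assuming the NodePort 31738 shown above:

# Should return HTTP/1.1 200 OK and the Nginx response headers
curl -I http://192.168.190.165:31738/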

4.11 Stop the Service

kubectl delete deployment nginx

# Output
deployment.extensions "nginx" deleted

kubectl delete service nginx

# Output
service "nginx" deleted
