K8s Study Notes

apiVersion: v1
kind: ReplicationController
metadata:
  name: mytomcat
spec:
  replicas: 2
  selector:
    app: mytomcat
  template:
    metadata:
      labels:
        app: mytomcat
    spec:
      containers:
      - name: mytomcat
        image: tomcat
        ports:
        - containerPort: 8080

apiVersion: v1
kind: Service
metadata:
  name: mytomcat
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
  selector:
    app: mytomcat
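
To try the two manifests above, a minimal sketch (the file names mytomcat-rc.yaml and mytomcat-svc.yaml are illustrative):

kubectl create -f mytomcat-rc.yaml     # creates the RC; it spawns 2 pods
kubectl create -f mytomcat-svc.yaml    # exposes them on NodePort 30001
kubectl get rc,pods -l app=mytomcat    # both pods should reach Running
kubectl get svc mytomcat               # confirm port 8080 -> nodePort 30001
curl http://<node-ip>:30001/           # Tomcat's default page, via any node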

Master

vi /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=simple
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start etcd:

systemctl daemon-reload
systemctl enable etcd.service
mkdir -p /var/lib/etcd/
systemctl start etcd.service
etcdctl cluster-health
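
Beyond cluster-health, a quick read/write sanity check against etcd, assuming the v2 etcdctl API that cluster-health implies (the key name is arbitrary):

etcdctl set /sanity/hello world    # write a test key
etcdctl get /sanity/hello          # should print: world
etcdctl rm /sanity/hello           # clean up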

vi /usr/lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=on-failure
Type=notify

[Install]
WantedBy=multi-user.target

mkdir /etc/kubernetes

vi /etc/kubernetes/apiserver

KUBE_API_ARGS="--storage-backend=etcd3 --etcd-servers=http://127.0.0.1:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --service-cluster-ip-range=169.169.0.0/16 --service-node-port-range=1-65535 --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,ResourceQuota --logtostderr=true --log-dir=/var/log/kubernetes --v=2"
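
Once kube-apiserver is started (below), the insecure port configured here can be probed directly; a sketch:

curl http://127.0.0.1:8080/healthz    # expect: ok
curl http://127.0.0.1:8080/version    # JSON including the gitVersion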

vi /usr/lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service

[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /etc/kubernetes/controller-manager

KUBE_CONTROLLER_MANAGER_ARGS="--master=http://192.168.158.150:8080 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"

vi /usr/lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service

[Service]
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /etc/kubernetes/scheduler

KUBE_SCHEDULER_ARGS="--master=http://192.168.158.150:8080 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"

Start the master services:

systemctl daemon-reload
systemctl enable kube-apiserver.service
systemctl start kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl start kube-controller-manager.service
systemctl enable kube-scheduler.service
systemctl start kube-scheduler.service

Check the health of each service:

systemctl status kube-apiserver.service
systemctl status kube-controller-manager.service
systemctl status kube-scheduler.service
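
Equivalently, a short loop plus a componentstatuses query against the insecure port (assuming kubectl is available on the master); a sketch:

for svc in kube-apiserver kube-controller-manager kube-scheduler; do
    echo -n "$svc: "; systemctl is-active $svc
done
kubectl -s http://127.0.0.1:8080 get cs    # scheduler, controller-manager, etcd-0 should be Healthy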

Node1

vi /usr/lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet $KUBE_LOG_LEVEL $KUBELET_ADDRESS \
          $KUBELET_HOSTNAME $KUBELET_POD_INFRA_CONTAINER $KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target

mkdir -p /var/lib/kubelet

vi /etc/kubernetes/kubelet

KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig --hostname-override=192.168.158.151 --logtostderr=false --log-dir=/var/log/kubernetes --v=2"

vi /etc/kubernetes/kubeconfig

apiVersion: v1
kind: Config
clusters:
- cluster:
    server: http://192.168.158.150:8080
  name: local
contexts:
- context:
    cluster: local
  name: mycontext
current-context: mycontext
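
Once kubelet and kube-proxy are running (below), the node should register itself with the master; a sketch, run from the master:

kubectl -s http://192.168.158.150:8080 get nodes    # expect 192.168.158.151 in Ready state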

vi /usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy $KUBE_LOG_LEVEL $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /etc/kubernetes/proxy

KUBE_PROXY_ARGS="--master=http://192.168.158.150:8080 --hostname-override=192.168.158.151 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"

Start (or restart) the node services:

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl restart kubelet
systemctl status kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy

View logs:

journalctl _PID=XXX
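
To find the PID of a unit's main process, one hedged approach via systemd (or just follow the unit's log directly):

systemctl show -p MainPID kubelet                                   # prints MainPID=<pid>
journalctl _PID=$(systemctl show -p MainPID kubelet | cut -d= -f2)  # logs for that PID
journalctl -u kubelet -f                                            # or follow the unit log instead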

Newer versions require swap to be disabled; the old swap-related kubelet flag no longer works.

Disable swap (does not persist across reboots):

swapoff -a

Edit /etc/fstab to disable swap permanently:

cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S')

CentOS

sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab

Red Hat

sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab

After editing, remount all mount points:

mount -a

Check swap status:

free -m
cat /proc/swaps

vi /etc/ansible/hosts

[k8s]
192.168.158.150
192.168.158.151
192.168.158.152
[master]
192.168.158.150
[node]
192.168.158.151
192.168.158.152
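
With this inventory in place, the repeated per-host steps can be fanned out with ad-hoc ansible commands; a sketch (assumes ansible is installed and the SSH key below has been distributed):

ansible k8s -m ping                                  # connectivity check across all three hosts
ansible k8s -m shell -a 'swapoff -a'                 # e.g. disable swap everywhere at once
ansible node -m shell -a 'systemctl status kubelet'  # target only the node group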

ssh-keygen -t rsa

ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.158.150

hostnamectl --static set-hostname k8s-node-1

echo '192.168.158.150 k8s-master
192.168.158.150 etcd
192.168.158.150 registry
192.168.158.151 k8s-node-1
192.168.158.152 k8s-node-2' >> /etc/hosts

yum install etcd -y

vi /etc/etcd/etcd.conf

An alternative approach (installing via kubeadm)

vi /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

modprobe br_netfilter

sysctl -p /etc/sysctl.d/k8s.conf

sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum list docker-ce --showduplicates

yum makecache fast

yum install docker-ce -y

vi /etc/docker/daemon.json

{"registry-mirrors":["https://w6pxadsb.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"],"exec-opts": ["native.cgroupdriver=systemd"]
}
systemctl daemon-reload
systemctl enable docker
systemctl start docker
systemctl status docker
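
Since kubelet expects the same cgroup driver as Docker, it is worth confirming the daemon.json above took effect; a sketch:

docker info | grep -i 'cgroup driver'       # expect: Cgroup Driver: systemd
docker info | grep -iA2 'registry mirrors'  # the Aliyun/USTC mirrors should be listed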

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet kubeadm kubectl

systemctl enable --now kubelet

master

kubeadm init --apiserver-advertise-address=192.168.158.150 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl cluster-info

node

kubeadm join 192.168.158.150:6443 --token tke6ck.o49skr479bfy2dy4 --discovery-token-ca-cert-hash sha256:e675d752e521e5c5c43ecfee4b0c0b51d7bb9bae87ee83410715329654432e76

kubeadm join 192.168.158.152:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad

Advantages of K8s

1. Failover: when a node shuts down or crashes, the services on it automatically move to another node, with no interruption to any service during the process. Plain Docker or an ordinary cloud host cannot do this.
2. Resource scheduling: when CPU or memory on the existing nodes runs short, you can add nodes, and kube-scheduler will place newly created pods on them.
3. Resource isolation: create dev, ops, and test namespaces; after switching context, developers see only the pods in the dev namespace and none in the ops namespace, so teams do not interfere with one another. On a traditional host, or with Docker alone, anyone who logs in sees every service and container. (A sketch follows this list.)
4. Process isolation: because workloads run in Docker containers, processes do not affect one another.
5. Security: different roles have different permissions for operations such as viewing or deleting pods; RBAC authentication strengthens K8s security.
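
A minimal sketch of the namespace isolation described in point 3; the cluster and user names below are kubeadm's defaults and are assumptions here:

kubectl create namespace dev
kubectl config set-context dev-context --cluster=kubernetes \
    --user=kubernetes-admin --namespace=dev    # cluster/user names are kubeadm defaults
kubectl config use-context dev-context
kubectl get pods                               # now scoped to the dev namespace only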

Kubernetes Basic Architecture and Common Terminology

Kubernetes is composed of the following core components:
    etcd stores the state of the entire cluster;
    the apiserver is the single entry point for resource operations and provides authentication, authorization, access control, API registration, and discovery;
    the controller manager maintains the cluster's state, handling fault detection, auto-scaling, rolling updates, and so on;
    the scheduler handles resource scheduling, placing Pods onto machines according to the configured scheduling policy;
    the kubelet maintains container lifecycles and also manages volumes (CVI) and networking (CNI);
    the container runtime manages images and actually runs Pods and containers (CRI);
    kube-proxy provides in-cluster service discovery and load balancing for Services.
Beyond the core components, there are recommended add-ons:
    kube-dns provides DNS for the whole cluster; an Ingress Controller provides an external entry point for Services; Heapster provides resource monitoring;
    Dashboard provides a GUI; Federation provides clusters spanning availability zones;
    Fluentd-elasticsearch provides cluster-wide log collection, storage, and querying.
Kubernetes' design philosophy and functionality amount to a layered architecture, much like Linux:
    Core layer: Kubernetes' most essential functionality; exposes APIs outward for building higher-level applications, and provides a plugin-style application execution environment inward.
    Application layer: deployment (stateless and stateful applications, batch jobs, clustered applications, etc.) and routing (service discovery, DNS resolution, etc.).
    Management layer: system metrics (infrastructure, container, and network metrics), automation (auto-scaling, dynamic provisioning, etc.), and policy management (RBAC, Quota, PSP, NetworkPolicy, etc.).
    Interface layer: the kubectl command-line tool, client SDKs, and cluster federation.
    Ecosystem: the vast container-cluster management and scheduling ecosystem above the interface layer, in two halves:
outside Kubernetes: logging, monitoring, configuration management, CI, CD, workflow, FaaS, OTS applications, ChatOps, and more; inside Kubernetes: CRI, CNI, CVI, image registries, Cloud Provider, and the cluster's own configuration and management.

1.2.1 Cluster

A Cluster is a pool of compute, storage, and network resources that Kubernetes uses to run container-based applications. A Kubernetes Cluster consists of a Master and Nodes, each running a number of Kubernetes services.

1.2.2 Master

The Master's main job is scheduling, i.e. deciding where applications run. The Master runs Linux and can be a physical or virtual machine. It is the brain of the Kubernetes Cluster, running daemons including kube-apiserver, kube-scheduler, kube-controller-manager, etcd, and the Pod network.
API Server (kube-apiserver)
    The API Server exposes the HTTP/HTTPS RESTful API, i.e. the Kubernetes API. It is the single entry point for CRUD operations on all Kubernetes resources and the entry-point process for cluster control.
Scheduler (kube-scheduler)
    The Scheduler is the process responsible for resource scheduling; simply put, it decides which Node each Pod runs on.
Controller Manager (kube-controller-manager)
    The automated control center for all resource objects. The Controller Manager manages the Cluster's resources and keeps them in their desired state. There are many controllers, such as the replication controller, endpoints controller, namespace controller, and serviceaccounts controller.
    Different controllers manage different resources; for example, the replication controller manages the lifecycle of Deployments, StatefulSets, and DaemonSets, while the namespace controller manages Namespace resources.
etcd
    etcd stores the Kubernetes Cluster's configuration and the state information of its resources. When data changes, etcd quickly notifies the relevant Kubernetes components.
Pod network
    Pods must be able to communicate with one another, so a Kubernetes Cluster has to deploy a Pod network; flannel is one of the options.

1.2.3 Node

Machines in the Kubernetes cluster other than the Master are called Nodes. A Node's job is to run container workloads. Nodes are managed by the Master: each Node monitors and reports container status, and manages container lifecycles as directed by the Master. Nodes also run Linux and can be physical or virtual machines.
Every Node runs the following key processes:
    kubelet: handles creating and starting the containers for each Pod, and works closely with the Master to implement basic cluster management.
    kube-proxy: the key component implementing Service communication and load balancing.
    Docker Engine: responsible for creating and managing containers on the local machine.

1.2.4 Pod

The Pod is Kubernetes' smallest unit and its most important, most fundamental concept. Each Pod contains one or more containers, and a Pod's containers are scheduled as a unit by the Master onto a Node. Kubernetes assigns each Pod a unique IP address, the Pod IP, which all containers in the Pod share. In Kubernetes, containers in one Pod can communicate directly with Pod containers on other hosts.

1.2.5 Service

A Kubernetes Service defines how the outside world accesses a particular set of Pods. A Service has its own IP and port and load-balances across its Pods. It is one of Kubernetes' most central resource objects: each Service is effectively one of the "microservices" of a microservice architecture.
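
For instance, instead of writing the Service manifest at the top of these notes by hand, the same RC could be exposed with a one-liner; a sketch (the Service name is illustrative):

kubectl expose rc mytomcat --type=NodePort --port=8080 --name=mytomcat-svc
kubectl get svc mytomcat-svc    # note the auto-assigned ClusterIP and NodePort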

1.2.6 Replication Controller

The Replication Controller (RC) is one of the core concepts in Kubernetes. An RC defines a desired scenario: it declares that the number of replicas of a certain Pod must match an expected value at all times. An RC definition therefore includes:
    the expected number of Pod replicas (replicas)
    a Label Selector used to identify the target Pods
    a Pod template (template) used to create new Pods when the replica count falls below the expected number

Some properties and uses of RCs, summarized (see the sketch after this list):

In most cases, we define an RC to automate Pod creation and replica-count control.
An RC contains a complete Pod definition template.
An RC controls Pod replicas automatically through its Label Selector.
Changing the replica count in an RC scales Pods out or in; changing the image version in the RC's Pod template performs a rolling upgrade.
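
A sketch of those last two operations against the mytomcat RC from the top of these notes (kubectl rolling-update existed in the v1.15/v1.17 era used here; it was removed in v1.18):

kubectl scale rc mytomcat --replicas=3                                  # scale out from 2 to 3 pods
kubectl rolling-update mytomcat --image=tomcat:9 --update-period=10s    # rolling image upgrade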

(1) Environment

IP address         OS           Role
192.168.158.150    CentOS 7.4   Master
192.168.158.151    CentOS 7.4   node1
192.168.158.152    CentOS 7.4   node2

(2) Base environment setup (run on every server)

1. Disable the firewall

[root@DEV004021 ~]# systemctl stop firewalld
[root@DEV004021 ~]# systemctl disable firewalld

2. Create the /etc/sysctl.d/k8s.conf file

[root@DEV004021 ~]# vim  /etc/sysctl.d/k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

3. Apply the changes above so they take effect.

[root@DEV004021 ~]# modprobe br_netfilter
[root@DEV004021 ~]# sysctl -p /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1

4. Disable swap

[root@DEV004021 ~]# sudo sed -i '/swap/s/^/#/' /etc/fstab
[root@DEV004021 ~]# sudo swapoff -a

5. Install Docker

5.1 Remove old versions of Docker

[root@DEV004021 ~]# sudo yum remove docker docker-client docker-client-latest \
    docker-common docker-latest docker-latest-logrotate docker-logrotate \
    docker-selinux docker-engine-selinux docker-engine

5.2 Install the required utilities

[root@DEV004021 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2

5.3 Add the yum repository and refresh the cache

[root@DEV004021 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

5.4 Update and install Docker

Check which versions are available:

[root@DEV004021 ~]# yum list docker-ce --showduplicates
[root@DEV004021 ~]# yum makecache fast
[root@DEV004021 ~]# yum install docker-ce -y

5.5 Configure registry mirrors

[root@localhost ~]# vi /etc/docker/daemon.json
{"registry-mirrors":["https://w6pxadsb.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"],"registry-mirrors": ["http://hub-mirror.c.163.com"]}

5.6 Enable and start the Docker service

[root@localhost ~]# systemctl enable docker
    Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@localhost ~]# systemctl start docker

6. Install kubelet, kubeadm, and kubectl

[root@DEV004021 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@DEV004021 ~]# yum install -y kubelet kubeadm kubectl
[root@DEV004021 ~]# systemctl enable --now kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

(3) Building the Kubernetes cluster

1. Initialize the Master node (run only on the master).

[root@DEV004021 ~]# kubeadm init --apiserver-advertise-address=192.168.158.150 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16

--pod-network-cidr: a prerequisite for the flannel install later; the value must be 10.244.0.0/16.
--image-repository: specifies the image registry; here, Aliyun's mirror registry.

2. When the output log ends as follows, initialization has succeeded.

[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.4.21:6443 --token dmzz6x.t864anv0btkyxjwi     --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1 

3. Run the following commands in sequence:

[root@DEV004021 ~]# mkdir -p $HOME/.kube
[root@DEV004021 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@DEV004021 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

4. Deploy the Pod network to the cluster

[root@otrs004021 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    podsecuritypolicy.extensions/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.extensions/kube-flannel-ds-amd64 created
    daemonset.extensions/kube-flannel-ds-arm64 created
    daemonset.extensions/kube-flannel-ds-arm created
    daemonset.extensions/kube-flannel-ds-ppc64le created
    daemonset.extensions/kube-flannel-ds-s390x created
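
A hedged check that the flannel DaemonSet pods actually came up, one per node:

kubectl -n kube-system get daemonset | grep flannel
kubectl -n kube-system get pods -o wide | grep flannel    # each should be Running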

5. The master node is now initialized; check the cluster-related information.

Cluster information:

[root@otrs004021 ~]# kubectl cluster-info
    Kubernetes master is running at https://192.168.4.21:6443
    KubeDNS is running at https://192.168.4.21:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

    To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

Node information:

[root@otrs004021 ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
otrs004097   Ready    master   6m27s   v1.15.2

Pod information:

[root@otrs004021 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-bccdc95cf-f5wtc              1/1     Running   0          6m32s
kube-system   coredns-bccdc95cf-lnp2j              1/1     Running   0          6m32s
kube-system   etcd-otrs004097                      1/1     Running   0          5m56s
kube-system   kube-apiserver-otrs004097            1/1     Running   0          5m38s
kube-system   kube-controller-manager-otrs004097   1/1     Running   0          5m40s
kube-system   kube-flannel-ds-amd64-xqdcf          1/1     Running   0          2m10s
kube-system   kube-proxy-2lz96                     1/1     Running   0          6m33s
kube-system   kube-scheduler-otrs004097            1/1     Running   0          5m45s

If initialization goes wrong, reset with the following commands:

[root@DEV004021 ~]# kubeadm reset

[root@DEV004021 ~]# rm -rf /var/lib/cni/

[root@DEV004021 ~]# rm -f $HOME/.kube/config

(4) Adding the other Kubernetes nodes; there are two methods.

Method 1: join using the token from master initialization

[root@DEV004021 ~]# kubeadm join 192.168.158.152:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad

Method 2: generate a new token to join

[root@otrs004021 ~]# kubeadm token generate
    3o7wop.z2kxzhy7p0zwnb3v
[root@otrs004021 ~]# kubeadm token create 3o7wop.z2kxzhy7p0zwnb3v  --print-join-command --ttl=24h
kubeadm join 192.168.4.21:6443 --token 3o7wop.z2kxzhy7p0zwnb3v     --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1 
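
Existing tokens and their TTLs can be listed at any time, and the two steps above can also be collapsed into one:

kubeadm token list                           # shows token, TTL, and usages
kubeadm token create --print-join-command    # generate and print the join command in one step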

2. Run the following commands on each remaining node to join the cluster:

[root@DEV004021 ~]# kubeadm join 192.168.4.21:6443 --token 3o7wop.z2kxzhy7p0zwnb3v     --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1

kubeadm join 192.168.158.152:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad

[root@DEV004021 yum.repos.d]# kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
dev004019   Ready    <none>   3d    v1.15.2
dev004020   Ready    <none>   3d    v1.15.2
dev004021   Ready    master   3d    v1.15.2

At this point, a K8s cluster with 1 master + 2 nodes has been created successfully.

http://m.mamicode.com/info-detail-2749150.html

View logs:

journalctl -f -u kubelet

Reset:

kubeadm reset

kubeadm join 192.168.158.150:6443 --token tke6ck.o49skr479bfy2dy4 --discovery-token-ca-cert-hash sha256:e675d752e521e5c5c43ecfee4b0c0b51d7bb9bae87ee83410715329654432e76

vi /etc/sysconfig/kubelet

KUBELET_EXTRA_ARGS="--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice"

sudo systemctl restart kubelet

docker pull quay.io/coreos/flannel:v0.11.0-amd64

kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d    # print the kuboard-user service-account token

eyJhbGciOiJSUzI1NiIsImtpZCI6ImRCSzRmazBmRFpybm5WNXJCVnoxck51bm1meEk3T3VOTXFIdVNCd1JCeFEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJvYXJkLXVzZXItdG9rZW4tMjc3OWgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMWJiYTY4YmEtMjliOC00OGUyLTk4NzItNmNhYWIwMzFkNWFmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmt1Ym9hcmQtdXNlciJ9.IlKUsZDZLFRmyheOlRcL_YgcTQf-vDIq9cCJsyob_Tdm9YnKl-r2Hy5u4w9FfKTxpvUqjjDnIdmYi5Ck24gjnR7xFFPrC715Vac52v_AEecxAsgM8DLa-cV_NQ2uzJfvFgoWevKoC1YkSKeIymkWm1NcR60A49h445pLdQxmRuUegP-AowR6VEEXLUhbrwKyp6FcjZEKnE7PLTrnHY4MJ30jztKHg1TR7r6MhZxanCAacfaDz57TXHE-b80FtiOAzz9FuEK25w7kmz6Pdjoputdd3Dixj6GJKzjEzWArll7y0G8AtuzLshsUNxcN5ikuKLGqI2_ZctZFuevZ1_Tkzw

kubectl get deployment -n kube-system
kubectl delete deployment metrics-server -n kube-system

Dashboard token:
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRCSzRmazBmRFpybm5WNXJCVnoxck51bm1meEk3T3VOTXFIdVNCd1JCeFEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTQ5djl6Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIxYmU1MzMyOC05MGNhLTQzN2EtOGFmNS0xY2FjMGJmNmMxODQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.HD0lanYG35m2lGsI-L_mMLwXT55BWONypmnb2RhTdnJhO2Y5Xaa7wR7Q_pVsS6TU-CM6T13muoKOgbdJzf_ShQcDj9ElO6UiURYFSR_kasODRVqPhUCVqANN2ErLzgoX9Kpcy10E8qCn8x2r99X_Qxogoo1ncAL2JPZMXQcXOHE9JKFBLS6jX8K2FIGu74qSW7sztMcHC_WCNKcFpX3LDF_1KL5fYyoe2xMswnxa-K4cXrkPQo9Wkdu-NpUaZ9eUqVQX_8L3lP4luM6hXFR_5aSxIWGMXYVxyezdIZS3pbmtaD4zOUlHShJiQAn5SAHUprTvaqecn520G3k_peSHXA
