# Stop and disable the firewalld service.
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]# systemctl stop iptables
Failed to stop iptables.service: Unit iptables.service not loaded.
[root@localhost ~]# systemctl disable iptables
Failed to execute operation: No such file or directory
2.6.5 Disable SELinux.
SELinux is a security service in Linux. If it is not disabled, all sorts of strange problems can occur while installing the cluster.
[root@localhost ~]# getenforce
Enforcing
vim /etc/selinux/config
# Edit the /etc/selinux/config file and change the value of SELINUX to disabled.
# Note: Linux must be rebooted for the change to take effect.
SELINUX=disabled

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
# SELINUX=enforcing
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted
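If a reboot is not convenient right away, SELinux can also be switched to permissive mode for the current session (the config file change above still makes the setting permanent). A minimal example:

[root@localhost ~]# setenforce 0
[root@localhost ~]# getenforce
Permissive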
[root@localhost ~]# vim /etc/fstab

#
# /etc/fstab
# Created by anaconda on Thu Dec 1 01:25:13 2022
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos_localhost-root /                         xfs  defaults  0 0
UUID=c393aaa9-2e37-4fa2-8b32-e8b88af1e576 /boot             xfs  defaults  0 0
/dev/mapper/centos_localhost-home /home                     xfs  defaults  0 0
# /dev/mapper/centos_localhost-swap swap                    swap defaults  0 0
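Commenting out the swap line only takes effect after a reboot. To turn swap off immediately as well, a common companion step (shown here as a brief example) is:

[root@localhost ~]# swapoff -a
[root@localhost ~]# free -m    # the Swap line should now show 0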
# Before installing the kubernetes cluster, the required images must be prepared in advance. They can be listed with the following command.
[root@localhost ~]# kubeadm config images list
W1201 23:46:05.541002 55376 version.go:101] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://storage.googleapis.com/kubernetes-release/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W1201 23:46:05.541156 55376 version.go:102] falling back to the local client version: v1.17.4
W1201 23:46:05.541308 55376 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1201 23:46:05.541316 55376 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.4
k8s.gcr.io/kube-controller-manager:v1.17.4
k8s.gcr.io/kube-scheduler:v1.17.4
k8s.gcr.io/kube-proxy:v1.17.4
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
[root@localhost ~]#
# Download the images.
# These images live in the kubernetes registry, which cannot be reached here due to network restrictions; the alternative below pulls them from an Aliyun mirror instead.
images=(
kube-apiserver:v1.17.4
kube-controller-manager:v1.17.4
kube-scheduler:v1.17.4
kube-proxy:v1.17.4
pause:3.1
etcd:3.4.3-0
coredns:1.6.5
)

for imageName in ${images[@]} ; do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
[root@localhost ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-proxy v1.17.4 6dec7cfde1e5 2 years ago 116MB
k8s.gcr.io/kube-apiserver v1.17.4 2e1ba57fe95a 2 years ago 171MB
k8s.gcr.io/kube-controller-manager v1.17.4 7f997fcf3e94 2 years ago 161MB
k8s.gcr.io/kube-scheduler v1.17.4 5db16c1c7aff 2 years ago 94.4MB
k8s.gcr.io/coredns 1.6.5 70f311871ae1 3 years ago 41.6MB
k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 3 years ago 288MB
k8s.gcr.io/pause 3.1 da86e6ba6ca1 4 years ago 742kB
# Create the cluster.
[root@master ~]# kubeadm init \
--apiserver-advertise-address=192.168.142.150 \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.17.4 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
# To start using your cluster, you need to run the following as a regular user:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
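For the root user there is also a simpler alternative (not shown in the output above): point kubectl directly at the admin kubeconfig through an environment variable.

[root@master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf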
[root@localhost ~]# kubeadm init \
> --apiserver-advertise-address=192.168.142.150 \
> --image-repository=registry.aliyuncs.com/google_containers \
> --kubernetes-version=v1.17.4 \
> --pod-network-cidr=10.244.0.0/16 \
> --service-cidr=10.96.0.0/12
W1202 00:11:31.437970 57290 validation.go:28] Cannot validate kubelet config - no validator is available
W1202 00:11:31.438028 57290 validation.go:28] Cannot validate kube-proxy config - no validator is available
[init] Using Kubernetes version: v1.17.4
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain.k8s.master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.142.150]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain.k8s.master localhost] and IPs [192.168.142.150 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain.k8s.master localhost] and IPs [192.168.142.150 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W1202 00:19:51.720839   57290 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W1202 00:19:51.721669   57290 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 23.003244 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain.k8s.master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain.k8s.master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xs70mz.1j3eaj8unj3g11cp
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.142.150:6443 --token xs70mz.1j3eaj8unj3g11cp \
--discovery-token-ca-cert-hash sha256:8cb8adbc0147bc1c15fc689f98ab49e8442d16d28e85062d2db9b8f47225c6cc
[root@localhost ~]#
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master NotReady master 4m11s v1.17.4
The following step only needs to be run on the node machines.
[root@localhost ~]# kubeadm join 192.168.142.150:6443 --token xs70mz.1j3eaj8unj3g11cp \
> --discovery-token-ca-cert-hash sha256:8cb8adbc0147bc1c15fc689f98ab49e8442d16d28e85062d2db9b8f47225c6cc
W1202 00:25:46.290777   86902 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Check the node information on the master.
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master NotReady master 6m31s v1.17.4
localhost.localdomain.k8s.node1 NotReady <none> 56s v1.17.4
localhost.localdomain.k8s.node2 NotReady <none> 52s v1.17.4
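If more nodes need to be added later and the original join token has expired (tokens are only valid for 24 hours by default), a fresh join command can be printed on the master, for example:

[root@localhost ~]# kubeadm token create --print-join-command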
[root@localhost k8s]# kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@localhost k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master Ready master 62m v1.17.4
localhost.localdomain.k8s.node1 Ready <none> 57m v1.17.4
localhost.localdomain.k8s.node2 Ready <none> 57m v1.17.4
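The nginx Pod and Service queried below were created beforehand; the exact commands are not shown in this capture, but based on the resource names in the output they would have been along these lines (a hypothetical reconstruction, not the author's recorded commands):

[root@localhost k8s]# kubectl create deployment nginx --image=nginx
[root@localhost k8s]# kubectl expose deployment nginx --port=80 --type=NodePort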
[root@localhost k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-5p8k4 0/1 ContainerCreating 0 83s
[root@localhost k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-5p8k4 1/1 Running 0 2m47s
[root@localhost k8s]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        71m
nginx        NodePort    10.107.6.137   <none>        80:31567/TCP   67s
Kubernetes is essentially a cluster system in which users can deploy all kinds of services. "Deploying a service" really means running containers in the kubernetes cluster and running the specified programs inside those containers. The smallest management unit in kubernetes is the Pod, not the container, so containers must be placed inside Pods; and kubernetes generally does not manage Pods directly either, but manages them through Pod controllers. Once a Pod can provide a service, the next question is how to access the service in the Pod; kubernetes provides the Service resource for this. And if the data of the programs in a Pod needs to be persisted, kubernetes also provides various storage systems.
# List all pods.
kubectl get pod
# View a specific pod.
kubectl get pod pod_name
# View a specific pod, displaying the result in yaml / json format.
kubectl get pod pod_name -o yaml
[root@localhost ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.4", GitCommit:"8d8aa39598534325ad77120c120a22b3a990b5ea", GitTreeState:"clean", BuildDate:"2020-03-12T21:03:42Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.4", GitCommit:"8d8aa39598534325ad77120c120a22b3a990b5ea", GitTreeState:"clean", BuildDate:"2020-03-12T20:55:23Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}
[root@localhost ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.142.150:6443
KubeDNS is running at https://192.168.142.150:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
Resource types.
Everything in kubernetes is abstracted as a resource; the available resources can be listed with the command below.
kubectl api-resources
The most frequently used resources are listed below.
| Resource category | Resource name | Abbreviation | Purpose |
| --- | --- | --- | --- |
| Cluster-level resources | nodes | no | Components of the cluster |
| | namespaces | ns | Isolate Pods |
| Pod resources | pods | po | Host containers |
| Pod controller resources | replicationcontrollers | rc | Control pod resources |
| | replicasets | rs | Control pod resources |
| | deployments | deploy | Control pod resources |
| | daemonsets | ds | Control pod resources |
| | jobs | | Control pod resources |
| | cronjobs | cj | Control pod resources |
| | horizontalpodautoscalers | hpa | Control pod resources |
| | statefulsets | sts | Control pod resources |
| Service discovery resources | services | svc | Unified external access point for pods |
| | ingress | ing | Unified external access point for pods |
| Storage resources | volumeattachments | | Storage |
| | persistentvolumes | pv | Storage |
| | persistentvolumeclaims | pvc | Storage |
| Configuration resources | configmaps | cm | Configuration |
| | secrets | | Configuration |
Operations.
kubernetes allows many kinds of operations on resources; the detailed commands can be viewed with --help.
kubectl --help
The most frequently used operations are listed below.
| Command category | Command | Purpose |
| --- | --- | --- |
| Basic commands | create | Create a resource |
| | edit | Edit a resource |
| | get | Get a resource |
| | patch | Update a resource |
| | delete | Delete a resource |
| | explain | Show documentation for a resource |
| Run and debug | run | Run a specified image in the cluster |
| | expose | Expose a resource as a Service |
| | describe | Show detailed information about a resource |
| | logs | Print the logs of a container in a pod |
| | attach | Attach to a running container |
| | exec | Execute a command in a container |
| | cp | Copy files between a Pod and the outside |
| | rollout | Manage the rollout of a resource |
| | scale | Scale the number of Pods up or down |
| | autoscale | Automatically adjust the number of Pods |
| Advanced commands | apply | Configure a resource from a file |
| | label | Update the labels on a resource |
| Other commands | cluster-info | Display cluster information |
| | version | Display the current Server and Client versions |
Below, creating and deleting a namespace / pod is used to briefly demonstrate these commands.
# Create a namespace.
[root@localhost ~]# kubectl create namespace dev
namespace/dev created
# Get the namespaces.
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
dev Active 17s
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
# Create and run an nginx Pod in this namespace.
[root@localhost ~]# kubectl run pod --image=nginx:1.17.1 -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/pod created
# View the newly created pod. Without -n dev, the default namespace is queried.
[root@localhost ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod-644584df94-5gx6f 1/1 Running 0 3m33s
[root@localhost ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-tlmw5 1/1 Running 0 25m
[root@localhost ~]# kubectl get pod -n default
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-tlmw5 1/1 Running 0 26m
# Delete the specified pod.
[root@localhost ~]# kubectl delete pods pod-644584df94-5gx6f -n dev
pod "pod-644584df94-5gx6f" deleted
[root@localhost ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod-644584df94-wzzcv 0/1 ContainerCreating 0 40s
# After the deletion a new pod was created again - this is done by the controller, explained later.
# Delete the specified namespace.
[root@localhost ~]# kubectl delete ns dev
namespace "dev" deleted
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
[root@localhost ~]# kubectl get pod -n dev
No resources found in dev namespace.
[root@localhost ~]# kubectl get pod -n dev
[root@localhost ~]# kubectl describe pods pod-644584df94-5gx6f -n dev
Name: pod-644584df94-5gx6f
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Fri, 02 Dec 2022 13:35:27 +0800
Labels: pod-template-hash=644584df94
run=pod
Annotations: <none>
Status: Running
IP: 10.244.2.3
IPs:
IP: 10.244.2.3
Controlled By: ReplicaSet/pod-644584df94
Containers:
pod:
Container ID: docker://26a71073d6b9f116bd7411aacdf862f77c1d1f485844e33a55aa2590edeb8614
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
Port: <none>
Host Port: <none>
State: Running
Started: Fri, 02 Dec 2022 13:36:34 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-khzf8 (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-khzf8:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-khzf8
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 5m24s default-scheduler Successfully assigned dev/pod-644584df94-5gx6f to localhost.localdomain.k8s.node2
Normal Pulling 5m23s kubelet, localhost.localdomain.k8s.node2 Pulling image "nginx:1.17.1"
Normal Pulled 4m19s kubelet, localhost.localdomain.k8s.node2 Successfully pulled image "nginx:1.17.1"
Normal Created 4m18s kubelet, localhost.localdomain.k8s.node2 Created container pod
Normal Started 4m17s kubelet, localhost.localdomain.k8s.node2 Started container pod
3.3.2 Imperative object configuration.
Imperative object configuration means operating on kubernetes resources with commands together with configuration files.
1) Create an nginxpod.yaml with the following content.
apiVersion: v1
kind: Namespace
metadata:
  name: dev

---

apiVersion: v1
kind: Pod
metadata:
  name: nginxpod
  namespace: dev
spec:
  containers:
  - name: nginx-containers
    image: nginx:1.17.1
2) Run the create command to create the resources.
[root@localhost k8s]# kubectl create -f nginxpod.yaml
namespace/dev created
pod/nginxpod created
Two resource objects were created: a namespace and a pod.
[root@localhost k8s]# kubectl get ns dev
NAME STATUS AGE
dev Active 37s
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginxpod 0/1 ContainerCreating 0 43s
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginxpod 1/1 Running 0 90s
3) Run the get command to view the resources.
[root@localhost k8s]# kubectl get -f nginxpod.yaml
NAME STATUS AGE
namespace/dev Active 2m15s
NAME READY STATUS RESTARTS AGE
pod/nginxpod 1/1 Running 0 2m14s
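To clean up, the same file can be used to delete both resources in one step; a brief example of the command (output omitted):

[root@localhost k8s]# kubectl delete -f nginxpod.yaml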
By default, all Pods in a kubernetes cluster can reach each other. In practice, however, you may not want two Pods to be able to access each other; in that case they can be placed in different namespaces. By assigning cluster resources to different Namespaces, kubernetes forms logical "groups" so that the resources of different groups can be isolated and managed separately.
Through kubernetes' authorization mechanism, different namespaces can be handed over to different tenants to manage, achieving multi-tenant resource isolation. Combined with kubernetes' resource quota mechanism, the resources each tenant may consume (CPU, memory, and so on) can also be limited.
After the cluster starts, kubernetes creates several namespaces by default.
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default           Active   13h   # All objects without a specified Namespace are placed in the default namespace.
dev               Active   17m
kube-flannel      Active   12h
kube-node-lease   Active   13h   # Heartbeat maintenance between cluster nodes, introduced in v1.13.
kube-public       Active   13h   # Resources in this namespace can be accessed by everyone (including unauthenticated users).
kube-system       Active   13h   # All resources created by the Kubernetes system live in this namespace.
The concrete operations on the namespace resource are shown below.
4.1.1 View.
# List all namespaces. Command: kubectl get ns
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
dev Active 23m
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
[root@localhost ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-5959v 1/1 Running 0 13h
coredns-9d85f5447-gvqxh 1/1 Running 0 13h
etcd-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-apiserver-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-controller-manager-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-proxy-7dc95 1/1 Running 1 13h
kube-proxy-7hss2 1/1 Running 0 13h
kube-proxy-rpnvx 1/1 Running 0 13h
kube-scheduler-localhost.localdomain.k8s.master 1/1 Running 0 13h
# View a specific namespace. Command: kubectl get ns <ns-name>
[root@localhost ~]# kubectl get ns default
NAME STATUS AGE
default Active 13h
# Specify the output format. Command: kubectl get ns <ns-name> -o <format>
# kubernetes supports many formats; the most common are wide, json, and yaml.
[root@localhost ~]# kubectl get ns default -o yaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: "2022-12-01T16:20:14Z"
name: default
resourceVersion: "146"
selfLink: /api/v1/namespaces/default
uid: 11fb6c7d-f67f-4d9e-8e2b-66621c0d5d08
spec:
finalizers:
- kubernetes
status:
phase: Active
# View namespace details. Command: kubectl describe ns <ns-name>
[root@localhost ~]# kubectl describe ns default
Name: default
Labels: <none>
Annotations: <none>
Status: Active
No resource quota.
No LimitRange resource.
# Status: Active means the namespace is in use, Terminating means the namespace is being deleted.
# ResourceQuota: resource limits applied to the namespace as a whole.
# LimitRange: resource limits applied to each individual component within the namespace.
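As an illustration of the quota mechanism mentioned above, a ResourceQuota can be attached to a namespace with a manifest like the following minimal sketch (the name and limit values are made up for the example):

apiVersion: v1
kind: ResourceQuota
metadata:
  name: dev-quota          # hypothetical name
  namespace: dev
spec:
  hard:
    pods: "10"             # at most 10 Pods in the namespace
    requests.cpu: "2"      # total CPU requests across the namespace
    requests.memory: 2Gi   # total memory requests across the namespace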
4.1.2 Create.
# Create a namespace.
[root@master ~]# kubectl create ns dev
namespace/dev created
# Command format: kubectl run <pod-controller-name> [args]
# --image      specify the Pod's image.
# --port       specify the port.
# --namespace  specify the namespace.
[root@localhost k8s]# kubectl run nginx --image=nginx:1.17.1 --port=80 --namespace dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 2m8s
[root@localhost k8s]# kubectl get pod -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-kwbhk   1/1     Running   0          2m13s   10.244.2.7   localhost.localdomain.k8s.node2   <none>           <none>
# READY shows ready containers / total containers in the pod.
4.2.2 View pod information.
# View the Pod's basic information.
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 4m26s
# View the Pod's detailed information.
[root@localhost k8s]# kubectl describe pod nginx -n dev
Name: nginx-64777cd554-kwbhk
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Fri, 02 Dec 2022 14:41:15 +0800
Labels: pod-template-hash=64777cd554
run=nginx
Annotations: <none>
Status: Running
IP: 10.244.2.7
IPs:
IP: 10.244.2.7
Controlled By: ReplicaSet/nginx-64777cd554
Containers:
nginx:
Container ID: docker://f96e2bc540280474d7e6f7942ae42c08b485b86e18c090152d0bcdf6ea6fed21
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:b4b9b3eee194703fc2fa8afa5b7510c77ae70cfba567af1376a573a967c03dbb
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Fri, 02 Dec 2022 14:41:17 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-z9vht (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-z9vht:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-z9vht
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 4m37s default-scheduler Successfully assigned dev/nginx-64777cd554-kwbhk to localhost.localdomain.k8s.node2
Normal Pulled 4m36s kubelet, localhost.localdomain.k8s.node2 Container image "nginx:1.17.1" already present on machine
Normal Created 4m36s kubelet, localhost.localdomain.k8s.node2 Created container nginx
Normal Started 4m35s kubelet, localhost.localdomain.k8s.node2 Started container nginx
4.2.3 Access the Pod.
# Get the pod IP.
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-kwbhk   1/1     Running   0          8m32s   10.244.2.7   localhost.localdomain.k8s.node2   <none>           <none>

# Access the Pod.
[root@localhost k8s]# curl http://10.244.2.7:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
4.2.4 Delete a specific Pod.
# Delete the specified Pod.
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 11m
# The Pod appears to have been deleted successfully, but querying again shows a new one has been created.
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-xwknq 1/1 Running 0 26s
# This is because the Pod was created by a Pod controller (kubectl run <pod-controller-name> [args]); the controller watches the Pod's state and rebuilds it as soon as it finds the Pod has died.
# To really delete the Pod, the Pod controller itself must be deleted.
# First, list the Pod controllers in the current namespace.
[root@localhost k8s]# kubectl get deploy -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx   1/1     1            1           12m
# Next, delete this Pod controller.
[root@localhost k8s]# kubectl delete deploy nginx -n dev
deployment.apps "nginx" deleted
# Wait a moment and query the Pods again: they are now gone.
[root@localhost k8s]# kubectl get pods -n dev
No resources found in dev namespace.
4.2.5 Configuration-based operations.
Create a pod-nginx.yaml with the following content.
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
Then the corresponding create and delete commands can be run.
[root@localhost k8s]# vim pod-nginx.yaml
[root@localhost k8s]# kubectl create -f pod-nginx.yaml
pod/nginx created
[root@localhost k8s]# kubectl delete -f pod-nginx.yaml
pod "nginx" deleted
name in (master, slave): selects all objects whose Label has key "name" and value "master" or "slave". name not in (frontend): selects all objects whose Label has key "name" and a value other than "frontend".
name=slave,env!=production name not in (frontend),env!=production
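These selector expressions can be passed straight to kubectl with -l. A brief illustration, using the example labels above:

kubectl get pod -l "name in (master, slave)" --show-labels
kubectl get pod -l "name=slave,env!=production" --show-labels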
4.3.1 Command-based approach.
[root@localhost k8s]# kubectl create -f pod-nginx.yaml
pod/nginx created
# View the labels.
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx   1/1     Running   0          69s   <none>

# Label the pod resource.
[root@localhost k8s]# kubectl label pod nginx version=1.0 -n dev
pod/nginx labeled
# View the labels.
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx   1/1     Running   0          2m19s   version=1.0

# Append another label.
[root@localhost k8s]# kubectl label pod nginx tier=back -n dev
pod/nginx labeled
# View the labels.
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx   1/1     Running   0          3m4s   tier=back,version=1.0

# Update an existing label on the pod resource.
[root@localhost k8s]# kubectl label pod nginx version=2.0 -n dev
error: 'version' already has a value (1.0), and --overwrite is false
[root@localhost k8s]# kubectl label pod nginx version=2.0 -n dev --overwrite
pod/nginx labeled
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
# Filter by label.
apiVersion: v1
kind: Pod
metadata:
name: nginx1
namespace: dev
spec:
containers:
- image: nginx:1.17.1
name: pod
ports:
- name: nginx-port
containerPort: 80
protocol: TCP
[root@localhost k8s]# vim pod-nginx1.yaml
[root@localhost k8s]# kubectl create -f pod-nginx1.yaml
pod/nginx1 created
[root@localhost k8s]# kubectl label pod nginx1 version=1.0 -n dev --overwrite
pod/nginx1 labeled
[root@localhost k8s]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 10m tier=back,version=2.0
nginx1   1/1     Running   0          48s   version=1.0

[root@localhost k8s]# kubectl get pod -n dev -l version=2.0 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx   1/1     Running   0          11m   tier=back,version=2.0

[root@localhost k8s]# kubectl get pod -n dev -l version!=2.0 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx1   1/1     Running   0          117s   version=1.0

[root@localhost k8s]# kubectl get pod -n dev -l version=3.0 --show-labels
No resources found in dev namespace.
# Remove a label.
[root@localhost k8s]# kubectl label pod nginx tier- -n dev
pod/nginx labeled
[root@localhost k8s]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 14m version=2.0
nginx1 1/1 Running 0 4m32s version=1.0
4.3.2 Configuration-based approach.
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    version: "3.0"
    env: "test"
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
Then the corresponding update command can be run: kubectl apply -f pod-nginx.yaml.
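Unlike create, apply is declarative: if the resource does not exist it is created, and if it already exists it is updated to match the file, so the same command can simply be re-run after editing the manifest. A brief example:

[root@localhost k8s]# kubectl apply -f pod-nginx.yaml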
4.4 Deployment.
In kubernetes, the Pod is the smallest unit of control, but kubernetes rarely controls Pods directly; this is usually done through Pod controllers. A Pod controller manages pods, keeps pod resources in the desired state, and tries to restart or rebuild pods when they fail.
There are many kinds of Pod controllers in kubernetes; this section introduces only one of them: Deployment.
4.4.1 Command-based operations.
kubectl run ... uses a pod controller under the hood.
# Command format: kubectl create deployment <name> [args]
# --image      specify the pod's image.
# --port       specify the port.
# --replicas   specify the number of pods to create.
# --namespace  specify the namespace.
[root@localhost ~]# kubectl delete ns dev
namespace "dev" deleted
[root@localhost ~]# kubectl create ns dev
namespace/dev created
[root@localhost ~]# kubectl get deployment, pods -n dev
error: arguments in resource/name form must have a single resource and name
[root@localhost ~]# kubectl get deployment,pods -n dev
No resources found in dev namespace.
[root@localhost ~]# kubectl run nginx --image=nginx:1.17.1 --port=80 --replicas=3 -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
# View the created Pods.
[root@localhost ~]# kubectl get deployment,pods -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx   2/3     3            2           44s
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-64kzc 0/1 ContainerCreating 0 44s
pod/nginx-64777cd554-6zmhj 1/1 Running 0 44s
pod/nginx-64777cd554-tdrjw 1/1 Running 0 44s
# View the deployment's information.
[root@localhost ~]# kubectl get deploy -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx   3/3     3            3           4m16s
[root@localhost ~]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-64777cd554-64kzc 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
nginx-64777cd554-6zmhj 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
nginx-64777cd554-tdrjw 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
# UP-TO-DATE: number of replicas that have been successfully updated.
# AVAILABLE: number of available replicas.
[root@localhost ~]# kubectl get deploy -n dev -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx   3/3     3            3           6m7s   nginx        nginx:1.17.1   run=nginx
# View the deployment's detailed information.
[root@localhost ~]# kubectl describe deploy nginx -n dev
Name: nginx
Namespace: dev
CreationTimestamp: Fri, 02 Dec 2022 23:28:04 +0800
Labels: run=nginx
Annotations: deployment.kubernetes.io/revision: 1
Selector: run=nginx
Replicas: 3 desired |3 updated |3 total |3 available |0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: run=nginx
Containers:
nginx:
Image: nginx:1.17.1
Port: 80/TCP
Host Port: 0/TCP
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-64777cd554 (3/3 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
  Normal  ScalingReplicaSet  6m33s  deployment-controller  Scaled up replica set nginx-64777cd554 to 3

# Delete the deployment.
[root@localhost ~]# kubectl delete deploy nginx -n dev
deployment.apps "nginx" deleted
[root@localhost ~]# kubectl get pods -n dev
No resources found in dev namespace.
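The manifest file deploy-nginx.yaml used below is not reproduced in this capture. Based on the output that follows (3 replicas, selector run=nginx, image nginx:1.17.1), it would have looked roughly like this sketch:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: dev
spec:
  replicas: 3
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.1
        ports:
        - containerPort: 80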
[root@localhost k8s]# vim deploy-nginx.yaml
[root@localhost k8s]# kubectl create -f deploy-nginx.yaml
deployment.apps/nginx created
[root@localhost k8s]# kubectl get deployment,pods -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx   3/3     3            3           33s
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-25w54 1/1 Running 0 32s
pod/nginx-64777cd554-p862l 1/1 Running 0 32s
pod/nginx-64777cd554-ppsbl 1/1 Running 0 33s
[root@localhost k8s]# kubectl delete -f deploy-nginx.yaml
deployment.apps "nginx" deleted
[root@localhost k8s]# kubectl get deployment,pods -n dev
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-25w54 0/1 Terminating 0 59s
pod/nginx-64777cd554-p862l 0/1 Terminating 0 59s
pod/nginx-64777cd554-ppsbl 0/1 Terminating 0 60s
[root@localhost k8s]# kubectl get deployment,pods -n dev
No resources found in dev namespace.
[root@localhost k8s]#
4.5 Service.
From the previous section we can already use a Deployment to create a group of Pods that provide a highly available service.
[root@localhost k8s]# kubectl create -f deploy-nginx.yaml
deployment.apps/nginx created
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-8d26x   1/1     Running   0          19s   10.244.1.5    localhost.localdomain.k8s.node1   <none>           <none>
nginx-64777cd554-dtm27   1/1     Running   0          19s   10.244.2.17   localhost.localdomain.k8s.node2   <none>           <none>
nginx-64777cd554-t9x4n   1/1     Running   0          19s   10.244.2.16   localhost.localdomain.k8s.node2   <none>           <none>

[root@localhost k8s]# curl 10.244.1.5
curl: (7) Failed connect to 10.244.1.5:80; Connection refused
# After deletion, a new pod is created and its IP changes.
[root@localhost k8s]# kubectl delete pod nginx-64777cd554-t9x4n -n dev
pod "nginx-64777cd554-t9x4n" deleted
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-8d26x 1/1 Running 0 4m50s 10.244.1.5 localhost.localdomain.k8s.node1 <none><none>
nginx-64777cd554-ck66j 1/1 Running 0 10s 10.244.2.18 localhost.localdomain.k8s.node2 <none><none>
nginx-64777cd554-dtm27 1/1 Running 0 4m50s 10.244.2.17 localhost.localdomain.k8s.node2 <none><none>
Although each Pod is assigned its own Pod IP, two problems remain:
The Pod IP changes whenever the Pod is rebuilt.
The Pod IP is a virtual IP visible only inside the cluster and cannot be reached from outside.
This makes accessing the service difficult, so kubernetes designed the Service resource to solve it.
A Service can be seen as a unified access point for a group of Pods of the same kind. With a Service, applications can easily achieve service discovery and load balancing.
4.5.1 Create a Service accessible inside the cluster.
# Expose the Service.
[root@localhost k8s]# kubectl expose deploy nginx --name=svc-nginx1 --type=ClusterIP --port=80 --target-port=80 -n dev
service/svc-nginx1 exposed
# View the service.
[root@localhost k8s]# kubectl get service -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx1   ClusterIP   10.104.13.248   <none>        80/TCP    17s
# A CLUSTER-IP has been assigned; this is the Service's IP, and it does not change during the Service's lifetime.
# The Pods behind the current service can be accessed through this IP.
[root@master ~]# curl 10.104.13.248:80
<!DOCTYPE html>
<html>
<head><title>Welcome to nginx!</title></head>
<body>
<h1>Welcome to nginx!</h1>
.......
</body>
</html>
4.5.2 Create a Service that is also accessible from outside the cluster.
# The Service created above has type ClusterIP, so its IP is only reachable inside the cluster.
# To create a Service that can also be reached from outside, change the type to NodePort.
[root@localhost k8s]# kubectl expose deploy nginx --name=svc-nginx2 --type=NodePort --port=80 --target-port=80 -n dev
service/svc-nginx2 exposed
# Listing the services now shows a NodePort-type Service with a port pair (80:30228/TCP).
[root@localhost k8s]# kubectl get svc -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx1   ClusterIP   10.104.13.248   <none>        80/TCP         22m
svc-nginx2   NodePort    10.97.109.91    <none>        80:30228/TCP   2m17s
# The service can now be accessed from a host outside the cluster via <node-IP>:30228.
# For example, open the following address in a browser on your own machine.
http://192.168.142.150:30228
apiVersion: v1
kind: Service
metadata:
  name: svc-nginx
  namespace: dev
spec:
  clusterIP: 10.109.179.231   # pin the Service's internal cluster IP.
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: nginx
  type: ClusterIP
Then the corresponding create and delete commands can be run:
Create: kubectl create -f svc-nginx.yaml.
Delete: kubectl delete -f svc-nginx.yaml.
[root@localhost k8s]# vim svc-nginx.yaml
[root@localhost k8s]# kubectl create -f svc-nginx.yaml
service/svc-nginx created
[root@localhost k8s]# kubectl get service -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx   ClusterIP   10.109.179.231   <none>        80/TCP    13s
[root@localhost k8s]# curl 10.109.179.231:80
curl: (7) Failed connect to 10.109.179.231:80; Connection refused
[root@localhost k8s]# kubectl delete -f svc-nginx.yaml
service "svc-nginx" deleted
[root@localhost k8s]# kubectl get service -n dev
No resources found in dev namespace.
An IP address can be set on the root (pause) container, and the other containers share this IP (the Pod IP), enabling network communication inside the Pod.
This covers communication inside a Pod; communication between Pods is implemented with a virtual layer-2 network technology, which in our current environment is Flannel.
5.1.2 Pod definition.
Below is the Pod resource manifest.
apiVersion: v1       # Required. API version, e.g. v1.
kind: Pod            # Required. Resource type, e.g. Pod.
metadata:            # Required. Metadata.
  name: string       # Required. Pod name.
  namespace: string  # Namespace the Pod belongs to, defaults to "default".
  labels:            # Custom label list.
  - name: string
spec:                # Required. Detailed definition of the containers in the Pod.
  containers:        # Required. List of containers in the Pod.
  - name: string     # Required. Container name.
    image: string    # Required. Container image name.
    imagePullPolicy: [ Always|Never|IfNotPresent ]  # Image pull policy.
    command: [ string ]   # Container start command list; if not specified, the start command used when the image was built is used.
    args: [ string ]      # Argument list for the start command.
    workingDir: string    # Container working directory.
    volumeMounts:         # Storage volumes mounted into the container.
    - name: string        # Name of a shared volume defined in the pod's volumes[] section.
      mountPath: string   # Absolute mount path inside the container, should be shorter than 512 characters.
      readOnly: boolean   # Whether to mount read-only.
    ports:                # List of ports to expose.
    - name: string        # Port name.
      containerPort: int  # Port the container listens on.
      hostPort: int       # Port the host listens on; defaults to the same as containerPort.
      protocol: string    # Port protocol, TCP or UDP; defaults to TCP.
    env:                  # Environment variables to set before the container runs.
    - name: string        # Variable name.
      value: string       # Variable value.
    resources:            # Resource limits and requests.
      limits:             # Resource limits.
        cpu: string       # CPU limit in cores, passed to docker run --cpu-shares.
        memory: string    # Memory limit in Mib/Gib, passed to docker run --memory.
      requests:           # Resource requests.
        cpu: string       # CPU request, the initial amount available at container start.
        memory: string    # Memory request, the initial amount available at container start.
    lifecycle:            # Lifecycle hooks.
      postStart:          # Runs right after the container starts; if it fails, the container is restarted according to the restart policy.
      preStop:            # Runs before the container terminates; the container terminates regardless of the result.
    livenessProbe:        # Health check for the containers in the Pod; the container is restarted automatically after several failed probes.
      exec:               # Probe via exec.
        command: [ string ]  # Command or script to run.
      httpGet:            # Probe via HTTP GET; requires path and port.
        path: string
        port: number
        host: string
        scheme: string
        HttpHeaders:
        - name: string
          value: string
      tcpSocket:          # Probe via tcpSocket.
        port: number
      initialDelaySeconds: 0  # Seconds after the container starts before the first probe, in seconds.
      timeoutSeconds: 0       # Probe timeout in seconds, default 1.
      periodSeconds: 0        # Probe interval in seconds, default 10.
      successThreshold: 0
      failureThreshold: 0
    securityContext:
      privileged: false
  restartPolicy: [ Always | Never | OnFailure ]  # Pod restart policy.
  nodeName: <string>      # Schedule the Pod onto the node with this name.
  nodeSelector: object    # Schedule the Pod onto a node carrying these labels.
  imagePullSecrets:       # Secrets used when pulling images, specified as key:secretKey.
  - name: string
  hostNetwork: false      # Whether to use host networking; defaults to false; true means use the host's network.
  volumes:                # Shared storage volumes defined on this pod.
  - name: string          # Shared volume name (many volume types exist).
    emptyDir: {}          # emptyDir volume: a temporary directory sharing the Pod's lifetime. Left empty.
    hostPath: string      # hostPath volume: mounts a directory from the Pod's host.
      path: string        # Directory on the host that will be used as the mount source.
    secret:               # secret volume: mounts a predefined secret object into the container.
      secretname: string
      items:
      - key: string
        path: string
    configMap:            # configMap volume: mounts a predefined configMap object into the container.
      name: string
      items:
      - key: string
        path: string
# Tip: the configurable fields of every resource can be inspected with a command.
# kubectl explain <resource-type>            view the top-level fields of a resource.
# kubectl explain <resource-type>.<field>    view the sub-fields of a field.
[root@localhost k8s]# kubectl explain pod
KIND: Pod
VERSION: v1
DESCRIPTION:
Pod is a collection of containers that can run on a host. This resource is
created by clients and scheduled onto hosts.
FIELDS:
apiVersion <string>
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
kind <string>
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metadata <Object>
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
  spec <Object>
     Specification of the desired behavior of the pod. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

[root@localhost k8s]# kubectl explain pod.metadata
KIND:     Pod
VERSION:  v1

RESOURCE: metadata <Object>

DESCRIPTION:
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

     ObjectMeta is metadata that all persisted resources must have, which
     includes all objects users must create.

FIELDS:
annotations <map[string]string>
Annotations is an unstructured key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying objects. More
info: http://kubernetes.io/docs/user-guide/annotations
clusterName <string>
The name of the cluster which the object belongs to. This is used to
distinguish resources with same name and namespace in different clusters.
This field is not set anywhere right now and apiserver is going to ignore
it if set in create or update request.
creationTimestamp <string>
CreationTimestamp is a timestamp representing the server time when this
object was created. It is not guaranteed to be setin happens-before order
across separate operations. Clients may not set this value. It is
represented in RFC3339 form and is in UTC. Populated by the system.
Read-only. Null for lists. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
deletionGracePeriodSeconds <integer>
Number of seconds allowed for this object to gracefully terminate before it
will be removed from the system. Only set when deletionTimestamp is also
set. May only be shortened. Read-only.
deletionTimestamp <string>
DeletionTimestamp is RFC 3339 date and time at which this resource will be
deleted. This field is set by the server when a graceful deletion is
requested by the user, and is not directly settable by a client. The
resource is expected to be deleted (no longer visible from resource lists,
and not reachable by name) after the timein this field, once the
finalizers list is empty. As long as the finalizers list contains items,
deletion is blocked. Once the deletionTimestamp is set, this value may not
be unset or be set further into the future, although it may be shortened or
the resource may be deleted prior to this time. For example, a user may
request that a pod is deleted in 30 seconds. The Kubelet will react by
sending a graceful termination signal to the containers in the pod. After
that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)
to the container and after cleanup, remove the pod from the API. In the
presence of network partitions, this object may still exist after this
timestamp, until an administrator or automated process can determine the
resource is fully terminated. If not set, graceful deletion of the object
has not been requested. Populated by the system when a graceful deletion is
requested. Read-only. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
finalizers <[]string>
Must be empty before the object is deleted from the registry. Each entry is
an identifier for the responsible component that will remove the entry from
the list. If the deletionTimestamp of the object is non-nil, entries in
this list can only be removed. Finalizers may be processed and removed in
any order. Order is NOT enforced because it introduces significant risk of
stuck finalizers. finalizers is a shared field, any actor with permission
can reorder it. If the finalizer list is processed in order, then this can
lead to a situation inwhich the component responsible for the first
finalizer in the list is waiting for a signal (field value, external
system, or other) produced by a component responsible for a finalizer later
in the list, resulting in a deadlock. Without enforced ordering finalizers
are free to order amongst themselves and are not vulnerable to ordering
changes in the list.
generateName <string>
GenerateName is an optional prefix, used by the server, to generate a
unique name ONLY IF the Name field has not been provided. If this field is
used, the name returned to the client will be different than the name
passed. This value will also be combined with a unique suffix. The provided
value has the same validation rules as the Name field, and may be truncated
by the length of the suffix required to make the value unique on the
server. If this field is specified and the generated name exists, the
server will NOT return a 409 - instead, it will either return 201 Created
or 500 with Reason ServerTimeout indicating a unique name could not be
found in the time allotted, and the client should retry (optionally after
the time indicated in the Retry-After header). Applied only if Name is not
specified. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
generation <integer>
A sequence number representing a specific generation of the desired state.
Populated by the system. Read-only.
labels <map[string]string>
Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services. More info: http://kubernetes.io/docs/user-guide/labels
managedFields <[]Object>
ManagedFields maps workflow-id and version to the set of fields that are
managed by that workflow. This is mostly for internal housekeeping, and
users typically shouldn't need to set or understand this field. A workflow
can be the user's name, a controller's name, or the name of a specific
apply path like "ci-cd". The set of fields is always in the version that
the workflow used when modifying the object.
name <string>
Name must be unique within a namespace. Is required when creating
resources, although some resources may allow a client to request the
generation of an appropriate name automatically. Name is primarily intended
for creation idempotence and configuration definition. Cannot be updated.
More info: http://kubernetes.io/docs/user-guide/identifiers#names
namespace <string>
Namespace defines the space within each name must be unique. An empty
namespace is equivalent to the "default" namespace, but "default" is the
canonical representation. Not all objects are required to be scoped to a
namespace - the value of this field for those objects will be empty. Must
be a DNS_LABEL. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/namespaces
ownerReferences <[]Object>
List of objects depended by this object. If ALL objects in the list have
been deleted, this object will be garbage collected. If this object is
managed by a controller, then an entry in this list will point to this
controller, with the controller field set to true. There cannot be more
than one managing controller.
resourceVersion <string>
An opaque value that represents the internal version of this object that
can be used by clients to determine when objects have changed. May be used
for optimistic concurrency, change detection, and the watch operation on a
resource or set of resources. Clients must treat these values as opaque and
passed unmodified back to the server. They may only be valid for a
particular resource or set of resources. Populated by the system.
Read-only. Value must be treated as opaque by clients and . More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
selfLink <string>
SelfLink is a URL representing this object. Populated by the system.
Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20
release and the field is planned to be removed in 1.21 release.
uid <string>
     UID is the unique in time and space value for this object. It is typically
generated by the server on successful creation of a resource and is not
allowed to change on PUT operations. Populated by the system. Read-only.
More info: http://kubernetes.io/docs/user-guide/identifiers#uids
[root@localhost k8s]# kubectl explain pod.spec.containers.ports
KIND: Pod
VERSION: v1
RESOURCE: ports <[]Object>
DESCRIPTION:
List of ports to expose from the container. Exposing a port here gives the
system additional information about the network connections a container
uses, but is primarily informational. Not specifying a port here DOES NOT
prevent that port from being exposed. Any port which is listening on the
default "0.0.0.0" address inside a container will be accessible from the
network. Cannot be updated.
ContainerPort represents a network port in a single container.
FIELDS:
# The port the container listens on (0 < x < 65536).
containerPort <integer> -required-
     Number of port to expose on the pod's IP address. This must be a valid port
     number, 0 < x < 65536.
# The host IP to bind the external port to (usually omitted).
hostIP <string>
What host IP to bind the external port to.
# The port the container exposes on the host; if set, only one replica of the container can run on a host (usually omitted).
hostPort <integer>
     Number of port to expose on the host. If specified, this must be a valid
     port number, 0 < x < 65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
# Port name; if specified, it must be unique within the pod.
name <string>
If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
named port in a pod must have a unique name. Name for the port that can be
referred to by services.
# Port protocol. Must be UDP, TCP, or SCTP. Defaults to "TCP".
protocol <string>
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
Next, write a test case: create pod-ports.yaml.
apiVersion: v1
kind: Pod
metadata:
  name: pod-ports
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:               # list of ports exposed by the container.
    - name: nginx-port
      containerPort: 80
      protocol: TCP
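A brief way to create this pod and confirm the declared port (commands follow the pattern used elsewhere in this chapter; not the author's recorded session):

[root@localhost k8s]# kubectl create -f pod-ports.yaml
[root@localhost k8s]# kubectl get pod pod-ports -n dev -o yaml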
[root@localhost k8s]# kubectl explain pod.spec.containers.livenessProbe
KIND: Pod
VERSION: v1
RESOURCE: livenessProbe <Object>
DESCRIPTION:
Periodic probe of container liveness. Container will be restarted if the
probe fails. Cannot be updated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
Probe describes a health check to be performed against a container to
determine whether it is alive or ready to receive traffic.
FIELDS:
exec<Object>
One and only one of the following should be specified. Exec specifies the
action to take.
# Number of consecutive failed probes before the probe is considered failed. Default 3, minimum 1.
failureThreshold <integer>
Minimum consecutive failures for the probe to be considered failed after
having succeeded. Defaults to 3. Minimum value is 1.
httpGet <Object>
HTTPGet specifies the http request to perform.
# Seconds to wait after the container starts before running the first probe.
initialDelaySeconds <integer>
Number of seconds after the container has started before liveness probes
are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

# How often the probe runs. Default 10 seconds, minimum 1 second.
periodSeconds <integer>
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum
value is 1.
# Number of consecutive successful probes before the probe is considered successful. Default 1.
successThreshold <integer>
Minimum consecutive successes for the probe to be considered successful
after having failed. Defaults to 1. Must be 1 for liveness and startup.
Minimum value is 1.
tcpSocket <Object>
TCPSocket specifies an action involving a TCP port. TCP hooks not yet
supported
# Probe timeout. Default 1 second, minimum 1 second.
timeoutSeconds <integer>
Number of seconds after which the probe times out. Defaults to 1 second.
Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
Below, a couple of probes are configured just to demonstrate the effect:
[root@k8s-master01 ~]# more pod-liveness-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:
        scheme: HTTP
        port: 80
        path: /
      initialDelaySeconds: 30  # start probing 30s after the container starts.
      timeoutSeconds: 5        # probe timeout of 5s.
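For comparison, a liveness probe can also run a command inside the container instead of an HTTP request. A minimal sketch (the file name, pod name, and probed file are illustrative only):

apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-exec   # hypothetical name
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    livenessProbe:
      exec:
        command: ["/bin/cat", "/tmp/hello.txt"]  # fails (and triggers a restart) if the file does not exist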
5.3.5 Restart policy.
As seen in the previous section, once a container probe detects a problem, kubernetes restarts the Pod the container belongs to. This behaviour is actually decided by the pod's restart policy, which has 3 possible values:
Always: automatically restart the container when it fails; this is the default.
OnFailure: restart only when the container terminates with a non-zero exit code.
Never: never restart the container, whatever its state.
The restart policy applies to all containers in the pod object. The first required restart happens immediately; subsequent restarts are delayed by the kubelet, with delays of 10s, 20s, 40s, 80s, 160s, and 300s, where 300s is the maximum delay.
Create pod-restartpolicy.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: pod-restartpolicy
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:
        scheme: HTTP
        port: 80
        path: /hello
  restartPolicy: Never   # set the restart policy to Never.
Run the Pod to test it.
# Create the pod.
[root@localhost k8s]# vim pod-restartpolicy.yaml
[root@localhost k8s]# kubectl create -f pod-restartpolicy.yaml
pod/pod-restartpolicy created
# Describe the Pod; the nginx container's liveness probe is failing.
[root@localhost k8s]# kubectl describe pods pod-restartpolicy -n dev
Name: pod-restartpolicy
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Sat, 03 Dec 2022 15:50:00 +0800
Labels: <none>
Annotations: <none>
Status: Running
IP: 10.244.2.29
IPs:
IP: 10.244.2.29
Containers:
nginx:
Container ID: docker://5e3a86bc04bf5eff2a861c0db69fb2c87ae730fe680b633def25d7a18c08ed05
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:b4b9b3eee194703fc2fa8afa5b7510c77ae70cfba567af1376a573a967c03dbb
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Sat, 03 Dec 2022 15:50:01 +0800
Ready: True
Restart Count: 0
Liveness: http-get http://:80/hello delay=0s timeout=1s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-n8qxp (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-n8qxp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-n8qxp
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 17s default-scheduler Successfully assigned dev/pod-restartpolicy to localhost.localdomain.k8s.node2
Normal Pulled 16s kubelet, localhost.localdomain.k8s.node2 Container image "nginx:1.17.1" already present on machine
Normal Created 16s kubelet, localhost.localdomain.k8s.node2 Created container nginx
Normal Started 16s kubelet, localhost.localdomain.k8s.node2 Started container nginx
  Warning  Unhealthy  3s (x2 over 13s)  kubelet, localhost.localdomain.k8s.node2  Liveness probe failed: HTTP probe failed with statuscode: 404

# Wait a while longer and check the pod's restart count: it stays at 0, so the pod is never restarted.
[root@localhost k8s]# kubectl get pods pod-restartpolicy -n dev
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 78s
5.4 Pod scheduling.
By default, which Node a Pod runs on is computed by the Scheduler component using its algorithms, and this process is not under manual control. In practice this is often not enough: we frequently want to steer certain Pods onto certain nodes. Doing so requires understanding kubernetes' scheduling rules for Pods; kubernetes offers four broad categories of scheduling.
Automatic scheduling: the node is chosen entirely by the Scheduler's algorithms.
Directed scheduling: NodeName, NodeSelector.
Affinity scheduling: NodeAffinity, PodAffinity, PodAntiAffinity.
Taint (toleration) scheduling: Taints, Toleration.
5.4.1 Directed scheduling.
Directed scheduling means declaring nodeName or nodeSelector on a pod so that it is scheduled onto the desired node. Note that this scheduling is mandatory: even if the target Node does not exist, the Pod is still scheduled to it - it simply fails to run.
- NodeName.
NodeName forcibly constrains a Pod to be scheduled onto the Node with the given name. This approach bypasses the Scheduler's logic entirely and binds the Pod directly to the named node.
[root@localhost k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master Ready master 39h v1.17.4
localhost.localdomain.k8s.node1 Ready <none> 39h v1.17.4
localhost.localdomain.k8s.node2 Ready <none> 39h v1.17.4
Next, try it out: create a pod-nodename.yaml file.
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: localhost.localdomain.k8s.node2   # schedule onto the node2 node.
# Create the pod.
[root@localhost k8s]# vim pod-nodename.yaml
[root@localhost k8s]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created
# Check the Pod's NODE attribute: it has indeed been scheduled onto the node2 node.
[root@localhost k8s]# kubectl get pods pod-nodename -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename   1/1     Running   0          31s   10.244.2.30   localhost.localdomain.k8s.node2   <none>           <none>

# Next, delete the pod and change the value of nodeName to node3 (there is no node3 node).
[root@localhost k8s]# vim pod-nodename.yaml
[root@localhost k8s]# kubectl delete -f pod-nodename.yaml
pod "pod-nodename" deleted
[root@localhost k8s]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created
# Query again: the pod has been assigned to the node3 node, but since node3 does not exist, the pod cannot run.
[root@localhost k8s]# kubectl get pods pod-nodename -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename 0/1 Pending 0 27s <none> localhost.localdomain.k8s.node3 <none><none>
- NodeSelector.
NodeSelector schedules a pod onto nodes carrying the specified labels. It is implemented through kubernetes' label-selector mechanism: before the pod is created, the scheduler uses the MatchNodeSelector policy to match labels, finds the target node, and schedules the pod onto it. The matching rule is a hard constraint.
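The original NodeSelector example files are not included in this capture; a minimal sketch under the assumption that a node is labelled first (the label key nodeenv is borrowed from the affinity examples later, and the file name is hypothetical):

# Label one of the worker nodes first.
kubectl label nodes localhost.localdomain.k8s.node1 nodeenv=pro

# pod-nodeselector.yaml (hypothetical file name)
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeselector
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeSelector:
    nodeenv: pro   # schedule onto a node labelled nodeenv=pro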
[root@localhost k8s]# kubectl explain pod.spec.affinity
KIND: Pod
VERSION: v1
RESOURCE: affinity <Object>
DESCRIPTION:
If specified, the pod's scheduling constraints
Affinity is a group of affinity scheduling rules.
FIELDS:
nodeAffinity <Object>
Describes node affinity scheduling rules for the pod.
podAffinity <Object>
Describes pod affinity scheduling rules (e.g. co-locate this pod in the
same node, zone, etc. as some other pod(s)).
podAntiAffinity <Object>
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod
in the same node, zone, etc. as some other pod(s)).
[root@localhost k8s]# kubectl explain pod.spec.affinity.nodeAffinity
KIND: Pod
VERSION: v1
RESOURCE: nodeAffinity <Object>
DESCRIPTION:
Describes node affinity scheduling rules for the pod.
Node affinity is a group of node affinity scheduling rules.
FIELDS:
preferredDuringSchedulingIgnoredDuringExecution <[]Object>
The scheduler will prefer to schedule pods to nodes that satisfy the
affinity expressions specified by this field, but it may choose a node that
violates one or more of the expressions. The node that is most preferred is
the one with the greatest sum of weights, i.e. for each node that meets all
of the scheduling requirements (resource request, requiredDuringScheduling
affinity expressions, etc.), compute a sum by iterating through the
elements of this field and adding "weight" to the sum if the node matches
the corresponding matchExpressions; the node(s) with the highest sum are
the most preferred.
requiredDuringSchedulingIgnoredDuringExecution <Object>
If the affinity requirements specified by this field are not met at
scheduling time, the pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease to be met at some point
during pod execution (e.g. due to an update), the system may or may not try
to eventually evict the pod from its node.
接下来演示软限制(preferredDuringSchedulingIgnoredDuringExecution),创建 pod-nodeaffinity-preferred.yaml 文件,内容如下。
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-preferred
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity: # 亲和性设置。
    nodeAffinity: # 设置 node 亲和性。
      preferredDuringSchedulingIgnoredDuringExecution: # 软限制。
      - weight: 1
        preference:
          matchExpressions: # 匹配 nodeenv 的值在 ["xxx", "yyy"] 中的标签(当前环境没有)。
          - key: nodeenv
            operator: In
            values: ["xxx", "yyy"]
# 创建 pod。[root@localhost k8s]# vim pod-nodeaffinity-preferred.yaml[root@localhost k8s]# kubectl create -f pod-nodeaffinity-preferred.yaml
pod/pod-nodeaffinity-preferred created
# 查看 pod 状态 (运行成功)。[root@localhost k8s]# kubectl get pod pod-nodeaffinity-preferred -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodeaffinity-preferred 1/1 Running 0 21s 10.244.2.33 localhost.localdomain.k8s.node2 <none><none>
下面演示 PodAffinity(pod 亲和性)。1)首先创建一个参照(目标)pod:pod-podaffinity-target.yaml,内容如下。
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-target
  namespace: dev
  labels:
    podenv: prod # 设置标签。
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: localhost.localdomain.k8s.node1 # 将目标 pod 明确指定到 node1 上。
# 启动目标 pod。[root@localhost k8s]# kubectl create -f pod-podaffinity-target.yaml
pod/pod-podaffinity-target created
# 查看 pod 状况。[root@localhost k8s]# kubectl get pods pod-podaffinity-target -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod-podaffinity-target 1/1 Running 0 8s podenv=prod
2)创建 pod-podaffinity-required.yaml,内容如下。
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity: # 亲和性设置。
    podAffinity: # 设置 pod 亲和性。
      requiredDuringSchedulingIgnoredDuringExecution: # 硬限制。
      - labelSelector:
          matchExpressions: # 匹配 podenv 的值在 ["xxx", "yyy"] 中的标签。
          - key: podenv
            operator: In
            values: ["xxx", "yyy"]
        topologyKey: kubernetes.io/hostname
上面配置表达的意思是:新 Pod 必须要与拥有标签 podenv=xxx 或者 podenv=yyy 的 pod 在同一 Node 上,显然现在没有这样的 pod,接下来运行测试一下。
# 启动 pod。[root@localhost k8s]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
# 查看 pod 状态,发现未运行。[root@localhost k8s]# kubectl get pods pod-podaffinity-required -n dev
NAME READY STATUS RESTARTS AGE
pod-podaffinity-required 0/1 Pending 0 19s
# 查看详细信息。[root@localhost k8s]# kubectl describe pods pod-podaffinity-required -n dev
Name: pod-podaffinity-required
Namespace: dev
Priority: 0
Node: <none>
Labels: <none>
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Containers:
nginx:
Image: nginx:1.17.1
Port: <none>
Host Port: <none>
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-n8qxp (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
default-token-n8qxp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-n8qxp
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 49s default-scheduler 0/3 nodes are available: 1 node(s) had taints that the pod didn't tolerate, 2 node(s) didn't match pod affinity rules.
# 接下来修改 values: ["xxx", "yyy"] -> values: ["prod", "yyy"]。# 意思是:新 Pod 必须要与拥有标签 podenv=prod 或者 podenv=yyy 的 pod 在同一 Node 上。[root@k8s-master01 ~]# vim pod-podaffinity-required.yaml# 然后重新创建 pod,查看效果。[root@localhost k8s]# kubectl delete -f pod-podaffinity-required.yaml
pod "pod-podaffinity-required" deleted
[root@localhost k8s]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
# 发现此时 Pod 运行正常。[root@localhost k8s]# kubectl get pods pod-podaffinity-required -n dev
NAME READY STATUS RESTARTS AGE
pod-podaffinity-required 1/1 Running 0 32s
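前面提到的 PodAntiAffinity(pod 反亲和性)用法与 PodAffinity 相反,是让新 Pod 远离拥有指定标签的 pod。这里补充一个最小示意(文件名 pod-podantiaffinity-required.yaml 只是假设的示例值):
apiVersion: v1
kind: Pod
metadata:
  name: pod-podantiaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity: # 亲和性设置。
    podAntiAffinity: # 设置 pod 反亲和性。
      requiredDuringSchedulingIgnoredDuringExecution: # 硬限制。
      - labelSelector:
          matchExpressions: # 避开拥有 podenv=prod 标签的 pod 所在的节点。
          - key: podenv
            operator: In
            values: ["prod"]
        topologyKey: kubernetes.io/hostname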
# 创建 rs。[root@localhost k8s]# vim pc-replicaset.yaml[root@localhost k8s]# kubectl create -f pc-replicaset.yaml
replicaset.apps/pc-replicaset created
# 查看 rs。# DESIRED ~ 期望副本数量。# CURRENT ~ 当前副本数量。 # READY ~ 已经准备好提供服务的副本数量。[root@localhost k8s]# kubectl get rs pc-replicaset -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-replicaset 3 3 0 49s nginx nginx:1.17.1 app=nginx-pod
[root@localhost k8s]# kubectl describe rs pc-replicaset -n dev
Name: pc-replicaset
Namespace: dev
Selector: app=nginx-pod
Labels: <none>
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
Labels: app=nginx-pod
Containers:
nginx:
Image: nginx:1.17.1
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal SuccessfulCreate 78s replicaset-controller Created pod: pc-replicaset-64z8n
Normal SuccessfulCreate 78s replicaset-controller Created pod: pc-replicaset-74msh
Normal SuccessfulCreate 78s replicaset-controller Created pod: pc-replicaset-m5rsv
# 查看当前控制器创建出来的 pod。# 这里发现控制器创建出来的 pod 的名称是在控制器名称后面拼接了-xxxxx 随机码。[root@k8s-master01 ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pc-replicaset-6vmvt 1/1 Running 0 54s
pc-replicaset-fmb8f 1/1 Running 0 54s
pc-replicaset-snrk2 1/1 Running 0 54s
[root@localhost k8s]# kubectl taint nodes localhost.localdomain.k8s.node1 tag=geek:NoExecute-[root@localhost k8s]# kubectl get rs pc-replicaset -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-replicaset 3 3 3 5m58s nginx nginx:1.17.1 app=nginx-pod
扩缩容。
# 编辑 rs 的副本数量,修改 spec:replicas: 6 即可。[root@localhost ~]# kubectl edit rs pc-replicaset -n dev
replicaset.apps/pc-replicaset edited
# 查看 pod。[root@localhost ~]# kubectl get rs pc-replicaset -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-replicaset 6 6 6 23m nginx nginx:1.17.1 app=nginx-pod
[root@localhost ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pc-replicaset-5x5tx 1/1 Running 0 25s
pc-replicaset-64z8n 1/1 Running 0 23m
pc-replicaset-74msh 1/1 Running 0 23m
pc-replicaset-8fxqq 1/1 Running 0 25s
pc-replicaset-bbtj8 1/1 Running 0 25s
pc-replicaset-m5rsv 1/1 Running 0 23m
# 当然也可以直接使用命令实现。# 使用 scale 命令实现扩缩容,后面 --replicas=n 直接指定目标数量即可。[root@localhost ~]# kubectl scale rs pc-replicaset --replicas=2 -n dev
replicaset.apps/pc-replicaset scaled
# 命令运行完毕,立即查看,发现已经有 4 个开始准备退出了。[root@localhost ~]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-replicaset-64z8n 1/1 Running 0 25m
pc-replicaset-74msh 1/1 Running 0 25m
pc-replicaset-ddzk9 1/1 Terminating 0 9s
pc-replicaset-gvsbq 0/1 Terminating 0 9s
pc-replicaset-m8q76 0/1 Terminating 0 9s
pc-replicaset-zn5xk 0/1 Terminating 0 9s
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-replicaset-64z8n 1/1 Running 0 26m
pc-replicaset-74msh 1/1 Running 0 26m
镜像升级。
# 编辑 rs 的容器镜像 - image: nginx:1.17.2[root@localhost k8s]# kubectl edit rs pc-replicaset -n dev
replicaset.apps/pc-replicaset edited
# 再次查看,发现镜像版本已经变更了。[root@localhost k8s]# kubectl get rs pc-replicaset -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-replicaset 2 2 2 28m nginx nginx:1.17.2 app=nginx-pod
# 同样的道理,也可以使用命令完成这个工作。# kubectl set image rs rs 名称 容器=镜像版本 -n namespace[root@localhost k8s]# kubectl set image rs pc-replicaset nginx=nginx:1.17.1 -n dev
replicaset.apps/pc-replicaset image updated
# 再次查看,发现镜像版本已经变更了。[root@localhost k8s]# kubectl get rs -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-replicaset 2 2 2 29m nginx nginx:1.17.1 app=nginx-pod
删除 ReplicaSet。
# 使用 kubectl delete 命令会删除此 RS 以及它管理的 Pod。# 在 kubernetes 删除 RS 前,会将 RS 的 replicas 调整为 0,等待所有的 Pod 被删除后,再执行 RS 对象的删除。[root@k8s-master01 ~]# kubectl delete rs pc-replicaset -n dev
replicaset.apps "pc-replicaset" deleted
[root@k8s-master01 ~]# kubectl get pod -n dev -o wide
No resources found in dev namespace.
# 如果希望仅仅删除 RS 对象(保留 Pod),可以使用 kubectl delete 命令时添加--cascade=false 选项(不推荐)。[root@k8s-master01 ~]# kubectl delete rs pc-replicaset -n dev --cascade=false
replicaset.apps "pc-replicaset" deleted
[root@k8s-master01 ~]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-replicaset-cl82j 1/1 Running 0 75s
pc-replicaset-dslhb 1/1 Running 0 75s
# 也可以使用 yaml 直接删除(推荐)。[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-replicaset 2 2 2 33m
[root@localhost k8s]# kubectl delete -f pc-replicaset.yaml
replicaset.apps "pc-replicaset" deleted
[root@localhost k8s]# kubectl get rs -n dev
No resources found in dev namespace.
# 创建 deployment。[root@localhost k8s]# kubectl create -f pc-deployment.yaml
deployment.apps/pc-deployment created
# 查看 deployment。# UP-TO-DATE:最新版本的 pod 的数量。# AVAILABLE:当前可用的 pod 的数量。
[root@localhost k8s]# kubectl get deploy pc-deployment -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
pc-deployment 3/3 3 3 8s
# 查看 rs。# 发现 rs 的名称是在原来 deployment 的名字后面添加了一个 10 位数的随机串。[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-deployment-5d89bdfbf9 3 3 3 90s
# 查看 pod。[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-29p82 1/1 Running 0 112s
pc-deployment-5d89bdfbf9-dhnhn 1/1 Running 0 112s
pc-deployment-5d89bdfbf9-hh4s4 1/1 Running 0 112s
6.3.2 扩缩容。
# 变更副本数量为 5 个。[root@localhost k8s]# kubectl scale deploy pc-deployment --replicas=5 -n dev
deployment.apps/pc-deployment scaled
# 查看 deployment。[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-deployment-5d89bdfbf9 5 5 5 8m21s
# 查看 pod。[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-25xdq 1/1 Running 0 6s
pc-deployment-5d89bdfbf9-29p82 1/1 Running 0 6m47s
pc-deployment-5d89bdfbf9-dhnhn 1/1 Running 0 6m47s
pc-deployment-5d89bdfbf9-hh4s4 1/1 Running 0 6m47s
pc-deployment-5d89bdfbf9-jkxsj 1/1 Running 0 6s
# 编辑 deployment 的副本数量,修改 spec:replicas: 3 即可。# 查看 pod。[root@localhost k8s]# kubectl edit deploy pc-deployment -n dev
deployment.apps/pc-deployment edited
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-29p82 1/1 Running 0 9m14s
pc-deployment-5d89bdfbf9-dhnhn 1/1 Running 0 9m14s
pc-deployment-5d89bdfbf9-jkxsj 1/1 Running 0 2m33s
strategy:指定新的 Pod 替换旧的 Pod 的策略,支持两个属性。
type:指定策略类型,支持两种策略。
Recreate:在创建出新的 Pod 之前会先杀掉所有已存在的 Pod。
RollingUpdate:滚动更新,就是杀死一部分,就启动一部分,在更新过程中,存在两个版本 Pod。
rollingUpdate:当 type 为 RollingUpdate 时生效,用于为 RollingUpdate 设置参数,支持两个属性。
maxUnavailable:用来指定在升级过程中不可用 Pod 的最大数量,默认为 25%。
maxSurge:用来指定在升级过程中可以超过期望的 Pod 的最大数量,默认为 25%(strategy 的配置方式见下面的示意)。
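下面是一个 strategy 配置位置的最小示意(取值均为默认值,仅作说明用):
spec:
  strategy: # 更新策略。
    type: RollingUpdate # 滚动更新。
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%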
# 查看 rs,会发现原来的 rs 依旧存在,只是 pod 数量变为了 0,而后又新产生了一个 rs 来运行新的 pod。# 其实这就是 deployment 能够进行版本回退的奥妙所在,后面会详细解释。[root@localhost k8s]# kubectl create -f pc-deployment.yaml --record
deployment.apps/pc-deployment created
[root@localhost k8s]# kubectl get deploy,rs,pod -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/pc-deployment 3/3 3 3 21s
NAME DESIRED CURRENT READY AGE
replicaset.apps/pc-deployment-5d89bdfbf9 3 3 3 21s
NAME READY STATUS RESTARTS AGE
pod/pc-deployment-5d89bdfbf9-bdjdp 1/1 Running 0 21s
pod/pc-deployment-5d89bdfbf9-ntb5t 1/1 Running 0 21s
pod/pc-deployment-5d89bdfbf9-v6wtg 1/1 Running 0 21s
[root@localhost k8s]# kubectl get pod -n dev -w
NAME READY STATUS RESTARTS AGE
pc-deployment-675d469f8b-c6sbq 1/1 Running 0 27s
pc-deployment-675d469f8b-jvfgf 1/1 Running 0 25s
pc-deployment-675d469f8b-lds2k 1/1 Running 0 30s
pc-deployment-5d89bdfbf9-rw46s 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-rw46s 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-rw46s 0/1 ContainerCreating 0 0s
pc-deployment-5d89bdfbf9-rw46s 1/1 Running 0 1s
pc-deployment-675d469f8b-jvfgf 1/1 Terminating 0 35s
pc-deployment-5d89bdfbf9-5drv2 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-5drv2 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-5drv2 0/1 ContainerCreating 0 0s
pc-deployment-675d469f8b-jvfgf 0/1 Terminating 0 36s
pc-deployment-5d89bdfbf9-5drv2 1/1 Running 0 2s
pc-deployment-675d469f8b-c6sbq 1/1 Terminating 0 39s
pc-deployment-5d89bdfbf9-9jzpr 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-9jzpr 0/1 Pending 0 0s
pc-deployment-5d89bdfbf9-9jzpr 0/1 ContainerCreating 0 0s
pc-deployment-675d469f8b-c6sbq 0/1 Terminating 0 40s
pc-deployment-5d89bdfbf9-9jzpr 1/1 Running 0 2s
pc-deployment-675d469f8b-lds2k 1/1 Terminating 0 44s
pc-deployment-675d469f8b-lds2k 0/1 Terminating 0 45s
pc-deployment-675d469f8b-c6sbq 0/1 Terminating 0 48s
pc-deployment-675d469f8b-c6sbq 0/1 Terminating 0 48s
pc-deployment-675d469f8b-jvfgf 0/1 Terminating 0 46s
pc-deployment-675d469f8b-jvfgf 0/1 Terminating 0 46s
pc-deployment-675d469f8b-lds2k 0/1 Terminating 0 55s
pc-deployment-675d469f8b-lds2k 0/1 Terminating 0 55s
# 镜像更新后会新创建一个 rs,原来的 rs 保留,pod 数量降为 0。[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-deployment-5d89bdfbf9 3 3 3 6m31s
pc-deployment-675d469f8b 0 0 0 2m44s
# pc-deployment-675d469f8b 留着用于版本回退。
6.3.3 版本回退。
deployment 支持版本升级过程中的暂停、继续功能以及版本回退等诸多功能,下面具体来看。
kubectl rollout:版本升级相关功能,支持下面的选项。
status 显示当前升级状态。
history 显示升级历史记录。
pause 暂停版本升级过程。
resume 继续已经暂停的版本升级过程。
restart 重启版本升级过程。
undo 回滚到上一级版本(可以使用 --to-revision 回滚到指定版本)。
# 查看当前升级版本的状态。[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
deployment "pc-deployment" successfully rolled out
[root@localhost k8s]# kubectl set image deploy pc-deployment nginx=nginx:1.17.1 -n dev
deployment.apps/pc-deployment image updated
[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 old replicas are pending termination...
Waiting for deployment "pc-deployment" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "pc-deployment" rollout to finish: 1 old replicas are pending termination...
deployment "pc-deployment" successfully rolled out
# 查看升级历史记录。[root@localhost k8s]# kubectl rollout history deploy pc-deployment -n dev
deployment.apps/pc-deployment
REVISION CHANGE-CAUSE
4 kubectl create --filename=pc-deployment.yaml --record=true
5 kubectl create --filename=pc-deployment.yaml --record=true
# 可以发现当前历史中保留了 4、5 两个版本记录。# 版本回滚。# 这里先尝试使用 --to-revision=1 回滚到 1 版本(如果省略这个选项,就是回退到上一个版本),但当前历史中已经没有 1 版本,所以会报错,随后改用 --to-revision=4。[root@localhost k8s]# kubectl rollout undo deployment pc-deployment --to-revision=1 -n dev
error: unable to find specified revision 1 in history[root@localhost k8s]# kubectl rollout undo deployment pc-deployment --to-revision=4 -n dev
deployment.apps/pc-deployment rolled back
# 查看发现,通过 nginx 镜像版本(nginx:1.17.2)可以确认已经回退到了指定的版本。[root@localhost k8s]# kubectl get deploy -n dev -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
pc-deployment 3/3 3 3 19m nginx nginx:1.17.2 app=nginx-pod
# 查看 rs,发现回退到的那个版本对应的 rs 中有 3 个 pod 在运行,另一个 rs 的 pod 数量为 0。# 其实 deployment 之所以可以实现版本的回滚,就是通过记录下历史 rs 来实现的,一旦想回滚到哪个版本,只需要将当前版本 pod 数量降为 0,然后将回滚版本的 pod 提升为目标数量就可以了。[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-deployment-5d89bdfbf9 0 0 0 18m
pc-deployment-675d469f8b 3 3 3 14m
[root@localhost k8s]# kubectl rollout history deploy pc-deployment -n dev
deployment.apps/pc-deployment
REVISION CHANGE-CAUSE
5 kubectl create --filename=pc-deployment.yaml --record=true
6 kubectl create --filename=pc-deployment.yaml --record=true
比如有一批新的 Pod 资源创建完成后立即暂停更新过程,此时,仅存在一部分新版本的应用,主体部分还是旧的版本。然后,再筛选一小部分的用户请求路由到新版本的 Pod 应用,继续观察能否稳定地按期望的方式运行。确定没问题之后再继续完成余下的 Pod 资源滚动更新,否则立即回滚更新操作。这就是所谓的金丝雀发布。
# 更新 deployment 的版本,并配置暂停 deployment。[root@localhost k8s]# kubectl set image deploy pc-deployment nginx=nginx:1.17.1 -n dev && kubectl rollout pause deployment pc-deployment -n dev
deployment.apps/pc-deployment image updated
deployment.apps/pc-deployment paused
# 观察更新状态。[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
# 监控更新的过程,可以看到已经新增了一个资源,但是并未按照预期的状态去删除一个旧的资源,就是因为使用了 pause 暂停命令。[root@localhost k8s]# kubectl get rs -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-deployment-5d89bdfbf9 1 1 1 24m nginx nginx:1.17.1 app=nginx-pod,pod-template-hash=5d89bdfbf9
pc-deployment-675d469f8b 3 3 3 20m nginx nginx:1.17.2 app=nginx-pod,pod-template-hash=675d469f8b
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-qnnvk 1/1 Running 0 94s
pc-deployment-675d469f8b-bk7zd 1/1 Running 0 8m10s
pc-deployment-675d469f8b-jjwkr 1/1 Running 0 8m13s
pc-deployment-675d469f8b-prbfd 1/1 Running 0 8m9s
# 确保更新的 pod 没问题了,继续更新。[root@localhost k8s]# kubectl rollout resume deploy pc-deployment -n dev
deployment.apps/pc-deployment resumed
# 查看最后的更新情况[root@localhost k8s]# kubectl get rs -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-deployment-5d89bdfbf9 3 3 3 26m nginx nginx:1.17.1 app=nginx-pod,pod-template-hash=5d89bdfbf9
pc-deployment-675d469f8b 0 0 0 22m nginx nginx:1.17.2 app=nginx-pod,pod-template-hash=675d469f8b
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-2vccz 1/1 Running 0 74s
pc-deployment-5d89bdfbf9-7j727 1/1 Running 0 73s
pc-deployment-5d89bdfbf9-qnnvk 1/1 Running 0 3m21s
在前面的课程中,我们已经可以实现通过手工执行 kubectl scale 命令实现 Pod 扩容或缩容,但是这显然不符合 Kubernetes 的定位目标–自动化、智能化。Kubernetes 期望可以实现通过监测 Pod 的使用情况,实现 pod 数量的自动调整,于是就产生了 Horizontal Pod Autoscaler(HPA)这种控制器。
HPA 可以获取每个 Pod 利用率,然后和 HPA 中定义的指标进行对比,同时计算出需要伸缩的具体值,最后实现 Pod 的数量的调整。其实 HPA 与之前的 Deployment 一样,也属于一种 Kubernetes 资源对象,它通过追踪分析 RC 控制的所有目标 Pod 的负载变化情况,来确定是否需要针对性地调整目标 Pod 的副本数,这是 HPA 的实现原理。
# 安装 git[root@k8s-master01 ~]# yum install git -y# 获取 metrics-server,注意使用的版本。[root@k8s-master01 ~]# git clone -b v0.3.6 https://github.com/kubernetes-incubator/metrics-server
geek@LAPTOP-0GJSKR6T MINGW64 /d/lyfGeek。download。/metrics_server
$ git clone -b v0.3.6 https://github.com/kubernetes-incubator/metrics-server
Cloning into 'metrics-server'...
remote: Enumerating objects: 14982, done.
remote: Counting objects: 100% (92/92), done.
remote: Compressing objects: 100% (59/59), done.
remote: Total 14982(delta 35), reused 72(delta 30), pack-reused 14890
Receiving objects: 100% (14982/14982), 13.45 MiB |3.24 MiB/s, done.
Resolving deltas: 100% (7951/7951), done.
Note: switching to 'd1f4f6fc09cd3134e8ea5ba4e0bd2db4e8002ed8'.
You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by switching back to a branch.
If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -c with the switch command. Example:
git switch -c <new-branch-name>
Or undo this operation with:
git switch -
Turn off this advice by setting config variable advice.detachedHead to false
Updating files: 100% (2971/2971), done.
# 修改 deployment,注意修改的是镜像和初始化参数。[root@k8s-master01 ~]# cd /root/metrics-server/deploy/1.8+/[root@k8s-master01 1.8+]# vim metrics-server-deployment.yaml
在 metrics-server-deployment.yaml 的对应位置添加下面选项。
hostNetwork: true
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
args:
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
# 安装 metrics-server。[root@localhost 1.8+]# cp metrics-server-deployment.yaml metrics-server-deployment.yaml.bak[root@localhost 1.8+]# vim metrics-server-deployment.yaml[root@localhost 1.8+]# kubectl apply -f ./
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
# 查看 pod 运行情况。[root@localhost 1.8+]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-5959v 0/1 Running 155 3d18h
coredns-9d85f5447-gvqxh 0/1 CrashLoopBackOff 155 3d18h
etcd-localhost.localdomain.k8s.master 1/1 Running 1 3d18h
kube-apiserver-localhost.localdomain.k8s.master 1/1 Running 9 3d18h
kube-controller-manager-localhost.localdomain.k8s.master 1/1 Running 2 3d18h
kube-proxy-7dc95 1/1 Running 2 3d17h
kube-proxy-7hss2 1/1 Running 1 3d17h
kube-proxy-rpnvx 1/1 Running 1 3d18h
kube-scheduler-localhost.localdomain.k8s.master 1/1 Running 2 3d18h
metrics-server-6b976979db-j5g6s 1/1 Running 0 74s
# 使用 kubectl top node 查看资源使用情况。[root@localhost 1.8+]# kubectl top pod -n kube-system
NAME CPU(cores) MEMORY(bytes)
coredns-9d85f5447-gvqxh 0m 0Mi
etcd-localhost.localdomain.k8s.master 27m 83Mi
kube-apiserver-localhost.localdomain.k8s.master 71m 352Mi
kube-controller-manager-localhost.localdomain.k8s.master 39m 53Mi
kube-proxy-7dc95 1m 16Mi
kube-proxy-7hss2 2m 16Mi
kube-proxy-rpnvx 1m 20Mi
kube-scheduler-localhost.localdomain.k8s.master 11m 25Mi
metrics-server-6b976979db-j5g6s 2m 10Mi
# 至此,metrics-server 安装完成。
# 创建 deployment。[root@localhost ~]# kubectl run nginx --image=nginx:1.17.1 --requests=cpu=100m -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@localhost ~]# kubectl get deploy,pod -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 4s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-dgntb 1/1 Running 0 4s
# 创建 service。[root@localhost ~]# kubectl expose deployment nginx --type=NodePort --port=80 -n dev
service/nginx exposed
# 查看。[root@localhost ~]# kubectl get deploy,pods,services -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 2m3s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-dgntb 1/1 Running 0 2m3s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx NodePort 10.107.80.13 <none>80:30994/TCP 73s
6.4.3 部署 HPA。
创建 pc-hpa.yaml 文件,内容如下。
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: pc-hpa
  namespace: dev
spec:
  minReplicas: 1 # 最小 pod 数量。
  maxReplicas: 10 # 最大 pod 数量。
  targetCPUUtilizationPercentage: 3 # CPU 使用率指标。
  scaleTargetRef: # 指定要控制的 nginx(Deployment)信息。
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
这里的 name: nginx 指的是前面创建的 deployment.apps/nginx(由它管理 pod/nginx-778cb5fb7b-dgntb)。
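作为参考,也可以不写 yaml,直接用 kubectl autoscale 命令创建一个等价的 HPA(下面的命令只是示意,参数与上面的 yaml 对应):
kubectl autoscale deployment nginx --min=1 --max=10 --cpu-percent=3 -n dev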
[root@localhost k8s]# kubectl get deploy,pods,services -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 8m24s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-c4fv5 1/1 Running 0 8m24s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx NodePort 10.97.197.88 <none>80:32496/TCP 7m52s
# 创建 hpa。[root@localhost k8s]# kubectl create -f pc-hpa.yaml
horizontalpodautoscaler.autoscaling/pc-hpa created
# 查看 hpa。[root@localhost k8s]# kubectl get hpa -n dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 24s
[root@localhost k8s]# kubectl get deploy -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 9m56s
[root@localhost k8s]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 3m15s
6.4.4 测试。
使用压测工具对 service 的 NodePort 地址(本环境中为 192.168.142.150:32496)进行压测,然后通过控制台查看 hpa 和 pod 的变化。
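如果手头没有专门的压测工具,也可以用一个简单的循环请求脚本制造负载(地址沿用上面的 NodePort,仅为示意):
# 持续向 nginx 的 NodePort 发送请求,制造 CPU 负载,按 Ctrl+C 结束。
while true; do curl -s http://192.168.142.150:32496 > /dev/null; done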
[root@localhost k8s]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 2m37s
[root@localhost k8s]# kubectl get deployment -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 4m
nginx-deployment 3/3 3 3 145m
tomcat-deployment 3/3 3 3 145m
[root@localhost k8s]# kubectl get pods -n dev -w
NAME READY STATUS RESTARTS AGE
nginx-778cb5fb7b-c4fv5 1/1 Running 0 4m25s
hpa 变化。
[root@k8s-master01 ~]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
deployment 变化。
[root@k8s-master01 ~]# kubectl get deployment -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
pod 变化。
[root@k8s-master01 ~]# kubectl get pods -n dev -w
NAME READY STATUS RESTARTS AGE
6.5 DaemonSet(DS)。
DaemonSet 类型的控制器可以保证在集群中的每一台(或指定)节点上都运行一个副本。一般适用于日志收集、节点监控等场景。也就是说,如果一个 Pod 提供的功能是节点级别的(每个节点都需要且只需要一个),那么这类 Pod 就适合使用 DaemonSet 类型的控制器创建。
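下面给出一个 DaemonSet 的最小示意(文件名 pc-daemonset.yaml 和其中的标签只是假设的示例值),创建后每个可调度节点上都会运行一个 nginx pod:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: pc-daemonset
  namespace: dev
spec:
  selector:
    matchLabels:
      app: nginx-pod
  template: # pod 模板。
    metadata:
      labels:
        app: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.1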
在 kubernetes 中,pod 是应用程序的载体,我们可以通过 pod 的 ip 来访问应用程序,但是 pod 的 ip 地址不是固定的,这也就意味着不方便直接采用 pod 的 ip 对服务进行访问。
为了解决这个问题,kubernetes 提供了 Service 资源,Service 会对提供同一个服务的多个 pod 进行聚合,并且提供一个统一的入口地址。通过访问 Service 的入口地址就能访问到后面的 pod 服务。
Service 在很多情况下只是一个概念,真正起作用的其实是 kube-proxy 服务进程,每个 Node 节点上都运行着一个 kube-proxy 服务进程。当创建 Service 的时候会通过 api-server 向 etcd 写入创建的 service 的信息,而 kube-proxy 会基于监听的机制发现这种 Service 的变动,然后它会将最新的 Service 信息转换成对应的访问规则。
# 10.97.97.97:80 是 service 提供的访问入口。# 当访问这个入口的时候,可以发现后面有三个 pod 的服务在等待调用,# kube-proxy 会基于 rr(轮询)的策略,将请求分发到其中一个 pod 上去。# 这个规则会同时在集群内的所有节点上都生成,所以在任何一个节点上,都可以访问。[root@localhost k8s]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
# 没有对应规则,未开启。
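ipvs 模式需要手动开启。一种常见做法(这里只是示意,具体以集群实际配置为准)是修改 kube-proxy 的 configmap,把工作模式改为 ipvs,然后删除现有 kube-proxy pod 让其按新配置重建:
# 在 configmap 中将 mode 设置为 "ipvs"。
kubectl edit cm kube-proxy -n kube-system

# 删除现有的 kube-proxy pod,使其以 ipvs 模式重建。
kubectl delete pod -l k8s-app=kube-proxy -n kube-system

# 再次查看 ipvs 规则。
ipvsadm -Ln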
7.1.1 userspace 模式。
userspace 模式下,kube-proxy 会为每一个 Service 创建一个监听端口,发向 Cluster IP 的请求被 iptables 规则重定向到 kube-proxy 监听的端口上,kube-proxy 根据 LB 算法选择一个提供服务的 Pod 并和其建立连接,以将请求转发到 Pod 上。该模式下,kube-proxy 充当了一个四层负载均衡器的角色。由于 kube-proxy 运行在 userspace 中,在进行转发处理时会增加内核和用户空间之间的数据拷贝,虽然比较稳定,但是效率比较低。
7.1.2 iptables 模式。
iptables 模式下,kube-proxy 为 service 后端的每个 Pod 创建对应的 iptables 规则,直接将发向 Cluster IP 的请求重定向到一个 Pod IP。该模式下 kube-proxy 不承担四层负载均衡器的角色,只负责创建 iptables 规则。该模式的优点是较 userspace 模式效率更高,但不能提供灵活的 LB 策略,当后端 Pod 不可用时也无法进行重试。
Service 的资源清单文件如下。
kind: Service # 资源类型。
apiVersion: v1 # 资源版本。
metadata: # 元数据。
  name: service # 资源名称。
  namespace: dev # 命名空间。
spec: # 描述。
  selector: # 标签选择器,用于确定当前 service 代理哪些 pod。
    app: nginx
  type: # Service 类型,指定 service 的访问方式。
  clusterIP: # 虚拟服务的 ip 地址。
  sessionAffinity: # session 亲和性,支持 ClientIP、None 两个选项。
  ports: # 端口信息。
  - protocol: TCP
    port: 3017 # service 端口。
    targetPort: 5003 # pod 端口。
    nodePort: 31122 # 主机端口。
创建 service-clusterip.yaml 文件,内容如下。
apiVersion: v1
kind: Service
metadata:
  name: service-clusterip
  namespace: dev
spec:
  selector:
    app: nginx-pod
  clusterIP: 10.97.97.97 # service 的 ip 地址,如果不写,默认会生成一个。
  type: ClusterIP
  ports:
  - port: 80 # Service 端口。
    targetPort: 80 # pod 端口。
在某些场景中,开发人员可能不想使用 Service 提供的负载均衡功能,而希望自己来控制负载均衡策略。针对这种情况,kubernetes 提供了 Headless Service,这类 Service 不会分配 Cluster IP,如果想要访问 service,只能通过 service 的域名进行查询。
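service-headliness.yaml 的内容大致如下(这里根据后文的创建命令和查询结果整理,仅作示意):
apiVersion: v1
kind: Service
metadata:
  name: service-headliness
  namespace: dev
spec:
  selector:
    app: nginx-pod
  clusterIP: None # 将 clusterIP 设置为 None,即可创建 headless Service。
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80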
# 创建 service。[root@localhost k8s]# vim service-headliness.yaml[root@localhost k8s]# kubectl create -f service-headliness.yaml
service/service-headliness created
# 获取 service,发现 CLUSTER-IP 未分配。[root@localhost k8s]# kubectl get svc service-headliness -n dev -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service-headliness ClusterIP None <none>80/TCP 14s app=nginx-pod
# 查看 service 详情。[root@localhost k8s]# kubectl describe svc service-headliness -n dev
Name: service-headliness
Namespace: dev
Labels: <none>
Annotations: <none>
Selector: app=nginx-pod
Type: ClusterIP
IP: None
Port: <unset>80/TCP
TargetPort: 80/TCP
Endpoints: 10.244.1.62:80,10.244.1.63:80,10.244.2.64:80
Session Affinity: None
Events: <none># 查看域名的解析情况。进入其中一台 pod。[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginx-778cb5fb7b-dgntb 1/1 Running 1 3d4h
pc-deployment-6696798b78-9s6pb 1/1 Running 1 7h25m
pc-deployment-6696798b78-fz6xg 1/1 Running 1 7h25m
pc-deployment-6696798b78-gp6fs 1/1 Running 1 7h25m
[root@localhost k8s]# kubectl exec -it pc-deployment-6696798b78-9s6pb -n dev /bin/bash
root@pc-deployment-6696798b78-9s6pb:/# cat /etc/resolv.conf
nameserver 10.96.0.10
search dev.svc.cluster.local svc.cluster.local cluster.local localdomain.k8s.node1
options ndots:5
# @nameserver(域名服务器) + 服务名称(service 名称 + namespace(dev)【dev.svc.cluster.local】)[root@localhost k8s]# dig @10.96.0.10 service-headliness.dev.svc.cluster.local;<<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.10 <<>> @10.96.0.10 service-headliness.dev.svc.cluster.local
;(1 server found);; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 646;; flags: qr aa rd; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096;; QUESTION SECTION:
;service-headliness.dev.svc.cluster.local. IN A
;; ANSWER SECTION:
service-headliness.dev.svc.cluster.local. 30 IN A 10.244.1.63
service-headliness.dev.svc.cluster.local. 30 IN A 10.244.2.64
service-headliness.dev.svc.cluster.local. 30 IN A 10.244.1.62
;; Query time: 1 msec
;; SERVER: 10.96.0.10#53(10.96.0.10);; WHEN: Fri Dec 09 00:46:10 CST 2022;; MSG SIZE rcvd: 237
7.3.5 NodePort 类型的 Service。
在之前的样例中,创建的 Service 的 ip 地址只有集群内部才可以访问,如果希望将 Service 暴露给集群外部使用,那么就要使用到另外一种类型的 Service,称为 NodePort 类型。NodePort 的工作原理其实就是将 service 的端口映射到 Node 的一个端口上,然后就可以通过 NodeIp:NodePort 来访问 service 了。
创建 service-nodeport.yaml。
apiVersion: v1
kind: Service
metadata:
  name: service-nodeport
  namespace: dev
spec:
  selector:
    app: nginx-pod
  type: NodePort # service 类型。
  ports:
  - port: 80
    nodePort: 30002 # 指定绑定的 node 的端口(默认的取值范围是:30000-32767),如果不指定,会默认分配。
    targetPort: 80
# 创建 service。[root@localhost k8s]# vim service-nodeport.yaml[root@localhost k8s]# kubectl create -f service-nodeport.yaml
service/service-nodeport created
# 查看 service。[root@localhost k8s]# kubectl get svc -n dev -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service-nodeport NodePort 10.110.23.187 <none>80:30002/TCP 28s app=nginx-pod
# 接下来可以通过电脑主机的浏览器去访问集群中任意一个 nodeip 的 30002 端口,即可访问到 pod。192.168.142.150:30002
ExternalName 类型的 Service 用于引入集群外部的服务,它通过 externalName 属性指定外部一个服务的地址,然后在集群内部访问此 service 就可以访问到外部的服务了。
apiVersion: v1
kind: Service
metadata:
name: service-externalname
namespace: dev
spec:
type: ExternalName # service 类型。
externalName: www.baidu.com # 改成 ip 地址也可以。
# 创建 service。[root@localhost k8s]# kubectl create -f service-externalname.yaml
service/service-externalname created
# 域名解析。[root@localhost k8s]# dig @10.96.0.10 service-externalname.dev.svc.cluster.local;<<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.10 <<>> @10.96.0.10 service-externalname.dev.svc.cluster.local
;(1 server found);; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 16497;; flags: qr aa rd; QUERY: 1, ANSWER: 4, AUTHORITY: 0, ADDITIONAL: 1;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096;; QUESTION SECTION:
;service-externalname.dev.svc.cluster.local. IN A
;; ANSWER SECTION:
service-externalname.dev.svc.cluster.local. 5 IN CNAME www.baidu.com.
www.baidu.com. 5 IN CNAME www.a.shifen.com.
www.a.shifen.com. 5 IN A 112.80.248.75
www.a.shifen.com. 5 IN A 112.80.248.76
;; Query time: 10 msec
;; SERVER: 10.96.0.10#53(10.96.0.10);; WHEN: Fri Dec 09 14:34:23 CST 2022;; MSG SIZE rcvd: 247
# 创建文件夹。[root@k8s-master01 ~]# mkdir ingress-controller[root@k8s-master01 ~]# cd ingress-controller/# 获取 ingress-nginx,本次案例使用的是 0.30 版本。[root@k8s-master01 ingress-controller]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml[root@k8s-master01 ingress-controller]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yaml。# 修改 mandatory.yaml 文件中的仓库。# 修改 quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0# 为 quay-mirror.qiniu.com/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0# (不用修改了)。# 创建 ingress-nginx。[root@localhost k8s]# mkdir ingress_controller[root@localhost k8s]# cd ingress_controller/[root@localhost ingress_controller]# ls
mandatory.yaml service-nodeport.yaml
[root@localhost ingress_controller]# kubectl apply -f ./
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
limitrange/ingress-nginx created
service/ingress-nginx created
# 查看 ingress-nginx。[root@localhost ingress_controller]# kubectl get pod -n ingress-nginx
NAME READY STATUS RESTARTS AGE
nginx-ingress-controller-7f74f657bd-ng7fq 1/1 Running 0 50s
# 查看 service。[root@localhost ingress_controller]# kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx NodePort 10.105.121.237 <none>80:31476/TCP,443:31813/TCP 76s
# 创建。[root@localhost ingress_controller]# kubectl create -f tomcat-nginx.yaml
deployment.apps/nginx-deployment created
deployment.apps/tomcat-deployment created
service/nginx-service created
service/tomcat-service created
# 查看。[root@localhost ingress_controller]# kubectl get svc -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-service ClusterIP None <none>80/TCP 28s
tomcat-service ClusterIP None <none>8080/TCP 28s
[root@localhost ingress_controller]# kubectl get deployment -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 55s
tomcat-deployment 3/3 3 3 55s
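接下来就可以创建 Ingress 规则,把外部的 http 请求按域名转发到上面的两个 Service(下面的域名 nginx.example.com、tomcat.example.com 以及文件名 ingress-http.yaml 只是假设的示例值):
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-http
  namespace: dev
spec:
  rules:
  - host: nginx.example.com # 访问该域名的请求转发到 nginx-service。
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-service
          servicePort: 80
  - host: tomcat.example.com # 访问该域名的请求转发到 tomcat-service。
    http:
      paths:
      - path: /
        backend:
          serviceName: tomcat-service
          servicePort: 8080
创建后可以通过 kubectl get ingress -n dev 查看,并把域名解析(或本机 hosts)指向任意一个 node,再访问 ingress-nginx 对应的 NodePort 端口即可。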