Environment
192.168.50.53 k8s-master
192.168.50.51 k8s-node1
192.168.50.50 k8s-node2
Kubernetes cluster deployment
[root@localhost ~]# setenforce 0
[root@localhost ~]# iptables -F
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]# systemctl stop NetworkManager
[root@localhost ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
[root@localhost ~]# sed -i '/^SELINUX=/s/enforcing/disabled/' /etc/selinux/config
[root@localhost ~]# cat << EOF >> /etc/hosts
> 192.168.50.53 k8s-master
> 192.168.50.51 k8s-node1
> 192.168.50.50 k8s-node2
> EOF
[root@k8s-node2 ~]# yum -y install vim wget net-tools lrzsz
[root@k8s-node2 ~]# swapoff -a
[root@k8s-node2 ~]# sed -i '/swap/s/^/#/' /etc/fstab
[root@k8s-node2 ~]# cat << EOF >> /etc/sysctl.conf
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@k8s-node2 ~]# modprobe br_netfilter
[root@k8s-node2 ~]# sysctl -p
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
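Note: modprobe does not persist across reboots. A minimal way to have br_netfilter loaded automatically at boot (assuming systemd reads /etc/modules-load.d/) is:
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf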
[root@k8s-node2 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-node2 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-node2 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@k8s-node2 ~]# yum clean all && yum makecache fast
[root@k8s-node2 ~]# yum -y install docker-ce
[root@k8s-node2 ~]# systemctl start docker
[root@k8s-node2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-master ~]# cat << END > /etc/docker/daemon.json
> {
> "registry-mirrors":[ "https://nyakyfun.mirror.aliyuncs.com" ]
> }
> END
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker
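Optional: the "cgroupfs" cgroup-driver warning that kubeadm prints later during node join can be avoided by also selecting the systemd driver in daemon.json. A sketch of the combined file (an assumption, not part of the original run):
{
  "registry-mirrors": [ "https://nyakyfun.mirror.aliyuncs.com" ],
  "exec-opts": [ "native.cgroupdriver=systemd" ]
}
After changing it, restart Docker again with systemctl daemon-reload && systemctl restart docker.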
[root@k8s-master ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@k8s-node1 ~]# yum -y install kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0
[root@k8s-master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@k8s-master ~]# kubeadm config print init-defaults > init-config.yaml
[root@k8s-master ~]# vim init-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.50.53
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
[root@k8s-master ~]# kubeadm config images list --config init-config.yaml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.20.0
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns:1.7.0
[root@k8s-master ~]# kubeadm config images pull --config=init-config.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.20.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
Install the master node
[root@k8s-master ~]# echo "1" > /proc/sys/net/ipv4/ip_forward
Initialize the Kubernetes control plane:
[root@k8s-master ~]# kubeadm init --config=init-config.yaml
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.50.53:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:c10b72d9a9b83d9ff1352b24cb02921ecadae96426efca21a8454016ba3e3e3d
The initialization succeeded.
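Before kubectl can be used on the master, run the commands from the init output above:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config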
Install the worker nodes
Use the join command printed during the master initialization.
node1
[root@k8s-node1 ~]# kubeadm join 192.168.50.53:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:86298acd695d69555d30972005742018093d59bc2791b5397cf0edb777197330
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 24.0.5. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
node2 (the first two attempts fail because the shell continuation character ">" was pasted as part of the command, so kubeadm received an extra argument):
[root@k8s-node2 ~]# kubeadm join 192.168.50.53:6443 --token abcdef.0123456789abcdef \
> > --discovery-token-ca-cert-hash sha256:86298acd695d69555d30972005742018093d59bc2791b5397cf0edb777197330
accepts at most 1 arg(s), received 2
To see the stack trace of this error execute with --v=5 or higher
[root@k8s-node2 ~]# kubeadm join 192.168.50.53:6443 --token abcdef.0123456789abcdef > --discovery-token-ca-cert-hash sha256:86298acd695d69555d30972005742018093d59bc2791b5397cf0edb777197330
accepts at most 1 arg(s), received 2
To see the stack trace of this error execute with --v=5 or higher
[root@k8s-node2 ~]# kubeadm join 192.168.50.53:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:86298acd695d69555d30972005742018093d59bc2791b5397cf0edb777197330
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 24.0.5. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
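If the bootstrap token has expired (init-config.yaml sets its ttl to 24h), a fresh join command can be generated on the master:
kubeadm token create --print-join-command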
Check from the master
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES                  AGE   VERSION
k8s-master   NotReady   control-plane,master   51m   v1.20.0
k8s-node1    NotReady   <none>                 47m   v1.20.0
k8s-node2    NotReady   <none>                 8s    v1.20.0
As mentioned earlier, no network plugin was configured when k8s-master was initialized, so the master cannot yet communicate with the nodes and every node reports "NotReady". The nodes added with kubeadm join are nevertheless already visible on k8s-master.
Install flannel
The nodes are NotReady precisely because no network plugin is in use yet, so node-to-master networking is not fully functional. The most popular Kubernetes network plugins are Flannel, Calico, Canal, and Weave; flannel is chosen here.
On all hosts:
Upload kube-flannel.yml to the master and flannel_v0.12.0-amd64.tar to all hosts.
[root@k8s-master ~]# docker load < flannel_v0.12.0-amd64.tar
256a7af3acb1: Loading layer 5.844MB/5.844MB
d572e5d9d39b: Loading layer 10.37MB/10.37MB
57c10be5852f: Loading layer 2.249MB/2.249MB
7412f8eefb77: Loading layer 35.26MB/35.26MB
05116c9ff7bf: Loading layer 5.12kB/5.12kB
Loaded image: quay.io/coreos/flannel:v0.12.0-amd64
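Repeat the docker load on both worker nodes; the loaded image can be confirmed on each host with, for example:
docker images | grep flannel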
Install the CNI plugins on all three hosts
[root@k8s-master ~]# tar xf cni-plugins-linux-amd64-v0.8.6.tgz
[root@k8s-master ~]# cp flannel /opt/cni/bin/
[root@k8s-master ~]# rz
[root@k8s-master ~]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole
clusterrole.rbac.authorization.k8s.io/flannel created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
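The flannel DaemonSet pods can then be watched until they reach Running; one way (the pods carry the app=flannel label in this manifest) is:
kubectl get pods -n kube-system -l app=flannel -w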
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES                  AGE   VERSION
k8s-master   NotReady   control-plane,master   16h   v1.20.0
k8s-node1    Ready      <none>                 16h   v1.20.0
k8s-node2    Ready      <none>                 16h   v1.20.0
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   17h   v1.20.0
k8s-node1    Ready    <none>                 17h   v1.20.0
k8s-node2    Ready    <none>                 17h   v1.20.0
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7f89b7bc75-c7wxl 1/1 Running 0 10m
coredns-7f89b7bc75-zlc5l 1/1 Running 0 10m
etcd-k8s-master 1/1 Running 0 10m
kube-apiserver-k8s-master 1/1 Running 0 10m
kube-controller-manager-k8s-master 1/1 Running 0 10m
kube-flannel-ds-amd64-77wzn 1/1 Running 0 5m11s
kube-flannel-ds-amd64-hwsbr 1/1 Running 0 5m11s
kube-flannel-ds-amd64-nf2n6 1/1 Running 0 5m11s
kube-proxy-7wclg 1/1 Running 0 8m30s
kube-proxy-pl29j 1/1 Running 0 9m
kube-proxy-zxr8p 1/1 Running 0 10m
kube-scheduler-k8s-master 1/1 Running 0 10m
[root@k8s-master ~]# cat ./k8s/nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test3
  labels:
    app: web
spec:
  containers:
  - name: nginx02
    image: nginx:1.13
    ports:
    - containerPort: 80
  - name: busybox
    image: docker.io/busybox:latest
    command: ["sleep","3600"]
    ports:
    - containerPort: 80
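A sketch of how such a manifest would be submitted and checked (not part of the original transcript):
kubectl apply -f ./k8s/nginx.yaml
kubectl get pod test3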
A Deployment is a Kubernetes resource that manages the replica count and updates of a set of Pods. The following creates a Deployment with kubectl create:
[root@k8s-master ~]# kubectl create deployment nginx-dofdfd --image=nginx:1.14 --port=80 --replicas=2
deployment.apps/nginx-dofdfd created
Check:
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dofdfd-6b95f96b75-827jm 1/1 Running 0 2m26s
nginx-dofdfd-6b95f96b75-ksl5b 1/1 Running 0 2m26s
[root@k8s-master ~]# kubectl expose deployment nginx-dofdfd --port=80 --target-port=80 --name=nginx-service --type=NodePort
service/nginx-service exposed
[root@k8s-master ~]# kubectl get pods,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-dofdfd-6b95f96b75-827jm 1/1 Running 0 5m35s
pod/nginx-dofdfd-6b95f96b75-ksl5b 1/1 Running 0 5m35s
NAME                    TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
service/kubernetes      ClusterIP   10.96.0.1     <none>        443/TCP        78m
service/nginx-service   NodePort    10.99.2.229   <none>        80:32421/TCP   19s
The kubectl expose command creates a Service named nginx-service of type NodePort, with both the Service port and the target container port set to 80. Once the Service is created, kubectl get svc shows the externally exposed port.
Run kubectl get endpoints to see the endpoints behind the Service, i.e. the built-in load balancing across the Pods.
[root@k8s-master ~]# kubectl get endpoints
NAME ENDPOINTS AGE
kubernetes 192.168.50.53:6443 78m
nginx-service 10.244.1.7:80,10.244.2.8:80 71s
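The NodePort from the earlier output (32421) is served on every node, so the Service can be tested with, for example:
curl -I http://192.168.50.53:32421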
Adding -o wide to kubectl get pods shows which node each Pod actually landed on.
[root@k8s-master ~]# kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS   AGE    IP           NODE        NOMINATED NODE   READINESS GATES
nginx-dofdfd-6b95f96b75-827jm   1/1     Running   0          7m9s   10.244.2.8   k8s-node2   <none>           <none>
nginx-dofdfd-6b95f96b75-ksl5b   1/1     Running   0          7m9s   10.244.1.7   k8s-node1   <none>           <none>
Generally, production systems are continuously updated, maintained, and upgraded over time while staying compatible with older versions. Changing the Nginx version therefore calls for a rolling update.
Check the current nginx version first:
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dofdfd-6b95f96b75-827jm 1/1 Running 0 7m47s
nginx-dofdfd-6b95f96b75-ksl5b 1/1 Running 0 7m47s
[root@k8s-master ~]# kubectl exec nginx-dofdfd-6b95f96b75-827jm -- nginx -v
nginx version: nginx/1.14.2
[root@k8s-master ~]# kubectl exec nginx-dofdfd-6b95f96b75-ksl5b -- nginx -v
nginx version: nginx/1.14.2
Use kubectl set image to switch Nginx to version 1.19, then watch the rollout with kubectl get pods -w. The update itself is quick; the watch keeps running until interrupted with Ctrl+C.
[root@k8s-master ~]# kubectl set image deployment.apps/nginx-dofdfd nginx=nginx:1.19
deployment.apps/nginx-dofdfd image updated
[root@k8s-master ~]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
nginx-7cf55fb7bb-x9d26 0/1 ErrImagePull 0 25s
nginx-d9d8cf5c7-7wgk8 1/1 Running 0 118s
nginx-d9d8cf5c7-sdkxn 1/1 Running 0 118s
nginx-7cf55fb7bb-x9d26 0/1 ImagePullBackOff 0 28s
nginx-7cf55fb7bb-x9d26 1/1 Running 0 83s
nginx-d9d8cf5c7-7wgk8 1/1 Terminating 0 2m56s
nginx-7cf55fb7bb-s8gvs 0/1 Pending 0 0s
nginx-7cf55fb7bb-s8gvs 0/1 Pending 0 0s
nginx-7cf55fb7bb-s8gvs 0/1 ContainerCreating 0 0s
nginx-d9d8cf5c7-7wgk8 0/1 Terminating 0 2m57s
nginx-d9d8cf5c7-7wgk8 0/1 Terminating 0 3m6s
nginx-d9d8cf5c7-7wgk8 0/1 Terminating 0 3m6s
nginx-7cf55fb7bb-s8gvs 1/1 Running 0 29s
nginx-d9d8cf5c7-sdkxn 1/1 Terminating 0 3m25s
nginx-d9d8cf5c7-sdkxn 0/1 Terminating 0 3m26s
nginx-d9d8cf5c7-sdkxn 0/1 Terminating 0 4m6s
nginx-d9d8cf5c7-sdkxn 0/1 Terminating 0 4m6s
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-7cf55fb7bb-s8gvs 1/1 Running 0 3m17s
nginx-7cf55fb7bb-x9d26 1/1 Running 0 4m40s
Check the version (the first attempt below fails because "--nginx" was typed instead of "-- nginx"):
[root@k8s-master ~]# kubectl exec nginx-7cf55fb7bb-s8gvs --nginx -v
Error: unknown flag: --nginx
See 'kubectl exec --help' for usage.
[root@k8s-master ~]# kubectl exec nginx-7cf55fb7bb-s8gvs -- nginx -v
nginx version: nginx/1.19.10
[root@k8s-master ~]# kubectl exec nginx-7cf55fb7bb-x9d26 -- nginx -v
nginx version: nginx/1.19.10
To roll back to the previous version, use kubectl rollout. View its help with --help:
[root@k8s-master ~]# kubectl rollout --help
Manage the rollout of a resource.
Valid resource types include:
* deployments
* daemonsets
* statefulsets
Examples:
# Rollback to the previous deployment
kubectl rollout undo deployment/abc
# Check the rollout status of a daemonset
kubectl rollout status daemonset/foo
Available Commands:
  history     View rollout history
  pause       Mark the provided resource as paused
  restart     Restart a resource
  resume      Resume a paused resource
  status      Show the status of the rollout
  undo        Undo a previous rollout
Usage:
kubectl rollout SUBCOMMAND [options]
Use "kubectl
--help" for more information about a given command. Use "kubectl options" for a list of global command-line options (applies to all commands).
View the rollout history with the history subcommand:
[root@k8s-master ~]# kubectl rollout history deployment.apps/nginx
deployment.apps/nginx
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
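CHANGE-CAUSE shows <none> because no change cause was recorded. With kubectl 1.20, the (since-deprecated) --record flag captures the command that triggered each revision, e.g.:
kubectl set image deployment.apps/nginx nginx=nginx:1.19 --record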
Roll back with undo:
[root@k8s-master ~]# kubectl rollout undo deployment.apps/nginx
deployment.apps/nginx rolled back
Verify the rollback with status:
[root@k8s-master ~]# kubectl rollout status deployment.apps/nginx
deployment "nginx" successfully rolled out
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-d9d8cf5c7-954pg 1/1 Running 0 76s
nginx-d9d8cf5c7-vh9pr 1/1 Running 0 77s
Check the version:
[root@k8s-master ~]# kubectl exec nginx-d9d8cf5c7-954pg -- nginx -v
nginx version: nginx/1.14.2
[root@k8s-master ~]# kubectl exec nginx-d9d8cf5c7-vh9pr -- nginx -v
nginx version: nginx/1.14.2
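undo with no arguments returns to the previous revision; a specific revision from the history can also be targeted:
kubectl rollout undo deployment.apps/nginx --to-revision=1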
The steps above covered create, publish, update, and rollback; deletion comes next. Use kubectl get deploy to view Deployment information and kubectl delete to remove the Deployment; use kubectl get svc to view Service information and kubectl delete to remove the Service.
[root@k8s-master ~]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 2/2 2 2 12m
Delete:
[root@k8s-master ~]# kubectl delete deployment/nginx
deployment.apps "nginx" deleted
[root@k8s-master ~]# kubectl get deploy
No resources found in default namespace.
[root@k8s-master ~]# kubectl get pods
No resources found in default namespace.
[root@k8s-master ~]# kubectl get svc
NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes      ClusterIP   10.96.0.1       <none>        443/TCP        92m
nginx-service   NodePort    10.108.70.195   <none>        80:30618/TCP   12m
[root@k8s-master ~]# kubectl delete svc/nginx-service
service "nginx-service" deleted
[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   93m
[root@k8s-master ~]# kubectl create nginx --image=nginx:latest --port=80 --replicas=3
Error: unknown flag: --image
See 'kubectl create --help' for usage.
Create the containers again (the failed command above omitted the resource type; kubectl create deployment is required):
[root@k8s-master ~]# kubectl create deployment nginx-dofdfd --image=nginx:1.14 --port=80 --replicas=2
deployment.apps/nginx-dofdfd created
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dofdfd-6b95f96b75-hnscl 1/1 Running 0 10s
nginx-dofdfd-6b95f96b75-mpdst 1/1 Running 0 10s
kubectl describe shows detailed information about a container's resources and state, including any error events.
[root@k8s-master ~]# kubectl describe pod nginx-dofdfd-6b95f96b75-hnscl
Name: nginx-dofdfd-6b95f96b75-hnscl
Namespace: default
Priority: 0
Node: k8s-node1/192.168.50.51
Start Time: Mon, 14 Aug 2023 18:12:49 +0800
Labels: app=nginx-dofdfd
pod-template-hash=6b95f96b75
Annotations: <none>
Status: Running
IP: 10.244.2.10
IPs:
IP: 10.244.2.10
Controlled By: ReplicaSet/nginx-dofdfd-6b95f96b75
Containers:
nginx:
Container ID: docker://87d1e5e0272be40bdad2f138469097724f7e5f3d7c1b64a10531bae7878a1746
Image: nginx:1.14
Image ID: docker-pullable://nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Mon, 14 Aug 2023 18:12:49 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-bjmfr (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-bjmfr:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-bjmfr
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 43s default-scheduler Successfully assigned default/nginx-dofdfd-6b95f96b75-hnscl to k8s-node1
Normal Pulled 43s kubelet Container image "nginx:1.14" already present on machine
Normal Created 43s kubelet Created container nginx
Normal Started 43s kubelet Started container nginx
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dofdfd-6b95f96b75-hnscl 1/1 Running 0 6m42s
nginx-dofdfd-6b95f96b75-mpdst 1/1 Running 0 6m42s
nginx-dofdfdsdfsdf-6bb4488b6f-5hvvq 1/1 Running 0 4m35s
nginx-dofdfdsdfsdf-6bb4488b6f-hmk24 1/1 Running 0 4m35s
Use exec to enter a Pod and run commands inside it:
[root@k8s-master ~]# kubectl exec -it nginx-dofdfd-6b95f96b75-hnscl bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-dofdfd-6b95f96b75-hnscl:/# /etc/init.d/nginx status
[ ok ] nginx is running.
root@nginx-dofdfd-6b95f96b75-hnscl:/# nginx -v
nginx version: nginx/1.14.2
root@nginx-dofdfd-6b95f96b75-hnscl:/# exit
exit
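To inspect a container's output without entering the Pod, kubectl logs can be used, for example:
kubectl logs nginx-dofdfd-6b95f96b75-hnscl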
In Kubernetes, YAML-format files can be used to create Pods that match the desired state; such YAML files are generally called resource manifests.
YAML is a highly readable language for serializing data. The name is a recursive acronym for "YAML Ain't a Markup Language". While the language was being developed it actually stood for "Yet Another Markup Language", but it was renamed with the reverse acronym to emphasize that the language is data-centric rather than markup-oriented. Two aspects matter here: (1) its basic syntax and (2) the data structures it supports.
The previous sections created container resources with kubectl commands. That approach is simple, intuitive, and fast to pick up, which makes it suitable for temporary tests and experiments. Besides the command approach, container resources can also be created from YAML configuration files.
Creating container resources from YAML files has the advantage that the configuration file is a template for the resource: deployments become repeatable and can be managed like code, which suits formal, cross-environment, large-scale deployments.
Run kubectl api-versions to list the API versions supported by the server, in group/version format:
[root@k8s-master ~]# kubectl api-versions
admissionregistration.k8s.io/v1
admissionregistration.k8s.io/v1beta1
apiextensions.k8s.io/v1
apiextensions.k8s.io/v1beta1
apiregistration.k8s.io/v1
apiregistration.k8s.io/v1beta1
apps/v1
authentication.k8s.io/v1
authentication.k8s.io/v1beta1
authorization.k8s.io/v1
authorization.k8s.io/v1beta1
autoscaling/v1
autoscaling/v2beta1
autoscaling/v2beta2
batch/v1
batch/v1beta1
certificates.k8s.io/v1
certificates.k8s.io/v1beta1
coordination.k8s.io/v1
coordination.k8s.io/v1beta1
discovery.k8s.io/v1beta1
events.k8s.io/v1
events.k8s.io/v1beta1
extensions/v1beta1
flowcontrol.apiserver.k8s.io/v1beta1
networking.k8s.io/v1
networking.k8s.io/v1beta1
node.k8s.io/v1
node.k8s.io/v1beta1
policy/v1beta1
rbac.authorization.k8s.io/v1
rbac.authorization.k8s.io/v1beta1
scheduling.k8s.io/v1
scheduling.k8s.io/v1beta1
storage.k8s.io/v1
storage.k8s.io/v1beta1
v1
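When writing manifests, kubectl explain documents the fields of any resource and which apiVersion they belong to, e.g.:
kubectl explain deployment.spec.replicas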
Before creating the Deployment resource manifest, create a demo directory to hold the manifest files. The nginx-deployment.yaml manifest defines the information below (field-by-field comments are shown inline in the file).
[root@k8s-master ~]# mkdir demo
[root@k8s-master ~]# cd demo
[root@k8s-master demo]# vim nginx-deployment.yaml
apiVersion: apps/v1          # API version
kind: Deployment             # resource type
metadata:                    # metadata for the resource
  name: nginx-deployment     # name
  labels:                    # labels
    app: nginx
spec:                        # detailed specification
  replicas: 3                # number of replicas
  selector:                  # selector
    matchLabels:
      app: nginx
  template:                  # Pod template
    metadata:
      labels:
        app: nginx
    spec:                    # container specification
      containers:
      - name: nginx
        image: nginx:1.19.6
        ports:
        - containerPort: 80
After the Deployment manifest is written, run kubectl create -f against it to create the containers. kubectl get pods then shows that the Pod resources were created automatically.
[root@k8s-master demo]# kubectl create -f nginx-deployment.yaml
deployment.apps/nginx-deployment created
[root@k8s-master demo]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-76ccf9dd9d-kvw65 1/1 Running 0 19m
nginx-deployment-76ccf9dd9d-z4tnq 1/1 Running 0 19m
nginx-deployment-76ccf9dd9d-4kxkx 1/1 Running 0 19m
nginx-dofdfd-6b95f96b75-mpdst 1/1 Running 0 18s
nginx-dofdfdsdfsdf-6bb4488b6f-5hvvq 1/1 Running 0 18s
nginx-dofdfdsdfsdf-6bb4488b6f-hmk24 1/1 Running 0 18s
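For file-driven deployments, kubectl apply -f can be used instead of create, so later edits to the same manifest are applied in place rather than rejected as already existing:
kubectl apply -f nginx-deployment.yaml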
Create the Service resource manifest
The nginx-service manifest defines a Service named nginx-service with label selector app: nginx and type NodePort, which lets external traffic reach the containers. The ports list defines the exposed ports: the externally accessible port is 80 and the container port is also 80.
[root@k8s-master demo]# vim nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
[root@k8s-master demo]# kubectl create -f nginx-service.yaml
service/nginx-service created
[root@k8s-master demo]# kubectl get svc
NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes      ClusterIP   10.96.0.1       <none>        443/TCP        61m
nginx-service   NodePort    10.96.198.171   <none>        80:31249/TCP   5s
Access test
[root@k8s-master ~]# elinks --dump http://192.168.50.53:31249
                               Welcome to nginx!
   If you see this page, the nginx web server is successfully installed and
   working. Further configuration is required.
   For online documentation and support please refer to [1]nginx.org.
   Commercial support is available at [2]nginx.com.
   Thank you for using nginx.
References
   Visible links
   1. http://nginx.org/
   2. http://nginx.com/
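Because the Service type is NodePort, the same port is open on every node, so the page is also reachable through the workers, e.g.:
elinks --dump http://192.168.50.51:31249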
3. Generating YAML templates with kubectl
Create a deployment template
[root@k8s-master ~]# kubectl create deployment tomcat --image=tomcat:8 -o yaml --dry-run > tomcat-deployment.yaml
View the generated template:
[root@k8s-master ~]# vim tomcat-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: tomcat
  name: tomcat
spec:
  replicas: 3
  selector:
    matchLabels:
      app: tomcat
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: tomcat
    spec:
      containers:
      - image: tomcat:8
        name: tomcat
        resources: {}
status: {}
Create the tomcat container resources
[root@k8s-master ~]# kubectl create -f tomcat-deployment.yaml
deployment.apps/tomcat created
Check the status
[root@k8s-master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
tomcat-7f89f4bb87-6swmt 1/1 Running 0 4m36s
tomcat-7f89f4bb87-rbzfb 1/1 Running 0 4m36s
tomcat-7f89f4bb87-rkmtr 1/1 Running 0 4m36s
Create a service template
[root@k8s-master ~]# kubectl expose deployment tomcat --port=8080 --target-port=8080 --type=NodePort -o yaml --dry-run >tomcat_service.yaml
Edit the service template
[root@k8s-master ~]# vim tomcat_service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: tomcat
  name: tomcat
spec:
  ports:
  - port: 8080        # Service port (the node port is auto-assigned)
    protocol: TCP
    targetPort: 8080  # container port
  selector:
    app: tomcat
  type: NodePort
status:
  loadBalancer: {}
Create the service resource
[root@k8s-master ~]# kubectl create -f tomcat_service.yaml
service/tomcat created
Check the status
[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP          9h
tomcat       NodePort    10.96.7.194   <none>        8080:32707/TCP   8s
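The tomcat Service can be checked the same way through the NodePort shown above (32707 here), e.g. verifying that the port answers:
curl -I http://192.168.50.53:32707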