1. Installation script
#!/bin/bash
systemctl stop firewalld
systemctl disable firewalld
yum remove firewalld -y
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i 's/^SELINUX=permissive/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/^SELINUX=permissive/SELINUX=disabled/g' /etc/selinux/config
# (the bridge-nf-call sysctls are written together with the rest of /etc/sysctl.d/k8s.conf below)
yum install wget ipset ipvsadm chrony -y
systemctl enable chronyd
systemctl start chronyd
mkdir -p /etc/yum.repos.d/bak
mv /etc/yum.repos.d/CentOS* /etc/yum.repos.d/bak
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/Centos-7.repo
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/epel-7.repo
cat > /etc/sysconfig/modules/ipvs.modules << EOR
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr
ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo
ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOR
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
cat > /etc/sysctl.d/k8s.conf << EOR
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOR
sysctl --system
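The net.bridge.* keys above only resolve once the br_netfilter module is loaded; a quick sanity check (standard commands, not part of the original script):
modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward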
# NOTE: typical contents below; adjust mirrors and versions to your environment
cat > /etc/yum.repos.d/kubernetes.repo << EOR
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOR
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6 -y
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOR
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOR
systemctl enable docker kubelet
systemctl restart docker
cat > /etc/sysconfig/kubelet << EOR
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
EOR
IP=$(hostname -I | awk '{print $1}')   # assumed: use the host's primary IP for advertiseAddress below
kubeadm config print init-defaults > kubeadm.yaml
sed -i s#"^ *ttl: .*"#" ttl: \"0\""#g kubeadm.yaml
sed -i s#"^ *advertiseAddress.*"#" advertiseAddress: $IP"#g kubeadm.yaml
sed -i s#^kubernetesVersion.*#"kubernetesVersion: v1.14.10"#g kubeadm.yaml
sed -i s#"^ *podSubnet: ".*""#" podSubnet: \"10.10.0.0/16\""#g kubeadm.yaml
sed -i s#^imageRepository.*#"imageRepository: registry.aliyuncs.com/google_containers"#g kubeadm.yaml
# Append a KubeProxyConfiguration enabling IPVS (assumed block; matches the ipvs modules loaded above)
cat >> kubeadm.yaml << EOR
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOR
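With kubeadm.yaml assembled, the cluster would then be initialized from the config file (a sketch of the intended final step, assuming the reconstruction above):
kubeadm init --config kubeadm.yaml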
2. Master node initialization, flannel installation, and joining worker nodes to the master
# Initialize the master node
kubeadm init --kubernetes-version=v1.23.6 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
# Install flannel on the master; if the network blocks the URL, download the manifest yourself, upload it, and apply it directly
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
vim kube-flannel.yml
kubectl apply -f kube-flannel.yml
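Before joining nodes it is worth confirming flannel came up; a hedged check (the namespace is kube-flannel in recent manifests, kube-system in older ones):
kubectl get pods -n kube-flannel -o wide
kubectl get nodes    # nodes turn Ready once the CNI is running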
# Join worker nodes to the master. The token and sha256 hash are regenerated for every new cluster; record them so they are not lost
kubeadm join 192.168.0.15:6443 --token 27g8c2.eg3w52d52s0sgqzc \
--discovery-token-ca-cert-hash sha256:c95e87f90627671eaa828f302771c8a9a17698785e21ea997b7043974f44a135 --ignore-preflight-errors=Swap
# View node resource details
kubectl describe node master
# View the cluster control plane and CoreDNS addresses
kubectl cluster-info
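Since the install script loads the IPVS modules, it is worth verifying which proxy mode kube-proxy actually uses; two standard checks:
kubectl -n kube-system get configmap kube-proxy -o yaml | grep mode
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i proxier    # e.g. "Using ipvs Proxier" or "Using iptables Proxier"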
3. Common commands
systemctl daemon-reload
systemctl restart docker
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
# Join the master node
kubeadm join 192.168.0.15:6443 --token 27g8c2.eg3w52d52s0sgqzc \
--discovery-token-ca-cert-hash sha256:c95e87f90627671eaa828f302771c8a9a17698785e21ea997b7043974f44a135 --ignore-preflight-errors=Swap
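The bootstrap token expires after 24h by default; if it is lost or expired, a fresh join command can be printed on the master:
kubeadm token create --print-join-command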
## Let the kubelet tolerate swap (see below for turning swap off entirely)
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
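For the flag to take effect the kubelet must be restarted; and to actually turn swap off (what kubeadm expects by default), roughly:
swapoff -a                              # turn swap off immediately
sed -i '/ swap / s/^/#/' /etc/fstab     # keep it off across reboots (pattern is a sketch; check your fstab)
systemctl daemon-reload && systemctl restart kubelet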
# kubeadm init
kubeadm init --kubernetes-version=v1.23.6 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
# Reset kubeadm (wipes this node's cluster state)
kubeadm reset
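kubeadm reset leaves some state behind; its own output suggests cleaning up manually, along these lines:
rm -rf /etc/cni/net.d $HOME/.kube/config
iptables -F && iptables -t nat -F && iptables -X
ipvsadm --clear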
kubectl get nodes    # Check cluster node status
kubectl get pods -n kube-system -o wide    # List pods in the kube-system namespace
# Create a deployment
kubectl create deployment myapp --image=ikubernetes/myapp:v1
# Scale out to 4 pods
kubectl scale --replicas=4 deployment myapp
# View a pod's YAML
kubectl get pod myapp-9cbc4cf76-zr9nf -o yaml
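To pull a single field instead of the whole object, jsonpath output works (pod name taken from above):
kubectl get pod myapp-9cbc4cf76-zr9nf -o jsonpath='{.status.podIP}{"\n"}'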
# Show labels
[root@master ~]# kubectl get pods --show-labels
NAME                            READY   STATUS    RESTARTS     AGE     LABELS
myapp                           1/1     Running   4 (2d ago)   3d19h   run=myapp
myapp-9cbc4cf76-zr9nf           1/1     Running   0            44h     app=myapp,pod-template-hash=9cbc4cf76
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d ago)   3d20h   app=nginx-deploy,pod-template-hash=6c758c8d46
# List pods that have an app label (whatever its value)
[root@master ~]# kubectl get pods -l app --show-labels
NAME                            READY   STATUS    RESTARTS     AGE     LABELS
myapp-9cbc4cf76-zr9nf           1/1     Running   0            44h     app=myapp,pod-template-hash=9cbc4cf76
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d ago)   3d20h   app=nginx-deploy,pod-template-hash=6c758c8d46
[root@master ~]# kubectl get pods -L app,run
NAME                            READY   STATUS    RESTARTS     AGE     APP            RUN
myapp                           1/1     Running   4 (2d ago)   3d19h                  myapp
myapp-9cbc4cf76-zr9nf           1/1     Running   0            44h     myapp
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d ago)   3d20h   nginx-deploy
# Create a pod from a manifest file
[root@master home]# kubectl create -f pod-demo.yaml
pod/pod-demo created
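For reference, a minimal pod-demo.yaml consistent with the 2/2 READY output below (assumed contents with a busybox sidecar; not the author's exact file):
cat > pod-demo.yaml << 'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  labels:
    app: myapp
    release: stable
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: busybox
    image: busybox:latest
    command: ["/bin/sh", "-c", "sleep 3600"]
EOF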
# Add a label to a pod
[root@master home]# kubectl label pods nginx-deploy-6c758c8d46-ff2rp release=canary
pod/nginx-deploy-6c758c8d46-ff2rp labeled
# Add a label to pod-demo (use --overwrite to change an existing label)
[root@master home]# kubectl label pods pod-demo release=canary
pod/pod-demo labeled
# List pods that have a release label
[root@master home]# kubectl get pods -l release
NAME                            READY   STATUS    RESTARTS       AGE
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d3h ago)   3d23h
pod-demo                        2/2     Running   2 (29m ago)    149m
# List pods whose release label is canary
[root@master home]# kubectl get pods -l release=canary
NAME                            READY   STATUS    RESTARTS       AGE
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d3h ago)   3d23h
# List pods that have both release and app labels
[root@master home]# kubectl get pods -l release,app
NAME                            READY   STATUS    RESTARTS       AGE
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d3h ago)   3d23h
pod-demo                        2/2     Running   2 (31m ago)    151m
# List pods with release=stable and app=myapp
[root@master home]# kubectl get pods -l release=stable,app=myapp
NAME       READY   STATUS    RESTARTS      AGE
pod-demo   2/2     Running   2 (32m ago)   152m
# Set-based selector: pods whose release is any of the listed values
[root@master home]# kubectl get pods -l "release in (canary,beta,alpha)"
NAME                            READY   STATUS    RESTARTS       AGE
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d3h ago)   3d23h
# Set-based selector negated (notin): pods whose release is none of the listed values
[root@master home]# kubectl get pods -l "release notin (canary,beta,alpha)"
NAME                    READY   STATUS    RESTARTS       AGE
myapp                   1/1     Running   4 (2d3h ago)   3d22h
myapp-9cbc4cf76-zr9nf   1/1     Running   0              47h
pod-demo                2/2     Running   2 (42m ago)    162m
# Show node labels
[root@master home]# kubectl get nodes --show-labels
NAME     STATUS   ROLES                  AGE    VERSION   LABELS
master   Ready    control-plane,master   4d3h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
node01   Ready    <none>                 4d2h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux
node02   Ready    <none>                 4d2h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node02,kubernetes.io/os=linux
# Label node01
[root@master home]# kubectl label nodes node01 disktype=ssd
node/node01 labeled
[root@master home]# kubectl get nodes --show-labels
NAME     STATUS   ROLES                  AGE    VERSION   LABELS
master   Ready    control-plane,master   4d3h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
node01   Ready    <none>                 4d2h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux
node02   Ready    <none>                 4d2h   v1.23.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node02,kubernetes.io/os=linux
# Check which node each pod is running on
[root@master home]# kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS        AGE     IP            NODE     NOMINATED NODE   READINESS GATES
myapp                           1/1     Running   4 (2d3h ago)    3d23h   10.244.1.7    node01   <none>           <none>
myapp-9cbc4cf76-zr9nf           1/1     Running   0               47h     10.244.1.12   node01   <none>           <none>
nginx-deploy-6c758c8d46-ff2rp   1/1     Running   4 (2d3h ago)    3d23h   10.244.2.6    node02   <none>           <none>
pod-demo                        2/2     Running   3 (9m15s ago)   3h9m    10.244.2.14   node02   <none>           <none>
# Show detailed information for one or more resources
[root@master home]# kubectl describe pods pod-demo
# Patch the deployment up to 5 pods
[root@master home]# kubectl patch deployment myapp-deploy -p '{"spec":{"replicas":5}}'
deployment.apps/myapp-deploy patched
# Canary update: change the image, then pause the rollout so only the first new pod rolls out
[root@master home]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy
deployment.apps/myapp-deploy image updated
deployment.apps/myapp-deploy paused
# If the canary is healthy, resume the rollout to update the remaining pods
[root@master home]# kubectl rollout resume deployment myapp-deploy
deployment.apps/myapp-deploy resumed
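The resumed rollout can be followed until every replica is on the new image:
kubectl rollout status deployment myapp-deploy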
# View rollout revision history
[root@master home]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
3         <none>
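A bad revision can be rolled back with the same subcommand family (revision 2 here is only an example):
kubectl rollout undo deployment myapp-deploy --to-revision=2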
# Watch pod status in real time during the update
[root@master home]# kubectl get pods -l app=myapp -w
NAME                            READY   STATUS              RESTARTS   AGE
myapp-deploy-68c554dfcb-kjp94   1/1     Running             0          27s
myapp-deploy-68c554dfcb-pgt56   1/1     Running             0          27s
myapp-deploy-68c554dfcb-tvr2h   0/1     ContainerCreating   0          27s
myapp-deploy-9f9b5bd95-f9bqr    1/1     Running             0          20m
myapp-deploy-9f9b5bd95-g69js    1/1     Running             0          15m
# Check which image version each ReplicaSet tracks
[root@master home]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-68c554dfcb   5         5         5       3m29s   myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=68c554dfcb,release=canary
myapp-deploy-6bb4786bcd   0         0         0       28m     myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=6bb4786bcd,release=canary
myapp-deploy-9f9b5bd95    0         0         0       23m     myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=9f9b5bd95,release=canary
# List every resource Kind and the apiVersion (API group) it belongs to
[root@master ~]# kubectl api-resources
NAME                              SHORTNAMES   APIVERSION                             NAMESPACED   KIND
bindings                                       v1                                     true         Binding
componentstatuses                 cs           v1                                     false        ComponentStatus
configmaps                        cm           v1                                     true         ConfigMap
endpoints                         ep           v1                                     true         Endpoints
events                            ev           v1                                     true         Event
limitranges                       limits       v1                                     true         LimitRange
namespaces                        ns           v1                                     false        Namespace
nodes                             no           v1                                     false        Node
persistentvolumeclaims            pvc          v1                                     true         PersistentVolumeClaim
persistentvolumes                 pv           v1                                     false        PersistentVolume
pods                              po           v1                                     true         Pod
podtemplates                                   v1                                     true         PodTemplate
replicationcontrollers            rc           v1                                     true         ReplicationController
resourcequotas                    quota        v1                                     true         ResourceQuota
secrets                                        v1                                     true         Secret
serviceaccounts                   sa           v1                                     true         ServiceAccount
services                          svc          v1                                     true         Service
mutatingwebhookconfigurations                  admissionregistration.k8s.io/v1        false        MutatingWebhookConfiguration
validatingwebhookconfigurations                admissionregistration.k8s.io/v1        false        ValidatingWebhookConfiguration
customresourcedefinitions         crd,crds     apiextensions.k8s.io/v1                false        CustomResourceDefinition
apiservices                                    apiregistration.k8s.io/v1              false        APIService
controllerrevisions                            apps/v1                                true         ControllerRevision
daemonsets                        ds           apps/v1                                true         DaemonSet
deployments                       deploy       apps/v1                                true         Deployment
replicasets                       rs           apps/v1                                true         ReplicaSet
statefulsets                      sts          apps/v1                                true         StatefulSet
tokenreviews                                   authentication.k8s.io/v1               false        TokenReview
localsubjectaccessreviews                      authorization.k8s.io/v1                true         LocalSubjectAccessReview
selfsubjectaccessreviews                       authorization.k8s.io/v1                false        SelfSubjectAccessReview
selfsubjectrulesreviews                        authorization.k8s.io/v1                false        SelfSubjectRulesReview
subjectaccessreviews                           authorization.k8s.io/v1                false        SubjectAccessReview
horizontalpodautoscalers          hpa          autoscaling/v2                         true         HorizontalPodAutoscaler
cronjobs                          cj           batch/v1                               true         CronJob
jobs                                           batch/v1                               true         Job
certificatesigningrequests        csr          certificates.k8s.io/v1                 false        CertificateSigningRequest
leases                                         coordination.k8s.io/v1                 true         Lease
endpointslices                                 discovery.k8s.io/v1                    true         EndpointSlice
events                            ev           events.k8s.io/v1                       true         Event
flowschemas                                    flowcontrol.apiserver.k8s.io/v1beta2   false        FlowSchema
prioritylevelconfigurations                    flowcontrol.apiserver.k8s.io/v1beta2   false        PriorityLevelConfiguration
ingressclasses                                 networking.k8s.io/v1                   false        IngressClass
ingresses                         ing          networking.k8s.io/v1                   true         Ingress
networkpolicies                   netpol       networking.k8s.io/v1                   true         NetworkPolicy
runtimeclasses                                 node.k8s.io/v1                         false        RuntimeClass
poddisruptionbudgets              pdb          policy/v1                              true         PodDisruptionBudget
podsecuritypolicies               psp          policy/v1beta1                         false        PodSecurityPolicy
clusterrolebindings                            rbac.authorization.k8s.io/v1           false        ClusterRoleBinding
clusterroles                                   rbac.authorization.k8s.io/v1           false        ClusterRole
rolebindings                                   rbac.authorization.k8s.io/v1           true         RoleBinding
roles                                          rbac.authorization.k8s.io/v1           true         Role
priorityclasses                   pc           scheduling.k8s.io/v1                   false        PriorityClass
csidrivers                                     storage.k8s.io/v1                      false        CSIDriver
csinodes                                       storage.k8s.io/v1                      false        CSINode
csistoragecapacities                           storage.k8s.io/v1beta1                 true         CSIStorageCapacity
storageclasses                    sc           storage.k8s.io/v1                      false        StorageClass
volumeattachments                              storage.k8s.io/v1                      false        VolumeAttachment
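Any Kind in this table can be drilled into field by field with kubectl explain, e.g.:
kubectl explain deployment.spec.strategy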
# ingress-nginx images (k8s.gcr.io may be unreachable; the anjia0532 names below are mirror equivalents)
k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
anjia0532/google-containers.ingress-nginx.kube-webhook-certgen:v1.1.1
k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185
anjia0532/google-containers.ingress-nginx.controller:v1.2.0
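Assumed usage of these mirror names: pull from the mirror, then retag to the name the ingress-nginx manifest expects (the @sha256 digest must also be dropped from the manifest for the retagged image to match):
docker pull anjia0532/google-containers.ingress-nginx.controller:v1.2.0
docker tag anjia0532/google-containers.ingress-nginx.controller:v1.2.0 k8s.gcr.io/ingress-nginx/controller:v1.2.0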
Resource manifests:
  Standalone Pod resources
  Manifest format:
    Top-level fields: apiVersion (group/version), kind, metadata (name, namespace, labels, annotations, ...), spec, status (read-only)
  Pod resources:
    spec.containers <[]object>
    - name
      image
      imagePullPolicy
        Always, Never, IfNotPresent
    Overriding the image's default command:
      command, args
      https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/
  Labels:
    key=value
    key: letters, digits, _, -, .
    value: may be empty; must begin and end with a letter or digit; may contain _, -, . in between
  Label selectors:
    Equality-based: =, ==, !=
    Set-based:
      KEY in (VALUE1,VALUE2,...)
      KEY notin (VALUE1,VALUE2,...)
      KEY
      !KEY
  Many resources embed fields that define the label selector they use (see the sketch after this outline):
    matchLabels: give key/value pairs directly
    matchExpressions: define the selector with expressions, {key:"KEY", operator:"OPERATOR", values:[VAL1,VAL2,...]}
      Operators:
        In, NotIn: the values field must be a non-empty list
        Exists, DoesNotExist: the values field must be an empty list
  nodeSelector
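A minimal sketch tying matchLabels, matchExpressions and nodeSelector together (hypothetical names; disktype=ssd matches the node01 label set earlier):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: demo
    matchExpressions:
    - {key: release, operator: In, values: [canary, beta]}
  template:
    metadata:
      labels:
        app: demo
        release: canary
    spec:
      nodeSelector:
        disktype: ssd        # only schedule onto nodes labeled disktype=ssd
      containers:
      - name: demo
        image: ikubernetes/myapp:v1
        imagePullPolicy: IfNotPresent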
# Fix nginx 504 gateway timeouts when proxying file uploads
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
fastcgi_buffer_size 64k;
fastcgi_buffers 4 64k;
fastcgi_busy_buffers_size 128k;
fastcgi_temp_file_write_size 256k;
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
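This RBAC manifest covers only the permissions; the nfs-client-provisioner Deployment and its StorageClass are deployed separately. Applying it would be (filename assumed):
kubectl apply -f nfs-rbac.yaml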