Notes on some commonly used Kubernetes YAML manifests

apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  containers:
    - name: nginx-pod
      image: nginx:latest
      imagePullPolicy: IfNotPresent
      ports:
        - name: nginx-port
          containerPort: 80
      resources:
        requests:
          memory: 100Mi
          cpu: 100m # 0.1 CPU core
        limits:
          memory: 150Mi   # limits must be greater than or equal to requests
          cpu: 200m       # 0.2 CPU core
      readinessProbe:  # checks whether the service is ready to receive traffic (i.e. can be put behind the load balancer / Service)
        tcpSocket:
          port: 80
        initialDelaySeconds: 10   # how long after the container starts before the first probe runs
        periodSeconds: 10    # how often the probe runs
        failureThreshold: 1  # how many consecutive failures before the pod is marked NotReady (removed from Service endpoints)
        successThreshold: 1  # how many consecutive successes before the pod is considered ready again
        timeoutSeconds: 5    # probe timeout
      livenessProbe: # liveness probe: decides whether the container needs to be restarted
        tcpSocket:
          port: 80
        initialDelaySeconds: 10   # how long after the container starts before the first probe runs
        periodSeconds: 10    # how often the probe runs
        failureThreshold: 1  # how many consecutive failures before the container is restarted
        successThreshold: 1  # how many consecutive successes before it is considered healthy again
        timeoutSeconds: 5    # probe timeout
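
# A quick way to exercise the manifest above (a sketch; pod1.yaml is an assumed file name):
# Apply it:        kubectl apply -f pod1.yaml
# Watch readiness: kubectl get pod pod1 -w         # READY becomes 1/1 only after the readiness probe succeeds
# Inspect probes:  kubectl describe pod pod1       # the Events section shows probe failures and liveness restarts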

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
          livenessProbe:  # liveness probe: decides whether the container needs to be restarted
            httpGet:
              path: /examples/index.html
              port: 80   # the port the container listens on
              scheme: HTTP         # note: any status code from 200 up to (but not including) 400 counts as success
            initialDelaySeconds: 10   # how long after the container starts before the first probe runs
            periodSeconds: 10    # how often the probe runs
            failureThreshold: 1  # how many consecutive failures before the container is restarted
            successThreshold: 1  # how many consecutive successes before it is considered healthy again
            timeoutSeconds: 5    # probe timeout


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app3
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
    matchExpressions:
      - {key: group, values: [nap, wap], operator: In}  # the label "group" must have the value nap or wap
  template:
    metadata:
      labels:
        group: nap   # must match the selector above, otherwise the Deployment cannot create its pods
        app: nginx
    spec:
      containers:
      - name: nginx-app3
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        livenessProbe:   # liveness probe using an exec command
          exec:
            command:
              - /bin/bash
              - -c
              - ps -ef|grep nginx|grep -v grep
          initialDelaySeconds: 10   # how long after the container starts before the first command runs
          periodSeconds: 10    # how often the command runs
          failureThreshold: 2  # how many consecutive failures before the container is restarted
          successThreshold: 1  # how many consecutive successes before it is considered healthy again
          timeoutSeconds: 5    # command timeout

      nodeSelector:   # node selector: only schedule onto nodes carrying this label
        disktype: ssd    # an arbitrary key: value node label
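
# The disktype=ssd label has to exist on at least one node, otherwise the pods above stay Pending.
# A sketch (k8s-node1 is an assumed node name):
# Label a node:      kubectl label nodes k8s-node1 disktype=ssd
# Check labels:      kubectl get nodes --show-labels
# Remove the label:  kubectl label nodes k8s-node1 disktype-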


---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx
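
# A quick check that the Service actually selects the nginx pods (plain kubectl, nothing assumed):
# kubectl get svc nginx-service          # shows the ClusterIP and port
# kubectl get endpoints nginx-service    # lists the pod IPs behind the Service; empty means the selector matches nothing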

---
# ingress-nginx
# Used to expose and discover services in a highly available setup. The ingress-nginx manifests can be found via
# the Kubernetes docs; the procedure is simple: download the YAML, then add two lines to the pod template spec of
# the controller Deployment at the end of the file: hostNetwork: true and a nodeSelector (some label you assign to
# a node). This pins the ingress controller to a specific node, so after applying the manifests you always know
# where it runs instead of having to hunt for it - or, put differently, it runs on the node we chose.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web
spec:
  rules:
    - host: web.server.com   # the domain name typed into the browser
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nginx-service  # the name of the backing Service (the nginx-service defined above)
                port:
                  number: 80 # the Service's own port
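
# To test the rule without DNS, point the Host header at the ingress controller (a sketch; <node-ip> stands for
# whichever node the controller was pinned to with hostNetwork + nodeSelector):
# curl -H "Host: web.server.com" http://<node-ip>/
# Or add "<node-ip> web.server.com" to /etc/hosts and open http://web.server.com/ in a browser.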


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      affinity:   # affinity rules
        nodeAffinity:  # node affinity
          requiredDuringSchedulingIgnoredDuringExecution:  # hard requirements (multiple nodeSelectorTerms are ORed; expressions within one term are ANDed)
            nodeSelectorTerms:  # node selection terms
              - matchExpressions:
                - key: kubernetes.io/arch   # name of the node label (this one is set automatically by the kubelet)
                  operator: In
                  values:
                    - amd64          # required value of the kubernetes.io/arch label
          preferredDuringSchedulingIgnoredDuringExecution: # soft preferences (weighted; not mandatory)
            - weight: 1
              preference:
                matchExpressions:
                  - key: disktype
                    operator: NotIn
                    values:
                      - asd
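
# kubernetes.io/arch is one of the labels the kubelet sets automatically on every node. To see which other
# labels are available for matchExpressions (plain kubectl, nothing assumed):
# kubectl get nodes --show-labels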

---


apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      affinity:   # affinity rules
        podAffinity:  # pod affinity: schedule close to pods matching the terms below
          requiredDuringSchedulingIgnoredDuringExecution:  # hard requirements (multiple terms may be listed; all of them must be satisfied)
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - web-demo
              topologyKey: kubernetes.io/hostname   # topology domain: nodes carrying this label key
          preferredDuringSchedulingIgnoredDuringExecution: # soft preferences (weighted; not mandatory)
            - weight: 100
              podAffinityTerm:  # the affinity term being weighted
                labelSelector:
                  matchExpressions:
                    - key: disktype
                      operator: NotIn
                      values:
                        - asd
                topologyKey: kubernetes.io/hostname   # topology domain: nodes carrying this label key


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      affinity:   # affinity rules
        podAntiAffinity:  # pod anti-affinity: avoid nodes already running pods that match the terms below
          requiredDuringSchedulingIgnoredDuringExecution:  # hard requirements (multiple terms may be listed; all of them must be satisfied)
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - web-demo
              topologyKey: kubernetes.io/hostname   # topology domain: nodes carrying this label key
          preferredDuringSchedulingIgnoredDuringExecution: # soft preferences (weighted; not mandatory)
            - weight: 100
              podAffinityTerm:  # the anti-affinity term being weighted
                labelSelector:
                  matchExpressions:
                    - key: disktype
                      operator: NotIn
                      values:
                        - asd
                topologyKey: kubernetes.io/hostname   # topology domain: nodes carrying this label key

---
# A taint has the form key=value:effect
# Each taint has a key and a value (the value may be empty); effect describes what the taint does.
# Add a taint:     kubectl taint nodes k8s-node2 check=yuanzhang:NoExecute
# View taints:     kubectl describe nodes k8s-node2
# Remove a taint:  kubectl taint nodes k8s-node2 check:NoExecute-
# Once a node is tainted, pods need a matching toleration to be scheduled onto (or keep running on) it.


apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      tolerations:  # taint tolerations
        - key: "key"
          operator: "Equal"
          value: "value"
          effect: "NoSchedule"  # effect (must match the effect used when the taint was added)
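
# A toleration does not have to match an exact value. With operator "Exists" the value is omitted and any value
# of that key is tolerated. This variant is only a sketch, not part of the Deployment above:
#       tolerations:
#         - key: "check"
#           operator: "Exists"
#           effect: "NoExecute"
#           tolerationSeconds: 60   # NoExecute only: how long the pod may keep running on the tainted node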


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  strategy:  # deployment strategy
    type: Recreate  # stop all existing pods first, then start the new ones
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      tolerations:  # taint tolerations
        - key: "key"
          operator: "Equal"
          value: "value"
          effect: "NoSchedule"  # effect (must match the effect used when the taint was added)


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app2
  labels:
    app: nginx
spec:
  strategy:  # deployment strategy (if this block is omitted, the default is a rolling update with both parameters at 25%)
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%   # during a rolling update, at most this many extra pods may exist above the desired replica count
      maxUnavailable: 25%  # during a rolling update, at most this many pods may be unavailable
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: pod1
      labels:
        app: nginx
    spec:
      containers:
        - name: nginxapp2-container
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
      tolerations:  # taint tolerations
        - key: "key"
          operator: "Equal"
          value: "value"
          effect: "NoSchedule"  # effect (must match the effect used when the taint was added)

# Rolling back an upgrade: kubectl rollout undo deployment/<deployment-name> -n <namespace>
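# Related rollout commands (all standard kubectl):
# kubectl rollout status  deployment/nginx-app2                  # watch a rolling update as it progresses
# kubectl rollout history deployment/nginx-app2                  # list previous revisions
# kubectl rollout undo    deployment/nginx-app2 --to-revision=2  # roll back to a specific revision
# kubectl rollout pause   deployment/nginx-app2                  # pause / resume an in-flight rollout
# kubectl rollout resume  deployment/nginx-app2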


---
# ============================  Blue-green deployment: green ========================

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app3-green   # green and blue run as two separate Deployments
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
      version: green
  template:
    metadata:
      labels:
        app: nginx
        version: green
    spec:
      containers:
      - name: nginx-app3
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx
    version: green


---
# ============================  Blue-green deployment: blue ========================

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app3-blue
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
      version: blue
  template:
    metadata:
      labels:
        app: nginx
        version: blue
    spec:
      containers:
      - name: nginx-app3
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx
    version: blue
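
# Switching traffic from green to blue means re-applying the Service with its selector pointing at version: blue;
# the two Service definitions above deliberately share the name nginx-service. A sketch, assuming the manifests
# are saved as service-green.yaml and service-blue.yaml:
# kubectl apply -f service-blue.yaml       # all traffic now goes to the blue pods
# kubectl get endpoints nginx-service      # verify the endpoints switched to the blue pods' IPs
# kubectl apply -f service-green.yaml      # roll back by pointing the selector at green again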


---
# ============================ Canary deployment ========================

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-app3-blue   # the new version, running alongside the existing green Deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
      version: blue
  template:
    metadata:
      labels:
        app: nginx
        version: blue
    spec:
      containers:
      - name: nginx-app3
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector: # no version label here, so the Service selects both the old and the new pods
    app: nginx
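
# With this selector the Service spreads traffic across every pod labelled app=nginx, i.e. across both
# Deployments, roughly in proportion to their replica counts. A sketch for adjusting the split, using the
# Deployment names from this note:
# kubectl scale deployment nginx-app3-blue --replicas=1   # ~1/3 of traffic if nginx-app3-green keeps 2 replicas
# kubectl scale deployment nginx-app3-blue --replicas=0   # pull the canary out entirely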


---

# A brief note on pods:
# A pod is a logical concept; there is no component in Kubernetes literally called "pod". At its core a pod is
# anchored by a container - a tiny one, roughly 100-200 KB, that normally sits paused, so it has no noticeable
# impact on node performance. Kubernetes builds the pod abstraction on top of this container and manages the
# other containers through it; this container can "see" the others. Because the pod is itself backed by a
# container, it has many container features, such as volumes. Volumes can therefore be defined at the pod level,
# and since the containers in a pod share networking and resources, each container can mount a pod volume and
# access its contents. This is also how containers managed by the same pod pass files to one another: one
# container writes logs to the shared volume as fast as it likes, while another reads and collects them.
# A shared volume makes this kind of pattern very easy to set up.

apiVersion: v1
kind: Pod
metadata:
  name: pod-volume
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:   # mount a volume
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /shared-web  # where to mount it inside the container
    - name: dubbo  # second container
      image: hub/k8s/dubbo:v1
      ports:
        - containerPort: 8081
          hostPort: 8081
          protocol: TCP
      env:
        - name: DUBBO_PORT
          value: "8081"
      volumeMounts:
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /share-dubbo
  volumes:  # volumes defined at the pod level, for the containers to mount
    - name: share-volume
      hostPath:
        path: /shared-volume-data
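
# A quick check that the two containers really share files (assuming the pod above is running; the file name
# used below is just an example):
# kubectl exec pod-volume -c web   -- sh -c 'echo hello > /shared-web/test.txt'
# kubectl exec pod-volume -c dubbo -- cat /share-dubbo/test.txt   # prints "hello": same volume, different mount paths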

---
# Likewise, because the pod manages the containers inside it, anything shared between them should not be changed
# directly inside a single container; it has to be set at the pod level. Extra hostname entries are an example:
# instead of editing /etc/hosts inside one container, declare them on the pod (hostAliases below).

apiVersion: v1
kind: Pod
metadata:
  name: pod-volume
spec:
  hostNetwork: true # whether to use the host's network namespace
  hostPID: true # whether to share the host's PID namespace
  hostAliases: # extra hostname entries; every container managed by this pod will see them in its /etc/hosts
    - ip: "10.55.12.11"
      hostnames:
        - "web.aa.com"
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:   # mount a volume
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /shared-web  # where to mount it inside the container
    - name: dubbo  # second container
      image: hub/k8s/dubbo:v1
      ports:
        - containerPort: 8081
          hostPort: 8081
          protocol: TCP
      env:
        - name: DUBBO_PORT
          value: "8081"
      volumeMounts:
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /share-dubbo
  volumes:  # volumes defined at the pod level, for the containers to mount
    - name: share-volume
      hostPath:
        path: /shared-volume-data
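
# hostAliases entries are appended to /etc/hosts in every container of the pod; a quick check (assuming the
# pod above is running):
# kubectl exec pod-volume -c web -- cat /etc/hosts    # shows "10.55.12.11  web.aa.com" added by the kubelet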


---
# The container lifecycle can also be managed with hooks


apiVersion: v1
kind: Pod
metadata:
  name: pod-volume
spec:
  hostNetwork: true # whether to use the host's network namespace
  hostPID: true # whether to share the host's PID namespace
  hostAliases: # extra hostname entries; every container managed by this pod will see them in its /etc/hosts
    - ip: "10.55.12.11"
      hostnames:
        - "web.aa.com"
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:   # mount a volume
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /shared-web  # where to mount it inside the container
    - name: dubbo  # second container
      image: hub/k8s/dubbo:v1
      ports:
        - containerPort: 8081
          hostPort: 8081
          protocol: TCP
      env:
        - name: DUBBO_PORT
          value: "8081"
      volumeMounts:
        - name: share-volume   # which volume to mount (referenced by name)
          mountPath: /share-dubbo
      lifecycle:
        postStart: # runs right after the container is created; it is not guaranteed to run after the entrypoint has
                   # fully started, so do not put commands here that depend on the service inside the container being up
          exec:
            command: ["/bin/sh", "-c", "echo dubbo starting ... >> /var/log/messages"]
        preStop: # runs before the container is stopped: once the stop instruction is issued, this command runs first and
                 # the container is only stopped after it finishes. The wait is not unlimited, though - if the command
                 # takes longer than the termination grace period, the container is killed anyway.
          exec:
            command: ["/bin/sh", "-c", "echo dubbo stopping ... >> /var/log/messages && sleep 3"]
  volumes:  # volumes defined at the pod level, for the containers to mount
    - name: share-volume
      hostPath:
        path: /shared-volume-data
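
# The "timeout" mentioned for preStop is the pod's termination grace period (30s by default): if preStop plus the
# process shutdown take longer, the container is killed with SIGKILL. It can be raised at the pod spec level; the
# value below is only an example:
#   spec:
#     terminationGracePeriodSeconds: 60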


---

apiVersion: v1
kind: Secret
metadata:
  name: dbpass
type: Opaque # generic ("opaque") secret type; values are stored base64-encoded rather than as plain text
data:
  username: eW1r  # values here must be base64-encoded
  passwd: aW1vb2M=
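
# The values above are plain base64, not encryption - anyone who can read the Secret can decode them:
# echo -n 'ymk'   | base64        # -> eW1r
# echo -n 'imooc' | base64        # -> aW1vb2M=
# echo 'aW1vb2M=' | base64 -d     # -> imooc
# The same Secret can also be created without YAML:
# kubectl create secret generic dbpass --from-literal=username=ymk --from-literal=passwd=imooc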

---
# Projecting the Secret into a pod. Pods often need specific files at runtime - a username, a password and so on -
# so the data is projected into the pod as a volume, and the containers read those files by mounting that volume.

apiVersion: v1
kind: Pod
metadata:
  name: pod-volume
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:   # mount the volume
        - name: db-secret   # which volume to mount; after mounting, two files appear (username and passwd) - cat them to see the decoded values
          mountPath: /db-secret  # where to mount it inside the container
          readOnly: true   # mount it read-only

  volumes:  # volumes defined at the pod level, for the containers to mount
    - name: db-secret
      projected: # project the Secret into this volume
        sources:
          - secret:
              name: dbpass  # the metadata.name of the Secret defined above

---
# k8s中除了可以搞secret类型的数据存储,然后投射到pod以外,还可以搞configmap类型的内容,做类似的操作
# kebectl create configmap web-game(创建一个configmap类型的对象,名字为web-game) --from-file game.properties
# 这是直接以命令去创建,当然,也可以通过yml去创

apiVersion: v1
kind: ConfigMap
metadata:
  name: web-game
data:
  game.properties: |    # note the "|": everything indented below it is the content of this key (i.e. the file)
    enemies=aliens
    lives=3
    secret.code.allowed=true

---

apiVersion: v1
kind: Pod
metadata:
  name: pod-volume
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:   # mount the volume
        - name: game  # after mounting, the file game.properties appears in the mount directory; cat it to see its contents
          mountPath: /etc/config/game  # where to mount it inside the container
          readOnly: true   # mount it read-only

  volumes:  # volumes defined at the pod level, for the containers to mount
    - name: game
      configMap:
        name: web-game
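
# Verify the mount (assuming the web-game ConfigMap and the pod above exist):
# kubectl get configmap web-game -o yaml
# kubectl exec pod-volume -c web -- cat /etc/config/game/game.properties
# Files mounted from a ConfigMap are refreshed (after a short delay) when the ConfigMap changes.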

---
# Besides mounting, ConfigMap values can also be consumed directly as environment variables in the container

apiVersion: v1
kind: ConfigMap
metadata:
  name: configs
data:
  PYTHONPATH: /home/abc/build/
  LOG_LEVEL: DEBUG

---

apiVersion: v1
kind: Pod
metadata:
  name: pod-env
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      env:
        - name: LOG_LEVEL_CONFIG # define the environment variable LOG_LEVEL_CONFIG
          valueFrom:
            configMapKeyRef:
              name: configs   # name of the ConfigMap to read from
              key: LOG_LEVEL  # the key LOG_LEVEL in the ConfigMap named configs; its value is read
                              # and assigned to LOG_LEVEL_CONFIG
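
# Check the injected variable (assuming the pod above is running):
# kubectl exec pod-env -- env | grep LOG_LEVEL_CONFIG     # -> LOG_LEVEL_CONFIG=DEBUG
# Unlike a mounted ConfigMap, environment variables are read once at container start; changing the ConfigMap
# later has no effect until the pod is recreated.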

---
# An environment variable defined this way can also be used directly in the container's command

apiVersion: v1
kind: Pod
metadata:
  name: pod-env
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      command: ["/bin/sh", "-c", "python xxx.py --python-path=$(PYTHONPATH)"]  # references the environment variable defined below
      env:
        - name: PYTHONPATH # define the environment variable PYTHONPATH
          valueFrom:
            configMapKeyRef:
              name: configs   # name of the ConfigMap to read from
              key: PYTHONPATH

---
# downwardAPI: exposes information about the pod itself to its containers, again via a volume that the containers mount

apiVersion: v1
kind: Pod
metadata:
  name: pod-downwardapi
  labels:
    app: downwardapi
    type: webapp
spec:
  containers:
    - name: web
      image: hub/k8s/web:v1
      ports:
        - containerPort: 8080
      volumeMounts:
        - name: podinfo
          mountPath: /etc/podinfo
  volumes:
    - name: podinfo
      projected:
        sources:
          - downwardAPI:
              items:
                - path: "labels"  # each item defined here shows up as a file in the mount directory;
                                  # cat labels prints whatever is in metadata.labels - every label that is set
                  fieldRef:
                    fieldPath: metadata.labels
                - path: "name"
                  fieldRef:
                    fieldPath: metadata.name
                - path: "namespace"   # no namespace is set in the manifest, so cat namespace prints "default"
                  fieldRef:
                    fieldPath: metadata.namespace
                - path: "mem-limit"  # if the container sets no memory limit, this falls back to the node's allocatable memory
                  resourceFieldRef:
                    containerName: web
                    resource: limits.memory
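
# Verify the projected pod info (assuming the pod above is running):
# kubectl exec pod-downwardapi -- cat /etc/podinfo/labels      # app="downwardapi" type="webapp"
# kubectl exec pod-downwardapi -- cat /etc/podinfo/namespace   # default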
