Installing k8s on Alibaba Cloud servers

[TOC]


Common commands


kubectl -h
# get info for all pods
kubectl get pods --all-namespaces -o wide
kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
 
kubectl get pods -n kube-system  -o wide
 
watch kubectl get pod -n kube-system -o wide
 
kubectl describe pod  calico-node-qxfrt -n kube-system
 
kubeadm config view
kubectl cluster-info
 
kubectl create -f manifest.yaml
kubectl delete -f manifest.yaml
kubectl logs -f manifest...
kubectl get pod pod_name
kubectl run b1 -it --rm --image=alpine /bin/sh
kubectl run b1 -it --rm --image=harbor.lisea.cn/k8s/alpine-base:2.0 /bin/sh
curl -s microservice-cloud-config-service.default.svc.cluster.local:8888/future-dev.yml

Installation


Simple deployment: kubeasz
https://github.com/gjmzj/kubeasz
Quick deployment guide
https://github.com/gjmzj/kubeasz/blob/master/docs/setup/quickStart.md

  • k8s: deploying a k8s cluster over the public network
  • Working around kubeadm on Alibaba Cloud ECS being unable to use a public IP (deprecated)
  • Installing k8s on cloud servers whose private networks are not interconnected
  • Port requirements (a reference sketch follows this list)
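
For reference, these are the ports a kubeadm-based 1.18.x cluster normally needs reachable, taken from the upstream Kubernetes docs; open them in the ECS security group, since the script below disables firewalld. The flannel vxlan port is an assumption based on the flannel manifest used later.
# control-plane node
# 6443/tcp          kube-apiserver
# 2379-2380/tcp     etcd client/peer API
# 10250/tcp         kubelet API
# 10251/tcp         kube-scheduler
# 10252/tcp         kube-controller-manager
# worker nodes
# 10250/tcp         kubelet API
# 30000-32767/tcp   NodePort services
# all nodes (flannel vxlan backend)
# 8472/udp          overlay network traffic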

Upgrade the kernel

The 3.10.x kernel shipped with CentOS 7.x has bugs that make Docker unstable; upgrading the kernel is recommended.
# install the ELRepo kernel repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# install the latest long-term kernel
yum --enablerepo=elrepo-kernel install -y kernel-lt
# list the available kernel boot entries
cat /boot/grub2/grub.cfg |grep menuentry
# boot from the new kernel by default
grub2-set-default "CentOS Linux (4.4.230-1.el7.elrepo.x86_64) 7 (Core)"
# check the default boot entry
grub2-editenv list
# reboot so the new kernel takes effect
reboot
# verify the running kernel version
uname -r

Install kubeadm, kubectl and kubelet

# add the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# remove any old versions
yum remove -y kubelet kubeadm kubectl
# install
yum install -y kubelet kubeadm kubectl
# enable kubelet at boot and start it
systemctl enable kubelet && systemctl start kubelet

Reference script

# run on both master and worker nodes
# the last argument, 1.18.9, selects the kubernetes version; any 1.18.x release is supported
# Tencent Cloud docker hub mirror
# export REGISTRY_MIRROR="https://mirror.ccs.tencentyun.com"
# DaoCloud mirror
# export REGISTRY_MIRROR="http://f1361db2.m.daocloud.io"
# Huawei Cloud mirror
# export REGISTRY_MIRROR="https://05f073ad3c0010ea0f4bc00b7105ec20.mirror.swr.myhuaweicloud.com"
# Alibaba Cloud docker hub mirror
export REGISTRY_MIRROR=https://registry.cn-hangzhou.aliyuncs.com
curl -sSL https://kuboard.cn/install-script/v1.18.x/install_kubelet.sh | sh -s 1.18.9

install_kubelet.sh

#!/bin/bash
 
# run on both master and worker nodes

# install docker
# reference docs:
# https://docs.docker.com/install/linux/docker-ce/centos/
# https://docs.docker.com/install/linux/linux-postinstall/

# remove old docker versions
yum remove -y docker \
docker-client \
docker-client-latest \
docker-ce-cli \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
 
# set up the yum repository
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
 
# install and start docker
yum install -y docker-ce-19.03.8 docker-ce-cli-19.03.8 containerd.io
systemctl enable docker
systemctl start docker
 
# install nfs-utils
# nfs-utils must be installed before NFS network storage can be mounted
yum install -y nfs-utils
yum install -y wget
 
# disable the firewall
systemctl stop firewalld
systemctl disable firewalld
 
# disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
 
# disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
 
# update /etc/sysctl.conf
# modify the entries that already exist
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g"  /etc/sysctl.conf
# append the entries that may be missing
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf
# apply the settings
sysctl -p
 
# configure the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 
# remove old versions
yum remove -y kubelet kubeadm kubectl
 
# install kubelet, kubeadm and kubectl
# ${1} is replaced with the kubernetes version number, e.g. 1.17.2
yum install -y kubelet-${1} kubeadm-${1} kubectl-${1}
 
# change the docker cgroup driver to systemd
# # in /usr/lib/systemd/system/docker.service, change the line ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
# # to ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
# without this change, adding a worker node may fail with the following error
# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
# Please follow the guide at https://kubernetes.io/docs/setup/cri/
sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service
 
# configure a docker registry mirror to make image pulls faster and more reliable
# if you can reach https://hub.docker.io quickly and reliably, this step can be skipped
curl -sSL https://kuboard.cn/install-script/set_mirror.sh | sh -s ${REGISTRY_MIRROR}
 
# restart docker and start kubelet
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet
 
docker version

Initialize the master


# set the hostname
hostnamectl set-hostname master

kubeadm init --kubernetes-version=1.18.9 --apiserver-advertise-address=[master node IP] --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16 --image-repository=registry.aliyuncs.com/google_containers
  • kubernetes-version: choose a specific Kubernetes version for the control plane.

  • apiserver-advertise-address: the IP address the API server advertises it is listening on. If unset, the default network interface is used.

  • service-cidr: use an alternative IP address range for the Service virtual IPs.

  • pod-network-cidr: the IP address range the pod network may use. If set, the control plane automatically allocates CIDRs to every node.

  • image-repository: the container registry to pull the control-plane images from; defaults to k8s.gcr.io.

When deploying on Alibaba Cloud, if the public IP is not bound to a network interface, the etcd manifest must be modified

k8s: deploying a k8s cluster over the public network

Working around kubeadm on Alibaba Cloud ECS being unable to use a public IP (deprecated)

Installing k8s on cloud servers whose private networks are not interconnected

When kubeadm init hangs at

[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.

open a new terminal and edit the following file,

vim /etc/kubernetes/manifests/etcd.yaml

changing these flags to:

- --listen-client-urls=https://127.0.0.1:2379
- --listen-peer-urls=https://127.0.0.1:2380

Configure the user credentials (kubeconfig)

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

The disabled insecure ports (kubectl get cs reports Unhealthy)

kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
 
vim /etc/kubernetes/manifests/kube-scheduler.yaml
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
 
# remove (comment out) the --port=0 line in both manifests
systemctl restart kubelet
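
A minimal sketch of that edit, assuming the default kubeadm 1.18 manifests where the flag appears as "- --port=0" (back the files up first):
# comment out the --port=0 line in both static pod manifests
sed -i '/- --port=0/s/^/#/' /etc/kubernetes/manifests/kube-scheduler.yaml
sed -i '/- --port=0/s/^/#/' /etc/kubernetes/manifests/kube-controller-manager.yaml
# kubelet picks up the changed manifests; restart it and re-check
systemctl restart kubelet
kubectl get cs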

Check the cluster status

# the master is NotReady at this point because no pod network has been installed yet
kubectl get node
 
kubectl -h
# get info for all pods
kubectl get pods --all-namespaces -o wide
kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
 
kubectl get pods -n kube-system  -o wide
 
watch kubectl get pod --all-namespaces -o wide
 
kubectl describe pod  calico-node-qxfrt -n kube-system
 
kubeadm config view
kubectl cluster-info

Install the pod network

If you want to use public IPs

# every node needs this annotation set to its public IP
kubectl annotate nodes master flannel.alpha.coreos.com/public-ip-overwrite=47.103.63.71

Install flannel

#flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kube-flannel.yml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

Add worker nodes

kubeadm token create --print-join-command
 
# output of the kubeadm token create command
kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt     --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303

Create an nginx test pod

kubectl run --image=nginx nginx-app --port=80
 
kubectl delete pod nginx-app

Allow pods to be scheduled on the master node

kubectl taint nodes --all node-role.kubernetes.io/master-

Disallow scheduling on the master again

kubectl taint nodes master1 node-role.kubernetes.io/master=:NoSchedule

Problems you may run into on cloud servers


Logs cannot be streamed / connections fail

If the nodes' private IPs are in different subnets and cannot reach each other, then for pods running on nodes other than the master you will not be able to view logs or connect at all.

Note: run the following command on the master

# run on the master: rewrite the other node's private IP to its public IP
iptables -t nat -A OUTPUT -d [other node's private IP] -j DNAT --to-destination [other node's public IP]

Inspecting and deleting the rule

# list the rules
iptables -t nat -vnL OUTPUT --line-number
# delete a rule; [num] is the rule number in the leftmost column
iptables -t nat -D OUTPUT [num]

Cluster IPs cannot be reached


Configure kube-proxy to run in ipvs mode

# change mode to "ipvs"
kubectl edit cm kube-proxy -n kube-system
---
    mode: "ipvs"
    nodePortAddresses: null
    oomScoreAdj: null
    portRange: ""

Delete the kube-proxy pods; they will be recreated automatically

kubectl delete pod -l k8s-app=kube-proxy -n kube-system
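
ipvs mode only works if the ipvs kernel modules are loaded; a sketch for checking and loading them (module names assume a pre-4.19 kernel such as the 4.4 kernel-lt installed above; ipvsadm is optional and only used for inspection):
# load the ipvs modules (add them to /etc/modules-load.d/ to survive reboots)
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
lsmod | grep ip_vs
# optional: inspect the virtual servers kube-proxy programs
yum install -y ipvsadm
ipvsadm -Ln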

Uninstall

# tear down the services
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# delete the network interfaces
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
ifconfig kube-ipvs0 down
ip link delete kube-ipvs0
rm -rf /var/lib/cni/
rm -rf /var/lib/etcd
rm -rf /var/lib/kubelet
rm -rf /var/run/kubernetes
rm -rf /etc/cni/*
# delete the node
kubectl delete node node-01
# remove the rpm packages
rpm -qa|grep kube*|xargs rpm --nodeps -e
# stop and remove all containers
docker stop $(docker ps -q) && docker rm $(docker ps -aq)
# remove the images
docker images -qa|xargs docker rmi -f

remove.sh

echo "----------清理开始----------"
kubeadm reset
 
# stop all docker containers
docker stop `docker ps |awk {'print $1'}|grep -v CONTAINER`
# remove all containers
docker rm -f $(docker ps -qa)
# remove all container volumes
docker volume rm $(docker volume ls -q)
# remove all images; use with caution
#docker rmi -f `docker images|awk {'print $3'}`
 
# disable and stop the services
systemctl  disable kubelet.service
systemctl  disable kube-scheduler.service
systemctl  disable kube-proxy.service
systemctl  disable kube-controller-manager.service
systemctl  disable kube-apiserver.service
 
systemctl  stop kubelet.service
systemctl  stop kube-scheduler.service
systemctl  stop kube-proxy.service
systemctl  stop kube-controller-manager.service
systemctl  stop kube-apiserver.service
 
# unmount the kubelet mount points
for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done
 
# remove leftover directories
rm -rf /etc/ceph
rm -rf /etc/cni
rm -rf /etc/kubernetes
rm -rf /run/secrets/kubernetes.io
rm -rf /run/calico
rm -rf /run/flannel
rm -rf /var/lib/calico
rm -rf /var/lib/cni
rm -rf /var/lib/kubelet
rm -rf /var/lib/etcd
rm -rf /var/log/containers
rm -rf /var/log/pods
rm -rf /var/run/calico
rm -rf /var/run/kubernetes
rm -rf /opt/cni
rm -rf ~/.kube/config
 
# clean up network interfaces
network_interface=`ls /sys/class/net`
for net_inter in $network_interface;
do
  if ! echo $net_inter | grep -qiE 'lo|docker0|eth*|ens*';then
    ip link delete $net_inter
  fi
done
 
# flush the iptables tables
## note: if this node has custom iptables rules, run the following with care
sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
 
systemctl restart docker
 
echo "----------清理完成----------"

Network debugging tools


busybox.yaml

apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28.4
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
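
Typical usage once the pod is running, e.g. to check that cluster DNS and a Service respond (the service names below are just examples):
kubectl apply -f busybox.yaml
kubectl exec -it busybox -- nslookup kubernetes.default
kubectl exec -it busybox -- wget -q -O - http://nginx-service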

Install the Dashboard


wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
  • Change the kubernetes-dashboard Service type to NodePort
[root@k8s-master dashboard]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # added
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30443 # added
  selector:
  • Install
kubectl create -f recommended.yaml
  • Create the ServiceAccount and ClusterRoleBinding YAML: by default the Dashboard only has minimal RBAC permissions, so add cluster-admin rights in order to manage cluster resources from the Dashboard
[root@k8s-master dashboard]# vim adminuser.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  • Create it
kubectl create -f adminuser.yaml
  • Get the login token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
  • Open https://IP:30443 in a browser

Install traefik


  • traefik-ingress.yaml: set the exposed port according to your port restrictions; default port: 23456
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v1.7.20
        imagePullPolicy: IfNotPresent
        name: traefik-ingress-lb
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      # this is the service port of the traefik ingress-controller
      port: 80
      # the NODE_PORT_RANGE set in the cluster hosts file defines the usable NodePort range
      # pick a free port from the default 20000~40000 range to expose the ingress-controller externally
      nodePort: 23456
      name: web
    - protocol: TCP
      # this port serves traefik's admin web UI
      port: 8080
      name: admin
  type: NodePort
  • traefik-ui.ing.yaml
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  rules:
  - host: traefik-ui.test.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
  • Add a hosts entry
39.103.232.193    traefik-ui.test.com

Access: http://traefik-ui.test.com:30000
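
Without editing hosts you can also test the ingress by sending the Host header straight to the traefik web NodePort defined above (23456); the node IP below is the one from the hosts example:
curl -H "Host: traefik-ui.test.com" http://39.103.232.193:23456/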

Install Harbor

After every reboot, make sure all of Harbor's containers are up; running docker-compose up -d takes care of it.

  • Install docker-compose
curl -L https://github.com/docker/compose/releases/download/1.29.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
  • Download the offline installer: harbor
  • Copy the config file: cp harbor.yml.tmpl harbor.yml
    • hostname can also be an IP address
    • the https-related settings can be commented out (see the harbor.yml sketch at the end of this section)
  • Check that the config is correct: ./prepare
  • Install: ./install.sh
  • Controlling Harbor
# start
docker-compose up -d
# stop
docker-compose stop
# restart
docker-compose restart
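
A minimal sketch of the harbor.yml edits referred to above (the IP and port are examples; the docker examples later in this section assume port 9090):
hostname: 192.168.1.20

http:
  port: 9090

# comment out the whole https block if you have no certificate
# https:
#   port: 443
#   certificate: /your/certificate/path
#   private_key: /your/private/key/path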

Using docker with Harbor

The port below has been changed to 9090; the default is 80.

If you access the registry by IP, docker must be configured to trust it as an insecure registry.

[root@master harbor]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.cn-hangzhou.aliyuncs.com"],
  "insecure-registries": ["192.168.1.20:9090"]
}
 
# restart docker
systemctl daemon-reload
systemctl restart docker

First create a project called demo in Harbor.

Log in to the Harbor registry

[root@master]# docker login -u admin -p Harbor12345 192.168.1.20:9090
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
 
Login Succeeded
 
# push an image to the private registry
docker pull nginx:latest
docker tag nginx:latest 192.168.1.20:9090/demo/nginx:v1.0
docker push 192.168.1.20:9090/demo/nginx:v1.0
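
To let Kubernetes itself pull from this private registry, an image pull secret can be created and referenced from pod specs; a sketch reusing the credentials above (the secret name harbor-registry is made up):
kubectl create secret docker-registry harbor-registry \
  --docker-server=192.168.1.20:9090 \
  --docker-username=admin \
  --docker-password=Harbor12345
# then reference it in the pod spec under:
#   imagePullSecrets:
#   - name: harbor-registry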

Component overview


Deployment


apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.15-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
 
---
 
spec:
    ...
    spec:
      containers:
        ...
        volumeMounts:
        - mountPath: /nginx
          name: nginx
      volumes:
      - name: nginx
        persistentVolumeClaim:
          claimName: nginx

labels: just a label; the key/value pair is arbitrary, e.g. app: nginx or name: nginx

subPath: backs the container path /nginx/config with the config subdirectory of the PV.

spec:
        ...
        volumeMounts:
        - mountPath: /nginx/config
          name: nginx
          subPath: config
      volumes:
      - name: nginx
        persistentVolumeClaim:
          claimName: nginx

Deploying to a specific node


First label the node

# show node labels
kubectl get node --show-labels
# add a label
kubectl label nodes node-1 tag=node-1
# change a label
kubectl label nodes node-1 tag=node-1 --overwrite
# delete a label
kubectl label nodes node-1 tag-

Update the deployment manifest

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      nodeSelector:
        tag: node-1
      containers:
      - name: nginx
        image: nginx:1.15-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

Service


apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    name: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30080
    protocol: TCP
    name: first-port
  selector:
    app: nginx
 
---
 
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    name: first-port
  selector:
    app: nginx
  • selector: the labels selected here must be the same as the Deployment's labels

There are four types of Service:

  • ClusterIP: the default type; allocates a virtual IP that is only reachable from inside the cluster.

  • NodePort: on top of ClusterIP, binds a port for the Service on every node, so the Service can be reached via <NodeIP>:NodePort. If kube-proxy is started with --nodeport-addresses=10.240.0.0/16 (supported since v1.10), the NodePort is only served on IPs within that range.

  • LoadBalancer: on top of NodePort, uses the cloud provider to create an external load balancer that forwards requests to <NodeIP>:NodePort.

  • ExternalName: forwards the Service to a specified domain name via a DNS CNAME record (set with spec.externalName). Requires kube-dns 1.7 or later (a quick example follows this list).
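
A quick ExternalName example, created directly with kubectl (the service and domain names here are placeholders):
kubectl create service externalname my-db --external-name db.example.com
# pods can now use the name my-db, which resolves via a CNAME to db.example.com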

Services, Endpoints and Pods support three protocols:

  • TCP (Transmission Control Protocol): a connection-oriented, reliable, byte-stream-based transport-layer protocol.

  • UDP (User Datagram Protocol): a connectionless transport-layer protocol providing unreliable message delivery.

  • SCTP (Stream Control Transmission Protocol): used to carry SCN (Signaling Communication Network) narrowband signaling messages over IP networks.

PV


apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 50Gi
  nfs:
    path: /data/nfs/nginx
    server: 192.168.0.200
  volumeMode: Filesystem

Note: set the directory permissions; it is not clear which of the following is actually required, so try them in turn

chmod 777 /data/nfs/nginx
chown nfsnobody.nfsnobody /data/nfs/nginx
chown -R 200 /data/nfs/nginx

A PersistentVolume (PV) is a piece of network storage in the cluster. Like a Node, it is a cluster resource. A PV is similar to a Volume, but has a lifecycle independent of any Pod.

A PV has three access modes (accessModes):

  • ReadWriteOnce (RWO): the most basic mode; read-write, but it can only be mounted by a single node.

  • ReadOnlyMany (ROX): can be mounted read-only by multiple nodes.

  • ReadWriteMany (RWX): can be mounted read-write and shared by multiple nodes. Not every storage backend supports all three modes, and shared access in particular is still rare; NFS is the most common choice. When a PVC binds to a PV it usually matches on two criteria: the storage size and the access mode.

A PV also has three reclaim policies (persistentVolumeReclaimPolicy, i.e. what happens to the PV when the PVC releases the volume):

  • Retain: keep the volume without cleaning it (manual cleanup is required)

  • Recycle: delete the data, i.e. rm -rf /thevolume/* (only NFS and HostPath support this)

  • Delete: delete the underlying storage resource, e.g. the AWS EBS volume (only AWS EBS, GCE PD, Azure Disk and Cinder support this)
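
The reclaim policy of an existing PV can be changed with a patch; for example, to keep the data of the nginx PV above after its claim is released:
kubectl patch pv nginx -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
kubectl get pv nginx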

PVC


apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  volumeName: nginx
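
After creating the PV and PVC, a quick check that the claim actually bound (both should show STATUS Bound):
kubectl get pv nginx
kubectl get pvc nginx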

GUI tools


  • Kubernetic
  1. Install the Windows build

  2. Create the C:\Users\10711\.kube directory

# open cmd
mkdir .kube
  3. View the config file on the master
cat ~/.kube/config
  4. Copy the config file into C:\Users\10711\.kube on Windows, and you will be able to connect
  • lens

https://github.com/lensapp/lens

kubectl


Download: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#client-binaries-1

Config file

/etc/kubernetes/admin.conf

Download the binary, then:

chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl

Set up the ~/.kube/config file

Shell auto-completion

# install bash-completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
# load kubectl completion into the shell environment
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

Common commands

kubectl -h
# get info for all pods
kubectl get pods --all-namespaces
kubectl create -f manifest.yaml
kubectl delete -f manifest.yaml
kubectl logs -f manifest...
kubectl get pod pod_name
kubectl run b1 -it --rm --image=alpine /bin/sh
kubectl run b1 -it --rm --image=harbor.lisea.cn/k8s/alpine-base:2.0 /bin/sh
curl -s microservice-cloud-config-service.default.svc.cluster.local:8888/future-dev.yml

NFS


Install the server side

yum install nfs-utils rpcbind
systemctl start rpcbind.service
systemctl start nfs.service
# enable at boot
chkconfig rpcbind on
chkconfig nfs on
# configuration
# 1. create the directories
mkdir -p /data/nfs-share
mkdir -p /data/tmp
# 2. set permissions; it is unclear which of the following is actually needed, try each in turn
chmod 777 /data/nfs-share
chown nfsnobody.nfsnobody /data/nfs-share
chown -R 200 /data/nfs-share
# add the export; repeat this step for every new directory
vi /etc/exports
# add the following line
/data/nfs-share *(rw,no_root_squash)
# if you hit permission problems, use this variant instead
/data/nfs-share *(rw,async,root_squash)
# reload the exports
exportfs -a
# showmount -e <server ip> now lists the mountable directories
showmount -e  192.168.90.128
# test
# 1. mount
mount -t nfs 192.168.90.128:/data/nfs-share /data/tmp
# 2. unmount
umount /data/tmp

redis


apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: redis
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis:latest
        imagePullPolicy: IfNotPresent
        name: redis
        ports:
        - containerPort: 6379
          protocol: TCP
 
---
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
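
Once applied, a throwaway client pod is an easy way to confirm the Service answers (redis-cli ships in the redis image; the pod name is arbitrary):
kubectl run redis-client --rm -it --image=redis:latest -- redis-cli -h redis ping
# expected reply: PONG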
