Pods, pod controllers, Services, and Ingress
Usage: kubectl get resource [-o wide|json|yaml] [-n namespace]
Man: get information about resources; -n specifies the namespace, -o specifies the output format
resource can be a concrete resource name (e.g. pod nginx-xxx), a resource type (e.g. pod), or all (which shows only a few core resource types and is not exhaustive)
-A, --all-namespaces shows resources across all namespaces
Usage: kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME) [-n namespace]
Man: show detailed information about a resource
Log in to any compute node; here I choose 10.4.7.21.
[root@hdss7-21 ~]# kubectl get namespace # short form: kubectl get ns
NAME STATUS AGE
default Active 6d17h
kube-node-lease Active 6d17h
kube-public Active 6d17h
kube-system Active 6d17h
View all resources in the default namespace:
[root@hdss7-21 ~]# kubectl get all [-n default]
NAME READY STATUS RESTARTS AGE
pod/nginx-ds-t8mwg 1/1 Running 2 44h
pod/nginx-ds-tjkt2 1/1 Running 2 44h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 6d17h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/nginx-ds 2 2 2 2 2 44h
[root@hdss7-21 ~]# kubectl create namespace app
namespace/app created
[root@hdss7-21 ~]# kubectl get ns
NAME STATUS AGE
app Active 12s
default Active 6d17h
kube-node-lease Active 6d17h
kube-public Active 6d17h
kube-system Active 6d17h
[root@hdss7-21 ~]# kubectl delete ns app
namespace "app" deleted
[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@hdss7-21 ~]# kubectl get deploy -n kube-public
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-dp 1/1 1 1 69s
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-mxtnz 1/1 Running 0 2m46s
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-mxtnz 1/1 Running 0 4m1s 172.7.21.3 hdss7-21.host.com <none> <none>
[root@hdss7-21 ~]# kubectl describe deployment nginx-dp -n kube-public
Name: nginx-dp
Namespace: kube-public
CreationTimestamp: Wed, 21 Apr 2021 21:10:41 +0800
Labels: app=nginx-dp
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx-dp
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx-dp
Containers:
nginx:
Image: harbor.od.com/public/nginx:v1.7.9
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-dp-5dfc689474 (1/1 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 14m deployment-controller Scaled up replica set nginx-dp-5dfc689474 to 1
[root@hdss7-21 ~]# kubectl exec -it nginx-dp-5dfc689474-mxtnz /bin/bash -n kube-public
root@nginx-dp-5dfc689474-mxtnz:/# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:07:15:03 brd ff:ff:ff:ff:ff:ff
inet 172.7.21.3/24 brd 172.7.21.255 scope global eth0
valid_lft forever preferred_lft forever
You can also enter the container with docker exec -it on the node that runs the pod.
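A rough sketch (find the real container with docker ps on the node hosting the pod; <container-id> is a placeholder):
[root@hdss7-21 ~]# docker ps | grep nginx-dp
[root@hdss7-21 ~]# docker exec -it <container-id> /bin/bash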
Deleting a pod amounts to a restart: the controller recreates it immediately, as the changed pod name below shows.
[root@hdss7-21 ~]# kubectl delete pod nginx-dp-5dfc689474-mxtnz -n kube-public
pod "nginx-dp-5dfc689474-mxtnz" deleted
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-vcqw5 1/1 Running 0 3s
[root@hdss7-21 ~]# kubectl delete deployment nginx-dp -n kube-public
deployment.extensions "nginx-dp" deleted
[root@hdss7-21 ~]# kubectl get all -n kube-public
No resources found.
First create the pod controller (a Deployment):
[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl get all -n kube-public
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-twtg5 1/1 Running 0 24s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-dp 1/1 1 1 24s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-dp-5dfc689474 1 1 1 24s
Then create the Service:
[root@hdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed
[root@hdss7-21 ~]# kubectl get all -n kube-public
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-twtg5 1/1 Running 0 5m3s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx-dp ClusterIP 192.168.44.93 <none> 80/TCP 59s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-dp 1/1 1 1 5m3s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-dp-5dfc689474 1 1 1 5m3s
[root@hdss7-22 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
TCP 192.168.44.93:80 nq
-> 172.7.21.3:80 Masq 1 0 0
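As a quick sanity check, the Service's ClusterIP should answer on any node running kube-proxy; curl the VIP and nginx should respond:
[root@hdss7-22 ~]# curl -I 192.168.44.93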
[root@hdss7-21 ~]# kubectl scale deployment nginx-dp --replicas=2 -n kube-public
deployment.extensions/nginx-dp scaled
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
TCP 192.168.44.93:80 nq
-> 172.7.21.3:80 Masq 1 0 0
-> 172.7.22.3:80 Masq 1 0 0
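To confirm the second replica landed on the other node, list the pods with wide output; there should now be one pod on each node, matching the two real servers behind the VIP above:
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide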
Declarative resource management relies on resource configuration manifests (yaml/json).
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-twtg5 1/1 Running 1 23h
[root@hdss7-21 ~]# kubectl get pods nginx-dp-5dfc689474-twtg5 -o yaml -n kube-public
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2021-04-21T14:01:09Z"
generateName: nginx-dp-5dfc689474-
labels:
app: nginx-dp
pod-template-hash: 5dfc689474
name: nginx-dp-5dfc689474-twtg5
namespace: kube-public
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: nginx-dp-5dfc689474
uid: fd61affd-bb07-494b-bd80-cca2830a53a0
resourceVersion: "51413"
selfLink: /api/v1/namespaces/kube-public/pods/nginx-dp-5dfc689474-twtg5
uid: 4c1b4cb9-44c8-4772-b448-16ae4073ac9c
spec:
containers:
- image: harbor.od.com/public/nginx:v1.7.9
imagePullPolicy: IfNotPresent
name: nginx
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-rxkkz
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: hdss7-21.host.com
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-rxkkz
secret:
defaultMode: 420
secretName: default-token-rxkkz
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2021-04-21T14:01:09Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2021-04-22T13:14:49Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2021-04-22T13:14:49Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2021-04-21T14:01:09Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://4f8cbfdc4c1fc3c8ba8b196f76a3f0c5ec269ba069344849f3b5d3a29f9ca826
image: harbor.od.com/public/nginx:v1.7.9
imageID: docker-pullable://harbor.od.com/public/nginx@sha256:b1f5935eb2e9e2ae89c0b3e2e148c19068d91ca502e857052f14db230443e4c2
lastState:
terminated:
containerID: docker://3b965ed0b5ce249c19cf0c543ae52578f4224d8e12360267384dcfdefe730b76
exitCode: 255
finishedAt: "2021-04-22T13:14:41Z"
reason: Error
startedAt: "2021-04-21T14:01:10Z"
name: nginx
ready: true
restartCount: 1
state:
running:
startedAt: "2021-04-22T13:14:48Z"
hostIP: 10.4.7.21
phase: Running
podIP: 172.7.21.2
qosClass: BestEffort
startTime: "2021-04-21T14:01:09Z"
[root@hdss7-22 ~]# kubectl get svc -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-dp ClusterIP 192.168.44.93 <none> 80/TCP 23h
[root@hdss7-22 ~]# kubectl get svc nginx-dp -o yaml -n kube-public
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2021-04-21T14:05:13Z"
labels:
app: nginx-dp
name: nginx-dp
namespace: kube-public
resourceVersion: "48685"
selfLink: /api/v1/namespaces/kube-public/services/nginx-dp
uid: ed0dc55e-3cae-4c6c-85f1-cf4e5d902267
spec:
clusterIP: 192.168.44.93
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-dp
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
[root@hdss7-22 ~]# kubectl explain service
KIND: Service
VERSION: v1
DESCRIPTION:
Service is a named abstraction of software service (for example, mysql)
consisting of local port (for example 3306) that the proxy listens on, and
the selector that determines which pods will answer requests sent through
the proxy.
FIELDS:
apiVersion
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
kind
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
metadata
[root@hdss7-21 ~]# vim nginx-ds-svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx-ds
name: nginx-ds
namespace: default
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-ds
sessionAffinity: None
type: ClusterIP
[root@hdss7-21 ~]# kubectl create -f nginx-ds-svc.yaml
service/nginx-ds created
[root@hdss7-21 ~]# kubectl get svc -n default
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 7d18h
nginx-ds ClusterIP 192.168.167.187 <none> 80/TCP 55s
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl get svc nginx-ds -o yaml
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2021-04-22T14:05:32Z"
labels:
app: nginx-ds
name: nginx-ds
namespace: default
resourceVersion: "55757"
selfLink: /api/v1/namespaces/default/services/nginx-ds
uid: 78abf20e-2e7c-4632-b00c-8a377a4fbe0d
spec:
clusterIP: 192.168.167.187
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-ds
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kubectl apply -f nginx-ds-svc.yaml
To modify a resource, either edit nginx-ds-svc.yaml and run kubectl apply -f nginx-ds-svc.yaml to make the change take effect,
or edit the manifest in place with kubectl edit service nginx-ds and save.
To delete, remove the resource directly or delete via the manifest file (nginx-ds lives in the default namespace):
kubectl delete service nginx-ds -n default
kubectl delete -f nginx-ds-svc.yaml
Kubernetes defines a network model but delegates its implementation to network plugins. The main job of a CNI network plugin is to let pod resources communicate across hosts.
Common CNI network plugins include Flannel, Calico, Canal, and Contiv; this section deploys Flannel:
Hostname | Role | IP |
---|---|---|
HDSS7-21.host.com | flannel | 10.4.7.21 |
HDSS7-22.host.com | flannel | 10.4.7.22 |
Deployment on 10.4.7.21 is shown as the example; configure 10.4.7.22 the same way.
Download: https://github.com/coreos/flannel/releases/
[root@hdss7-21 src]# cd /opt/src
[root@hdss7-21 src]# mkdir /opt/flannel-v0.11.0/
[root@hdss7-21 src]# tar zxf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@hdss7-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@hdss7-21 src]# mkdir /opt/flannel/cert # the cert directory must exist before the scp below
Log in to 10.4.7.200:
[root@hdss7-200 certs]# cd /opt/certs
[root@hdss7-200 certs]# scp -r client.pem client-key.pem ca.pem 10.4.7.21:/opt/flannel/cert/
[root@hdss7-200 certs]# scp -r client.pem client-key.pem ca.pem 10.4.7.22:/opt/flannel/cert/
[root@hdss7-21 flannel]# vim subnet.env # adjust the subnet/IP per node
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
[root@hdss7-21 flannel]# vim flanneld.sh # adjust the IP addresses per node
#!/bin/sh
./flanneld \
  --public-ip=10.4.7.21 \
  --etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --etcd-keyfile=./cert/client-key.pem \
  --etcd-certfile=./cert/client.pem \
  --etcd-cafile=./cert/ca.pem \
  --iface=ens33 \
  --subnet-file=./subnet.env \
  --healthz-port=2401
[root@hdss7-21 flannel]# chmod +x flanneld.sh
[root@hdss7-21 flannel]# mkdir -p /home/logs/flanneld
This only needs to be done on one etcd node; here I operate on 10.4.7.21.
[root@hdss7-21 ~]# cd /opt/etcd
[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
[root@hdss7-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
[root@hdss7-21 ~]# vim /etc/supervisord.d/flannel.ini # note: adjust the program name and paths per node
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/home/logs/flanneld/flanneld.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@hdss7-21 ~]# supervisorctl update
flanneld-7-21: added process group
[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21 RUNNING pid 6523, uptime 1:43:52
flanneld-7-21 RUNNING pid 36736, uptime 0:00:34
kube-apiserver-7-21 RUNNING pid 6528, uptime 1:43:52
kube-controller-manager-7-21 RUNNING pid 6536, uptime 1:43:52
kube-kubelet-7-21 RUNNING pid 6520, uptime 1:43:52
kube-proxy-7-21 RUNNING pid 6549, uptime 1:43:51
kube-scheduler-7-21 RUNNING pid 6564, uptime 1:43:51
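Once flanneld is running on both nodes, each node registers a subnet lease in etcd; the leases can be inspected with the same etcd v2 client used above (a quick check):
[root@hdss7-21 etcd]# ./etcdctl ls /coreos.com/network/subnets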
[root@hdss7-22 flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.254 0.0.0.0 UG 100 0 0 ens33
10.4.7.0 0.0.0.0 255.255.255.0 U 100 0 0 ens33
172.7.21.0 10.4.7.21 255.255.255.0 UG 0 0 0 ens33
172.7.22.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
[root@hdss7-22 flannel]# yum -y install iptables-services
[root@hdss7-22 flannel]# systemctl start iptables
[root@hdss7-22 flannel]# systemctl enable iptables
Created symlink from /etc/systemd/system/basic.target.wants/iptables.service to /usr/lib/systemd/system/iptables.service.
[root@hdss7-22 flannel]#
[root@hdss7-22 flannel]# iptables-save |grep -i postrouting
:POSTROUTING ACCEPT [9:546]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE # this is the rule we want to optimize
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE
[root@hdss7-22 flannel]# # delete the rule below
[root@hdss7-22 flannel]# iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-22 flannel]# # then insert a new rule
[root@hdss7-22 flannel]# iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@hdss7-22 flannel]# iptables-save |grep -i postrouting
:POSTROUTING ACCEPT [4:240]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE
[root@hdss7-22 flannel]# # save the rules to file
[root@hdss7-22 flannel]# iptables-save > /etc/sysconfig/iptables
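To verify the optimization, a rough check (the pod names below are placeholders; if curl is available in the image, curl from a pod on one node to a pod IP on the other, then read the target pod's access log — the recorded source should now be the client pod's 172.7.x.x address instead of the node IP):
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide # note pod names and IPs
[root@hdss7-21 ~]# kubectl exec -it <pod-on-7-21> -n kube-public -- curl -s <ip-of-pod-on-7-22>
[root@hdss7-22 ~]# kubectl logs <pod-on-7-22> -n kube-public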
Service discovery is the process by which services (applications) locate one another.
It is not unique to the cloud-computing era; traditional monolithic architectures used it too, but the following scenario needs it all the more:
in a K8S cluster, pods change constantly — how do we cope with that constant change?
The answer is to automatically associate a Service resource's "name" with its "cluster IP", so that services are discovered automatically within the cluster.
Plugins (software) that implement DNS inside K8S include Kube-DNS and CoreDNS; CoreDNS is deployed below.
Deploy an intranet HTTP service for K8S resource configuration manifests.
On the ops host 10.4.7.200, configure an nginx virtual host that serves as the unified access point for k8s resource manifests:
[root@hdss7-200 ~]# vim /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
listen 80;
server_name k8s-yaml.od.com k8s-yaml.grep.pro;
location / {
autoindex on;
default_type text/plain;
root /home/k8s-yaml;
}
}
[root@hdss7-200 ~]# mkdir -p /home/k8s-yaml/coredns
On the DNS host 10.4.7.11, append a DNS record at the bottom:
[root@hdss7-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2021032902 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A k8s-yaml.od.com @10.4.7.11 +short
10.4.7.200
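A quick check that the virtual host serves files; with autoindex on, the directory listing should include coredns/:
[root@hdss7-200 ~]# curl http://k8s-yaml.od.com/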
CoreDNS official GitHub: https://github.com/coredns/coredns
CoreDNS official Docker Hub: https://hub.docker.com/r/coredns/coredns/tags?page=1&ordering=last_updated
Log in to the ops host 10.4.7.200:
[root@hdss7-200 coredns]# docker pull docker.io/coredns/coredns:1.6.1
[root@hdss7-200 coredns]# docker images |grep coredns
coredns/coredns 1.6.1 c0f6e815079e 21 months ago 42.2MB
[root@hdss7-200 coredns]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@hdss7-200 coredns]# docker push harbor.od.com/public/coredns:v1.6.1
The push refers to repository [harbor.od.com/public/coredns]
da1ec456edc8: Pushed
225df95e717c: Pushed
v1.6.1: digest: sha256:c7bf0ce4123212c87db74050d4cbab77d8f7e0b49c041e894a35ef15827cf938 size: 739
Reference: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
[root@hdss7-200 coredns]# cd /home/k8s-yaml/coredns
[root@hdss7-200 coredns]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
[root@hdss7-200 coredns]# vim cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
ready
kubernetes cluster.local 192.168.0.0/16
forward . 10.4.7.11
cache 30
loop
reload
loadbalance
}
[root@hdss7-200 coredns]# vim dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
containers:
- name: coredns
image: harbor.od.com/public/coredns:v1.6.1
args:
- -conf
- /etc/coredns/Corefile
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
[root@hdss7-200 coredns]# vim svc.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 192.168.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
Log in to 10.4.7.21:
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-6b6c4f9648-xklfx 1/1 Running 0 5m9s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 5m1s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 1/1 1 1 5m9s
NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-6b6c4f9648 1 1 1 5m9s
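To confirm cluster DNS works, resolve a Service domain against 192.168.0.2 from any host with dig installed; Service names follow the pattern <service>.<namespace>.svc.cluster.local (this assumes the nginx-dp Service from earlier still exists):
[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local @192.168.0.2 +short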
K8S DNS enables services to be discovered automatically inside the cluster; so how do we make a service usable and accessible from outside the K8S cluster?
Traefik official GitHub: https://github.com/traefik/traefik
Traefik official Docker Hub:
Log in to the ops host 10.4.7.200:
[root@hdss7-200 k8s-yaml]# cd /home/k8s-yaml/
[root@hdss7-200 k8s-yaml]# mkdir traefik && cd traefik
[root@hdss7-200 traefik]# docker pull traefik:v1.7.2-alpine
[root@hdss7-200 traefik]# docker images |grep traefik
traefik v1.7.2-alpine add5fac61ae5 2 years ago 72.4MB
[root@hdss7-200 traefik]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 traefik]# docker push harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 traefik]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
[root@hdss7-200 traefik]# vim ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: traefik-ingress
namespace: kube-system
labels:
k8s-app: traefik-ingress
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress
name: traefik-ingress
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: harbor.od.com/public/traefik:v1.7.2
name: traefik-ingress
ports:
- name: controller
containerPort: 80
hostPort: 81
- name: admin-web
containerPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
- --insecureskipverify=true
- --kubernetes.endpoint=https://10.4.7.10:7443
- --accesslog
- --accesslog.filepath=/var/log/traefik_access.log
- --traefiklog
- --traefiklog.filepath=/var/log/traefik.log
- --metrics.prometheus
[root@hdss7-200 traefik]# vim svc.yaml
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress
ports:
- protocol: TCP
port: 80
name: controller
- protocol: TCP
port: 8080
name: admin-web
[root@hdss7-200 traefik]# vim ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: traefik-web-ui
namespace: kube-system
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: traefik.od.com
http:
paths:
- path: /
backend:
serviceName: traefik-ingress-service
servicePort: 8080
Log in to the 10.4.7.21 machine:
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
daemonset.extensions/traefik-ingress created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
service/traefik-ingress-service created
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml
ingress.extensions/traefik-web-ui created
[root@hdss7-21 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-xklfx 1/1 Running 1 23h
traefik-ingress-vqk7m 1/1 Running 0 3m57s
nginx on the 10.4.7.11 and 10.4.7.12 machines needs to be configured:
[root@hdss7-11 ~]# vim /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
server 10.4.7.21:81 max_fails=3 fail_timeout=10s;
server 10.4.7.22:81 max_fails=3 fail_timeout=10s;
}
server {
server_name *.od.com *.grep.pro;
location / {
proxy_pass http://default_backend_traefik;
proxy_set_header Host $http_host;
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
}
[root@hdss7-11 ~]# nginx -s reload
[root@hdss7-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2021032904 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
Visit http://traefik.od.com in a browser; the following page appears:
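If the page does not load, each hop can be tested separately (a rough check): curl traefik's hostPort on a node with the Host header set, then curl through the nginx proxy:
[root@hdss7-11 ~]# curl -I -H 'Host: traefik.od.com' http://10.4.7.21:81/
[root@hdss7-11 ~]# curl -I http://traefik.od.com/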
Log in to the 10.4.7.200 machine:
[root@hdss7-200 k8s-yaml]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
[root@hdss7-200 k8s-yaml]# docker images |grep dashboard
k8scn/kubernetes-dashboard-amd64 v1.8.3 fcac9aa03fd6 2 years ago 102MB
[root@hdss7-200 k8s-yaml]# docker tag fcac9aa03fd6 harbor.od.com/public/dashboard:v1.8.3
[root@hdss7-200 k8s-yaml]# docker push harbor.od.com/public/dashboard:v1.8.3
The push refers to repository [harbor.od.com/public/dashboard]
23ddb8cbb75a: Pushed
v1.8.3: digest: sha256:ebc993303f8a42c301592639770bd1944d80c88be8036e2d4d0aa116148264ff size: 529
[root@hdss7-200 k8s-yaml]# mkdir dashboard
[root@hdss7-200 k8s-yaml]# cd dashboard/
[root@hdss7-200 dashboard]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-admin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard-admin
namespace: kube-system
[root@hdss7-200 dashboard]# vim dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: harbor.od.com/public/dashboard:v1.8.3
resources:
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 50m
memory: 100Mi
ports:
- containerPort: 8443
protocol: TCP
args:
# PLATFORM-SPECIFIC ARGS HERE
- --auto-generate-certificates
volumeMounts:
- name: tmp-volume
mountPath: /tmp
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard-admin
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
[root@hdss7-200 dashboard]# vim svc.yaml
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
k8s-app: kubernetes-dashboard
ports:
- port: 443
targetPort: 8443
[root@hdss7-200 dashboard]# vim ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kube-system
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: dashboard.od.com
http:
paths:
- backend:
serviceName: kubernetes-dashboard
servicePort: 443
Pick any compute node; here I choose 10.4.7.22:
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
[root@hdss7-22 ~]#
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
deployment.apps/kubernetes-dashboard created
[root@hdss7-22 ~]#
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
service/kubernetes-dashboard created
[root@hdss7-22 ~]#
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml
ingress.extensions/kubernetes-dashboard created
[root@hdss7-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2021032905 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
dashboard A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A dashboard.od.com @10.4.7.11 +short
10.4.7.10
Log in to the 10.4.7.200 host and create the certificate:
[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# (umask 077; openssl genrsa -out dashboard.od.com.key 2048)
Generating RSA private key, 2048 bit long modulus
...................................................................+++
...+++
e is 65537 (0x10001)
[root@hdss7-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@hdss7-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops
[root@hdss7-200 certs]# ll dashboard.od.com.*
-rw-r--r-- 1 root root 1196 May 10 22:09 dashboard.od.com.crt
-rw-r--r-- 1 root root 1005 May 10 22:05 dashboard.od.com.csr
-rw------- 1 root root 1675 May 10 21:57 dashboard.od.com.key
Switch to the 10.4.7.11 server:
[root@hdss7-11 ~]# cd /etc/nginx/
[root@hdss7-11 nginx]# mkdir certs
[root@hdss7-11 nginx]# cd certs/
[root@hdss7-11 certs]# scp -r hdss7-200:/opt/certs/dashboard.od.com.* ./
[root@hdss7-11 certs]# rm -f dashboard.od.com.csr
[root@hdss7-11 certs]# cd /etc/nginx/conf.d
[root@hdss7-11 conf.d]# vim dashboard.od.com.conf
server {
listen 80;
server_name dashboard.od.com;
rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
listen 443 ssl;
server_name dashboard.od.com;
ssl_certificate "certs/dashboard.od.com.crt";
ssl_certificate_key "certs/dashboard.od.com.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location / {
proxy_pass http://default_backend_traefik;
proxy_set_header Host $http_host;
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
}
Refresh the page; at the browser security warning, click Advanced and proceed to the site.
[root@hdss7-22 ~]# kubectl get secret -n kube-system
NAME TYPE DATA AGE
coredns-token-snpx8 kubernetes.io/service-account-token 3 20h
default-token-z6pmn kubernetes.io/service-account-token 3 21d
kubernetes-dashboard-admin-token-pbr2v kubernetes.io/service-account-token 3 16m
kubernetes-dashboard-key-holder Opaque 2 6m19s
traefik-ingress-controller-token-t27zn kubernetes.io/service-account-token 3 8h
[root@hdss7-22 ~]# kubectl describe secret kubernetes-dashboard-admin-token-pbr2v -n kube-system
Name: kubernetes-dashboard-admin-token-pbr2v
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard-admin
kubernetes.io/service-account.uid: 1f03a210-3dae-4b10-9a19-1c5b6679edd4
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1346 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImtYmVybmV0ZXMuaW8vc2VydmljZWFY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi1wYnIydiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjFmMDNhMjEwLTNkYWUtNGIxMC05YTE5LTFjNWI2Njc5ZWRkNCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.G76b_oYCaqIL2h6ejhak5qeO4BnibLxv9RNmi-y23DLvcekzs_wKk7D1KSUDTF_yGF9GnQZ_ECA_4d8yH2q3l0vwpCcitXw0H_YsOaGw5t8wZbATSUKEEZfjAULXXnZREP9Aa8as14i1tcgw2DGcHxyBCcP9bvhZcj3INsat3lBcmotr3Y3ynDGXAkE-8CSRFnK2YbnUtCc0CijC2nPgugBNR-wV9SMhoLQ1L5SZHOQgmaC9OKlQhGCDvWukDXUdBtaNdBW1UJUMHrg1UV5iwFAtQccpOxfoUa8WJVkGBQYsDlpT3LqG21sPUt6HwD4228MbiWvqRbgNBL3IQcgowg
Log in to the dashboard with the token above.
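Instead of copying the token out of the describe output, it can also be extracted directly (same secret name as above; look yours up first):
[root@hdss7-22 ~]# kubectl -n kube-system get secret kubernetes-dashboard-admin-token-pbr2v -o jsonpath='{.data.token}' | base64 -d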
On 10.4.7.200:
[root@hdss7-200 dashboard]# vim rbac-minimal.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
Apply it on compute node 10.4.7.22:
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac-minimal.yaml
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
On 10.4.7.200, modify dp.yaml:
[root@hdss7-200 dashboard]# vim dp.yaml
# change line 51 to: serviceAccountName: kubernetes-dashboard
Re-apply dp.yaml on compute node 10.4.7.22:
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
deployment.apps/kubernetes-dashboard configured
Log in to the ops host 10.4.7.200:
[root@hdss7-200 dashboard]# cd /home/k8s-yaml/dashboard
[root@hdss7-200 dashboard]# mkdir heapster
[root@hdss7-200 dashboard]# cd heapster/
[root@hdss7-200 heapster]# docker pull quay.io/bitnami/heapster:1.5.4
[root@hdss7-200 heapster]# docker images |grep heap
quay.io/bitnami/heapster 1.5.4 c359b95ad38b 2 years ago 136MB
[root@hdss7-200 heapster]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@hdss7-200 heapster]# docker push harbor.od.com/public/heapster:v1.5.4
[root@hdss7-200 heapster]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: heapster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
[root@hdss7-200 heapster]# vim dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
task: monitoring
k8s-app: heapster
spec:
serviceAccountName: heapster
containers:
- name: heapster
image: harbor.od.com/public/heapster:v1.5.4
imagePullPolicy: IfNotPresent
command:
- /opt/bitnami/heapster/bin/heapster
- --source=kubernetes:https://kubernetes.default
[root@hdss7-200 heapster]# vim svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
kubernetes.io/cluster-service: 'true'
kubernetes.io/name: Heapster
name: heapster
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster
Back on a compute node, apply these; here I choose the 10.4.7.22 server:
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/rbac.yaml
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
[root@hdss7-22 ~]#
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/dp.yaml
deployment.extensions/heapster created
[root@hdss7-22 ~]#
[root@hdss7-22 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/svc.yaml
service/heapster created
First check the current version:
[root@hdss7-22 ~]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
hdss7-21.host.com Ready master,node 22d v1.15.11 10.4.7.21 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://20.10.5
hdss7-22.host.com Ready master,node 22d v1.15.11 10.4.7.22 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://20.10.5
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6b6c4f9648-xklfx 1/1 Running 5 15d 172.7.21.4 hdss7-21.host.com <none> <none>
heapster-b5b9f794-8dvvh 1/1 Running 0 23m 172.7.22.4 hdss7-22.host.com <none> <none>
traefik-ingress-rn7fq 1/1 Running 4 14d 172.7.22.3 hdss7-22.host.com <none> <none>
traefik-ingress-vqk7m 1/1 Running 4 14d 172.7.21.3 hdss7-21.host.com <none> <none>
Delete one of the nodes and check again; all the pods have moved to the other node:
[root@hdss7-21 ~]# kubectl delete node hdss7-21.host.com
Copy the new package into the same directory, untar it, copy the scripts and other files over from the old version's directory, then restart the supervisord programs.
Hostname | Role | IP |
---|---|---|
HDSS7-11.host.com | k8s proxy node 1, zk1 | 10.4.7.11 |
HDSS7-12.host.com | k8s proxy node 2, zk2 | 10.4.7.12 |
HDSS7-21.host.com | k8s compute node 1, zk3 | 10.4.7.21 |
HDSS7-22.host.com | k8s compute node 2, jenkins | 10.4.7.22 |
HDSS7-200.host.com | k8s ops node (docker registry) | 10.4.7.200 |
Three hosts take the zk role.
Official site: https://www.oracle.com/cn/java/technologies/javase/javase-jdk8-downloads.html
Official download: https://download.oracle.com/otn/java/jdk/8u291-b10/d7fc238d0cbf4b0dac67be84580cfb4b/jdk-8u291-linux-x64.tar.gz
Baidu Netdisk: https://pan.baidu.com/s/1HIgHE3bGS19hz0P09dntMQ (extraction code: lszs)
Installation is shown on 10.4.7.11 as the example.
Unpack jdk-8u291-linux-x64.tar.gz:
[root@hdss7-11 src]# mkdir /usr/java
[root@hdss7-11 src]# tar zxf jdk-8u291-linux-x64.tar.gz -C /usr/java/
[root@hdss7-11 src]# ln -s /usr/java/jdk1.8.0_291 /usr/java/jdk
[root@hdss7-11 src]# vim /etc/profile
export JAVA_HOME=/usr/java/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/lib/tools.jar
[root@hdss7-11 src]# source /etc/profile
[root@hdss7-11 src]# java -version
java version "1.8.0_291"
Java(TM) SE Runtime Environment (build 1.8.0_291-b10)
Java HotSpot(TM) 64-Bit Server VM (build 25.291-b10, mixed mode)
Download: https://archive.apache.org/dist/zookeeper/
Netdisk: https://pan.baidu.com/s/14HmIcZi225p2wIUzXIzfjQ (extraction code: lszs)
Installing zookeeper-3.4.11.tar.gz on the 10.4.7.11 server is shown as the example:
[root@hdss7-11 src]# tar zxf zookeeper-3.4.11.tar.gz -C /opt/
[root@hdss7-11 src]# cd ..
[root@hdss7-11 opt]# ln -s zookeeper-3.4.11 zookeeper
[root@hdss7-11 opt]# mkdir -vp /home/zookeeper/data /home/zookeeper/logs
[root@hdss7-11 opt]# vim /opt/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/zookeeper/data
dataLogDir=/home/zookeeper/logs
clientPort=2181
server.1=zk1.od.com:2888:3888
server.2=zk2.od.com:2888:3888
server.3=zk3.od.com:2888:3888
[root@hdss7-11 zookeeper]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2021032906 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
dashboard A 10.4.7.10
zk1 A 10.4.7.11
zk2 A 10.4.7.12
zk3 A 10.4.7.21
[root@hdss7-11 zookeeper]# systemctl restart named
On 10.4.7.11:
[root@hdss7-11 zookeeper]# vim /home/zookeeper/data/myid
1
On 10.4.7.12:
[root@hdss7-12 zookeeper]# vim /home/zookeeper/data/myid
2
On 10.4.7.21:
[root@hdss7-21 zookeeper]# vim /home/zookeeper/data/myid
3
[root@hdss7-11 zookeeper]# /opt/zookeeper/bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
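Start ZooKeeper the same way on 10.4.7.12 and 10.4.7.21, then check each member's role; with all three up, one should report Mode: leader and the other two Mode: follower:
[root@hdss7-11 zookeeper]# /opt/zookeeper/bin/zkServer.sh status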
Prepare the image:
Jenkins official site: https://www.jenkins.io/
Jenkins image: https://hub.docker.com/r/jenkins/jenkins/tags?page=1&ordering=last_updated&name=2.190.3
Pull command: docker pull jenkins/jenkins:2.190.3
Log in to the 10.4.7.200 machine:
[root@hdss7-200 ~]# docker pull jenkins/jenkins:2.190.3
[root@hdss7-200 ~]# docker tag 22b8b9a84dbe harbor.od.com/public/jenkins:v2.190.3
[root@hdss7-200 ~]# docker push harbor.od.com/public/jenkins:v2.190.3
The push refers to repository [harbor.od.com/public/jenkins]
e0485b038afa: Pushed
2950fdd45d03: Pushed
cfc53f61da25: Pushed
29c489ae7aae: Pushed
473b7de94ea9: Pushed
6ce697717948: Pushed
0fb3a3c5199f: Pushed
23257f20fce5: Pushed
b48320151ebb: Pushed
911119b5424d: Pushed
5051dc7ca502: Pushed
a8902d6047fe: Pushed
99557920a7c5: Pushed
7e3c900343d0: Pushed
b8f8aeff56a8: Pushed
687890749166: Pushed
2f77733e9824: Pushed
97041f29baff: Pushed
v2.190.3: digest: sha256:64576b8bd0a7f5c8ca275f4926224c29e7aa3f3167923644ec1243cd23d611f3 size: 4087
[root@hdss7-200 ~]# ssh-keygen -t rsa -b 2048 -C "[email protected]" -N "" -f /root/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:XGLKu1260LNjoyti/iQk5k7/qAwB/sOhHn/XNCbDjH4 [email protected]
The key's randomart image is:
+---[RSA 2048]----+
| |
| |
|. o . |
|o . + o |
|oo.. * S |
|o++ .. B + |
|.=.+o o O o |
|* *o+o E+* |
| Bo*+o*+=+ |
+----[SHA256]-----+
[root@hdss7-200 ~]# cd /home/dockerfile/jenkins
[root@hdss7-200 jenkins]# vim Dockerfile
FROM harbor.od.com/public/jenkins:v2.190.3
USER root
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime &&\
echo 'Asia/Shanghai' > /etc/timezone
ADD id_rsa /root/.ssh/id_rsa
ADD config.json /root/.docker/config.json
ADD get-docker.sh /get-docker.sh
RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config &&\
/get-docker.sh
[root@hdss7-200 jenkins]# cp /root/.ssh/id_rsa ./
[root@hdss7-200 jenkins]# cp /root/.docker/config.json ./
[root@hdss7-200 jenkins]# curl -fsSL get.docker.com -o get-docker.sh
This Dockerfile does a few things: it sets the container timezone to Asia/Shanghai, bakes in the root user's SSH private key and Docker registry credentials, disables SSH strict host key checking, and installs the Docker CLI via get-docker.sh.
[root@hdss7-200 jenkins]# docker build . -t harbor.od.com/infra/jenkins:v2.190.3
[root@hdss7-200 jenkins]# docker push harbor.od.com/infra/jenkins:v2.190.3
On any compute node; here I choose 10.4.7.21:
[root@hdss7-21 conf]# kubectl create ns infra
namespace/infra created
[root@hdss7-21 conf]#
[root@hdss7-21 conf]# kubectl create secret docker-registry harbor --docker-server=harbor.od.com --docker-username=admin --docker-password=Harbor12345 -n infra
secret/harbor created
[root@hdss7-21 conf]# kubectl describe secrets harbor -n infra
Name: harbor
Namespace: infra
Labels: <none>
Annotations: <none>
Type: kubernetes.io/dockerconfigjson
Data
====
.dockerconfigjson: 107 bytes
Run on the ops host and on all compute nodes:
[root@hdss7-21 conf]# yum install -y nfs-utils
On the ops host 10.4.7.200:
[root@hdss7-200 ~]# vim /etc/exports
/home/nfs-volume 10.4.7.0/24(rw,no_root_squash)
[root@hdss7-200 ~]# mkdir /home/nfs-volume
[root@hdss7-200 ~]# systemctl start nfs
[root@hdss7-200 ~]# systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
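From any compute node, the export should now be visible (showmount ships with the nfs-utils package installed above):
[root@hdss7-21 ~]# showmount -e 10.4.7.200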
[root@hdss7-200 ~]# cd /home/k8s-yaml/
[root@hdss7-200 k8s-yaml]# mkdir jenkins
[root@hdss7-200 k8s-yaml]# cd jenkins
[root@hdss7-200 jenkins]#
[root@hdss7-200 jenkins]# vim dp.yaml
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: jenkins
namespace: infra
labels:
name: jenkins
spec:
replicas: 1
selector:
matchLabels:
name: jenkins
template:
metadata:
labels:
app: jenkins
name: jenkins
spec:
volumes:
- name: data
nfs:
server: hdss7-200
path: /home/nfs-volume/jenkins_home
- name: docker
hostPath:
path: /run/docker.sock
type: ''
containers:
- name: jenkins
image: harbor.od.com/infra/jenkins:v2.190.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
protocol: TCP
env:
- name: JAVA_OPTS
value: -Xmx512m -Xms512m
volumeMounts:
- name: data
mountPath: /var/jenkins_home
- name: docker
mountPath: /run/docker.sock
imagePullSecrets:
- name: harbor
securityContext:
runAsUser: 0
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
revisionHistoryLimit: 7
progressDeadlineSeconds: 600
[root@hdss7-200 jenkins]# vim svc.yaml
kind: Service
apiVersion: v1
metadata:
name: jenkins
namespace: infra
spec:
ports:
- protocol: TCP
port: 80
targetPort: 8080
selector:
app: jenkins
[root@hdss7-200 jenkins]# vim ingress.yaml
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
name: jenkins
namespace: infra
spec:
rules:
- host: jenkins.od.com
http:
paths:
- path: /
backend:
serviceName: jenkins
servicePort: 80
[root@hdss7-200 jenkins]# mkdir /home/nfs-volume/jenkins_home
[root@hdss7-200 jenkins]# kubectl create -f http://k8s-yaml.od.com/jenkins/dp.yaml
[root@hdss7-200 jenkins]# kubectl create -f http://k8s-yaml.od.com/jenkins/svc.yaml
[root@hdss7-200 jenkins]# kubectl create -f http://k8s-yaml.od.com/jenkins/ingress.yaml
[root@hdss7-200 jenkins]# kubectl get pod -n infra
NAME READY STATUS RESTARTS AGE
jenkins-66799565d8-n7w6m 1/1 Running 0 111s
[root@hdss7-200 jenkins]# kubectl exec -it -n infra jenkins-66799565d8-n7w6m /bin/bash
root@jenkins-66799565d8-n7w6m:/# date
Sat Feb 27 09:23:26 CST 2021
root@jenkins-66799565d8-n7w6m:/# whoami
root
Log in to the 10.4.7.11 server:
[root@hdss7-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2021032907 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
dashboard A 10.4.7.10
zk1 A 10.4.7.11
zk2 A 10.4.7.12
zk3 A 10.4.7.21
jenkins A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A jenkins.od.com @10.4.7.11 +short
10.4.7.10