Table of Contents
I. Environment setup
1. Prepare the environment
1) Machine notes: system version 7.4 or 7.6 (CentOS) is recommended
2) Set the hostname on every host and edit the hosts file
2. Install the master node
1) Install and configure etcd
2) Install the k8s-master node
3) Configure the apiserver
4) Configure the controller-manager and scheduler
5) Start the k8s services
3. Install the node components on k8s-master
1) Install the node packages
2) Configure the kubelet
3) Start the kubelet (this automatically starts the docker service)
4) Start kube-proxy
5) Check the node
4. Install and configure the k8s-node1 node
1) Install the node packages
2) Connect node1 to k8s-master
3) Configure the kubelet
4) Start the services
5) Check node status from the master node
5. Install the k8s-node2 node
1) Install the node packages
2) Connect node2 to k8s-master
3) Configure the kubelet
4) Start the services
6. Configure the flannel network on all nodes
7. Configure docker to load a firewall rule on startup that allows forwarding
II. Common k8s resource management
1. Create a pod
2. Pod management
3. Pod operations
| Hostname   | IP address      | Role         | Components                                                                   |
| ---------- | --------------- | ------------ | ---------------------------------------------------------------------------- |
| k8s-master | 192.168.147.138 | Master, node | etcd, apiserver, controller-manager, scheduler, kube-proxy, docker, registry |
| k8s-node1  | 192.168.147.139 | Node         | kubelet, kube-proxy, docker                                                  |
| k8s-node2  | 192.168.147.140 | Node         | kubelet, kube-proxy, docker                                                  |
[root@slave ~]# hostnamectl set-hostname k8s-master
[root@slave ~]# bash
[root@k8s-master ~]# vim /etc/hosts
192.168.147.138 k8s-master
192.168.147.139 k8s-node1
192.168.147.140 k8s-node2
[root@k8s-master ~]# scp /etc/hosts 192.168.147.139:/etc/hosts
[root@k8s-master ~]# scp /etc/hosts 192.168.147.140:/etc/hosts
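Before continuing, it is worth confirming that every hostname in /etc/hosts actually resolves. A minimal sanity check, runnable from any of the three machines:
[root@k8s-master ~]# for h in k8s-master k8s-node1 k8s-node2; do ping -c 1 -W 1 $h > /dev/null && echo "$h resolves" || echo "$h FAILED"; done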
[root@k8s-master ~]# yum install etcd -y
[root@k8s-master ~]# cp /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bak
[root@k8s-master ~]# vim /etc/etcd/etcd.conf
6 ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
21 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.147.138:2379"
[root@k8s-master ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
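The transcript above only enables etcd; the service also has to be running before the apiserver can connect. A quick start-and-verify, using the v2 etcdctl client shipped with the CentOS etcd package:
[root@k8s-master ~]# systemctl start etcd
[root@k8s-master ~]# etcdctl --endpoints http://192.168.147.138:2379 cluster-health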
[root@k8s-master ~]# yum -y install kubernetes-master.x86_64
[root@k8s-master ~]# vim /etc/kubernetes/apiserver
8 KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0" // change the listen address
12 KUBE_API_PORT="--port=8080" // listen port
16 KUBELET_PORT="--kubelet-port=10250" // kubelet listen port
19 KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.147.138:2379" // connect to etcd
24 KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
[root@k8s-master ~]# vim /etc/kubernetes/config
22 KUBE_MASTER="--master=http://192.168.147.138:8080"
[root@k8s-master ~]# systemctl start kube-apiserver.service
[root@k8s-master ~]# systemctl start kube-controller-manager.service
[root@k8s-master ~]# systemctl start kube-scheduler.service
[root@k8s-master ~]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@k8s-master ~]# systemctl enable kube-controller-manager.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@k8s-master ~]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
6) Check that every installed component is healthy
[root@k8s-master ~]# kubectl get componentstatus
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
[root@k8s-master ~]# yum install kubernetes-node.x86_64 -y
[root@k8s-master ~]# vim /etc/kubernetes/kubelet
5 KUBELET_ADDRESS="--address=192.168.147.138" // listen address
11 KUBELET_HOSTNAME="--hostname-override=k8s-master" // node name registered with the apiserver
14 KUBELET_API_SERVER="--api-servers=http://192.168.147.138:8080" // apiserver endpoint
[root@k8s-master ~]# systemctl start kubelet
[root@k8s-master ~]# systemctl enable kubelet
[root@k8s-master ~]# systemctl start kube-proxy
[root@k8s-master ~]# systemctl enable kube-proxy
[root@k8s-master ~]# kubectl get nodes
NAME STATUS AGE
k8s-master Ready 2m
[root@k8s-node1 ~]# yum install kubernetes-node.x86_64 -y
[root@k8s-node1 ~]# vim /etc/kubernetes/config
22 KUBE_MASTER="--master=http://192.168.147.138:8080"
[root@k8s-node1 ~]# vim /etc/kubernetes/kubelet
5 KUBELET_ADDRESS="--address=192.168.147.139"
11 KUBELET_HOSTNAME="--hostname-override=k8s-node1"
15 KUBELET_API_SERVER="--api-servers=http://192.168.147.138:8080"
[root@k8s-node1 ~]# systemctl start kubelet
[root@k8s-node1 ~]# systemctl start kube-proxy
[root@k8s-node1 ~]# systemctl enable kubelet
[root@k8s-node1 ~]# systemctl enable kube-proxy
[root@k8s-master ~]# kubectl get nodes
NAME STATUS AGE
k8s-master Ready 50m
k8s-node1 Ready 15s // node1 has joined the cluster
[root@k8s-node2 ~]# yum install kubernetes-node.x86_64 -y
[root@k8s-node2 ~]# vim /etc/kubernetes/config
22 KUBE_MASTER="--master=http://192.168.147.138:8080"
[root@k8s-node2 ~]# vim /etc/kubernetes/kubelet
5 KUBELET_ADDRESS="--address=192.168.147.140"
11 KUBELET_HOSTNAME="--hostname-override=k8s-node2"
15 KUBELET_API_SERVER="--api-servers=http://192.168.147.138:8080"
[root@k8s-node2 ~]# systemctl start kubelet
[root@k8s-node2 ~]# systemctl start kube-proxy
[root@k8s-node2 ~]# systemctl enable kubelet
[root@k8s-node2 ~]# systemctl enable kube-proxy
[root@k8s-master ~]# kubectl get nodes
NAME STATUS AGE
k8s-master Ready 16m
k8s-node1 Ready 11s
k8s-node2 Ready 12s
1) Install flannel on the k8s-master node
[root@k8s-master ~]# yum install flannel -y
[root@k8s-master ~]# vim /etc/sysconfig/flanneld
4 FLANNEL_ETCD_ENDPOINTS=http://192.168.147.138:2379
[root@k8s-master ~]# etcdctl set /atomic.io/network/config '{ "Network": "172.16.0.0/16" }' // define the flannel network range
{ "Network": "172.16.0.0/16" }
[root@k8s-master ~]# systemctl start flanneld
[root@k8s-master ~]# systemctl enable flanneld
[root@k8s-master ~]# ifconfig // a new flannel0 interface has appeared
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
inet 172.16.63.0 netmask 255.255.0.0 destination 172.16.63.0
inet6 fe80::41de:6f31:283f:fd63 prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 1 bytes 48 (48.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@k8s-master ~]# systemctl restart docker // restart docker so its networking matches the flannel network
[root@k8s-master ~]# systemctl enable docker
[root@k8s-master ~]# ifconfig // flannel0 is still up after the docker restart
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
inet 172.16.63.0 netmask 255.255.0.0 destination 172.16.63.0
inet6 fe80::41de:6f31:283f:fd63 prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3 bytes 144 (144.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
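After the restart, docker0 should receive an address inside the per-host subnet that flannel assigned (172.16.63.0/24 in this example). A quick check:
[root@k8s-master ~]# ip addr show docker0 | grep inet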
2) Configure the flannel network on node1
[root@k8s-node1 ~]# yum install flannel -y
[root@k8s-node1 ~]# vim /etc/sysconfig/flanneld
4 FLANNEL_ETCD_ENDPOINTS=http://192.168.147.138:2379
[root@k8s-node1 ~]# systemctl start flanneld
[root@k8s-node1 ~]# systemctl enable flanneld
[root@k8s-node1 ~]# systemctl restart docker
[root@k8s-node1 ~]# systemctl enable docker
3) Install the flannel network on node2
[root@k8s-node2 ~]# yum install flannel -y
[root@k8s-node2 ~]# vim /etc/sysconfig/flanneld
4 FLANNEL_ETCD_ENDPOINTS=http://192.168.147.138:2379
[root@k8s-node2 ~]# systemctl start flanneld
[root@k8s-node2 ~]# systemctl enable flanneld
[root@k8s-node2 ~]# systemctl restart docker
[root@k8s-node2 ~]# systemctl enable docker
[root@k8s-master ~]# kubectl get nodes
NAME STATUS AGE
k8s-master Ready 21m
k8s-node1 Ready 5m
k8s-node2 Ready 5m
4) Test cross-host communication between docker containers
[root@k8s-node1 ~]# iptables -P FORWARD ACCEPT // allow forwarded traffic
[root@k8s-node2 ~]# iptables -P FORWARD ACCEPT
[root@k8s-master ~]# iptables -P FORWARD ACCEPT
[root@k8s-master ~]# docker run -it busybox // pull the image and start a container
/ # ping 172.16.63.1 // test communication with another docker host
PING 172.16.63.1 (172.16.63.1): 56 data bytes
64 bytes from 172.16.63.1: seq=0 ttl=64 time=0.088 ms
64 bytes from 172.16.63.1: seq=1 ttl=64 time=0.131 ms
1) Configure the k8s-master node
[root@k8s-master ~]# vim /usr/lib/systemd/system/docker.service
18 ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT # added manually
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker
2) Configure the k8s-node1 node
[root@k8s-node1 ~]# vim /usr/lib/systemd/system/docker.service
18 ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
[root@k8s-node1 ~]# systemctl daemon-reload
[root@k8s-node1 ~]# systemctl restart docker
3) Configure the k8s-node2 node
[root@k8s-node2 ~]# vim /usr/lib/systemd/system/docker.service
18 ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
[root@k8s-node2 ~]# systemctl daemon-reload
[root@k8s-node2 ~]# systemctl restart docker
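To confirm the ExecStartPost line re-applies the policy whenever docker starts, check the FORWARD chain after a restart; the first line printed by iptables -S is the chain policy:
[root@k8s-master ~]# iptables -S FORWARD | head -1
-P FORWARD ACCEPT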
1) Create the yaml file
[root@k8s-master ~]# mkdir k8s
[root@k8s-master ~]# vim ./k8s/nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: web
spec:
  containers:
    - name: nginx
      image: nginx:1.13
      ports:
        - containerPort: 80
2) Create the container
Method 1: install via yum
[root@k8s-master ~]# yum install *rhsm* -y
Then run:
[root@k8s-master ~]# wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
[root@k8s-master ~]# rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem
The two commands above generate the /etc/rhsm/ca/redhat-uep.pem file.
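The kubelet pulls the pod-infrastructure image on whichever node a pod gets scheduled to, so the certificate is needed on every node, not just the master. One way to distribute it (assuming /etc/rhsm/ca already exists on the nodes; create it with mkdir -p first if not):
[root@k8s-master ~]# scp /etc/rhsm/ca/redhat-uep.pem 192.168.147.139:/etc/rhsm/ca/
[root@k8s-master ~]# scp /etc/rhsm/ca/redhat-uep.pem 192.168.147.140:/etc/rhsm/ca/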
[root@k8s-master ~]# docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
latest: Pulling from rhel7/pod-infrastructure
26e5ed6899db: Pull complete
66dbe984a319: Pull complete
9138e7863e08: Pull complete
Digest: sha256:47db25d46e39f338142553f899cedf6b0ad9f04c6c387a94b6b0964b7d1b7678
Status: Downloaded newer image for registry.access.redhat.com/rhel7/pod-infrastructure:latest
[root@k8s-master ~]# kubectl create -f ./k8s/nginx.yaml
3) Check the creation and run status of all pods
[root@k8s-master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 32s
4) View a specific pod
[root@k8s-master ~]# kubectl get pod nginx
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 20m
5) View detailed information about the running pod
[root@k8s-master ~]# kubectl describe pod nginx
Name: nginx
Namespace: default
Node: k8s-node2/192.168.147.140
Start Time: Mon, 27 Dec 2021 22:35:49 +0800
Labels: app=web
Status: Pending
IP:
Controllers:
Containers:
nginx:
Container ID:
Image: nginx:1.13
Image ID:
Port: 80/TCP
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Volume Mounts:
Environment Variables:
Conditions:
Type Status
Initialized True
Ready False
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations:
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
1d 58s 14 {kubelet k8s-node2} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request. details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"
1d 6s 118 {kubelet k8s-node2} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "POD" with ImagePullBackOff: "Back-off pulling image \"registry.access.redhat.com/rhel7/pod-infrastructure:latest\""
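The events show why the pod is stuck on k8s-node2: docker on that node cannot verify registry.access.redhat.com because redhat-ca.crt is missing under /etc/docker/certs.d. A common fix, sketched here on the assumption that the redhat-uep.pem from the earlier step is present on the node, is to link the pem to the path docker expects and let the kubelet retry the pull:
[root@k8s-node2 ~]# mkdir -p /etc/docker/certs.d/registry.access.redhat.com
[root@k8s-node2 ~]# ln -s /etc/rhsm/ca/redhat-uep.pem /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt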
[root@k8s-master ~]# kubectl get pod nginx -o wide // show which node the pod was scheduled to
NAME READY STATUS RESTARTS AGE IP NODE
nginx 0/1 ContainerCreating 0 1d k8s-node2
6) Verify the running pod
[root@k8s-master ~]# kubectl get pod nginx -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 3 1d 172.16.56.2 k8s-node2
1) Delete the pod
[root@k8s-master ~]# kubectl delete pod nginx
pod "nginx" deleted
2) Confirm the deleted pod can no longer be found
[root@k8s-master ~]# kubectl get pod nginx -o wide
Error from server (NotFound): pods "nginx" not found
3) Recreate the pod
[root@k8s-master ~]# kubectl create -f ./k8s/nginx.yaml
pod "nginx" created
4) This time the pod is scheduled onto the k8s-master node; the image download is slow, so it is not yet running
[root@k8s-master ~]# kubectl get pod nginx -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 0/1 ContainerCreating 0 4m k8s-master
5) Access the service running in the pod
[root@k8s-master ~]# curl -I http://172.16.7.3
HTTP/1.1 200 OK
Server: nginx/1.13.12
Date: Mon, 03 Jan 2022 13:35:54 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Mon, 09 Apr 2018 16:01:09 GMT
Connection: keep-alive
ETag: "5acb8e45-264"
Accept-Ranges: bytes
1) Edit the config file so one pod runs multiple business containers
[root@k8s-master ~]# vim ./k8s/nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test1
  labels:
    app: web
spec:
  containers:
    - name: nginx01
      image: nginx:1.13
      ports:
        - containerPort: 80
    - name: busybox
      image: docker.io/busybox:latest
      command: ["sleep", "3600"]
      ports:
        - containerPort: 80
2) Create the test1 resource
[root@k8s-master ~]# kubectl create -f ./k8s/nginx.yaml
pod "test1" created
3) Check pod status and placement
[root@k8s-master ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 0 5d 172.16.7.3 k8s-master
test 1/1 Running 1 1h 172.16.96.2 k8s-node2
test1 2/2 Running 0 33s 172.16.12.2 k8s-node1
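Because containers in a pod share one network namespace, the busybox container can reach nginx01 over localhost. A quick check, assuming the test1 pod is Running:
[root@k8s-master ~]# kubectl exec test1 -c busybox -- wget -qO- http://127.0.0.1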