Each virtual machine needs at least 2 CPU cores; 2 GB of memory is allocated here.
k8s-master 192.168.8.20
k8s-node1 192.168.8.21
k8s-node2 192.168.8.22
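Before going further it is worth confirming on each VM that the resources meet the minimum, since kubeadm's preflight checks refuse to initialize with fewer than 2 CPUs. A quick check, plus setting the hostname if it has not been set yet:
# Number of CPU cores (should be >= 2)
nproc
# Total memory (should be >= 2G)
free -h
# Set the hostname on each machine, e.g. on the master:
hostnamectl set-hostname k8s-master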
cat >> /etc/hosts << EOF
192.168.8.20 k8s-master
192.168.8.21 k8s-node1
192.168.8.22 k8s-node2
EOF
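After appending the entries, a quick ping by hostname confirms that name resolution between the nodes works:
# Run from any node (Ctrl+C to stop early)
ping -c 2 k8s-master
ping -c 2 k8s-node1
ping -c 2 k8s-node2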
# Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Check the firewall status (shows "not running" when stopped, "running" when active)
firewall-cmd --state
[root@k8s-master ~]# systemctl stop firewalld && systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@k8s-master ~]# firewall-cmd --state
not running
[root@k8s-master ~]#
# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Reboot the VM
reboot
# Check SELinux status
sestatus
[root@k8s-master ~]# setenforce 0
[root@k8s-master ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
[root@k8s-master ~]# sestatus
SELinux status: enabled
SELinuxfs mount: /sys/fs/selinux
SELinux root directory: /etc/selinux
Loaded policy name: targeted
Current mode: permissive
Mode from config file: disabled
Policy MLS status: enabled
Policy deny_unknown status: allowed
Max kernel policy version: 31
[root@k8s-master ~]#
# After rebooting the VM
# Check SELinux status again
[root@k8s-master ~]# sestatus
SELinux status: disabled
[root@k8s-master ~]#
[root@k8s-master ~]# swapoff -a
[root@k8s-master ~]# sed -i 's/.*swap.*/#&/' /etc/fstab
[root@k8s-master ~]# free
total used free shared buff/cache available
Mem: 995748 151052 694420 7864 150276 697376
Swap: 0 0 0
[root@k8s-master ~]#
Some RHEL/CentOS 7 users have run into problems where traffic was routed incorrectly because iptables was bypassed.
Make sure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl configuration:
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
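If sysctl reports that net.bridge.bridge-nf-call-iptables does not exist, the br_netfilter kernel module is probably not loaded yet; loading it first (and persisting it across reboots) is a common fix, sketched here:
# Load the bridge netfilter module now
modprobe br_netfilter
# Confirm it is loaded
lsmod | grep br_netfilter
# Load it automatically after every boot
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF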
# Apply the configuration
[root@localhost ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
[root@localhost ~]#
[root@k8s-master ~]# yum -y install ntp
[root@k8s-master ~]# ntptime
ntp_gettime() returns code 0 (OK)
time e21ae5d4.084f5000 Tue, Mar 17 2020 13:59:48.032, (.032460),
maximum error 1500 us, estimated error 0 us, TAI offset 0
ntp_adjtime() returns code 0 (OK)
modes 0x0 (),
offset -2006.000 us, frequency 2.816 ppm, interval 1 s,
maximum error 1500 us, estimated error 0 us,
status 0x1 (PLL),
time constant 4, precision 1.000 us, tolerance 500 ppm,
[root@localhost ~]# timedatectl
Local time: Tue 2020-03-17 14:00:01 CST
Universal time: Tue 2020-03-17 06:00:01 UTC
RTC time: Tue 2020-03-17 06:00:01
Time zone: Asia/Shanghai (CST, +0800)
NTP enabled: yes
NTP synchronized: yes
RTC in local TZ: no
DST active: n/a
[root@localhost ~]# systemctl enable ntpd
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
[root@localhost ~]# systemctl restart ntpd.service
[root@localhost ~]# ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
[root@localhost ~]#
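To confirm that ntpd is actually synchronizing against upstream servers, ntpq can list the configured peers (an asterisk marks the currently selected time source):
# Show NTP peers and their sync status
ntpq -p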
cd /etc/yum.repos.d/
yum -y install wget
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master ~]# yum -y install docker-ce
[root@k8s-master ~]# systemctl enable docker && systemctl start docker
[root@k8s-master ~]# docker --version
Docker version 19.03.8, build afacb8b
[root@k8s-master ~]#
Docker's default cgroup driver is cgroupfs, while kubelet expects systemd. If the two drivers do not match, kubelet cannot start containers, so switch Docker's cgroup driver to systemd.
# Configure a registry mirror
# Switch the cgroup driver to systemd
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": [
"https://l10nt4hq.mirror.aliyuncs.com"
],
"exec-opts": [
"native.cgroupdriver=systemd"
],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
# Reload systemd and restart Docker
systemctl daemon-reload
systemctl restart docker
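After restarting Docker, confirm that the cgroup driver is now systemd, matching what kubelet expects:
# Should print "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"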
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
- kubelet: runs on every node, communicates with the rest of the cluster, and manages the lifecycle of the pods and containers on that node.
- kubeadm: the Kubernetes bootstrap tool; it lowers the barrier to deploying a cluster and speeds up the process.
- kubectl: the command-line tool for managing a Kubernetes cluster.
# Install kubelet, kubeadm and kubectl
yum install -y kubelet kubeadm kubectl
# Enable kubelet at boot and start it
systemctl enable kubelet && systemctl start kubelet
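Note that installing without a version pulls the newest 1.17.x packages (v1.17.4 at the time), while the init command below requests v1.17.3 images. If you want the kubelet version to match exactly, the packages can optionally be pinned, for example:
# Optional: install a specific version instead of the latest
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3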
kubeadm init --kubernetes-version="v1.17.3" \
  --pod-network-cidr="10.244.0.0/16" \
  --image-repository registry.aliyuncs.com/google_containers
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.8.20:6443 --token 0rvzpx.f5nexczvdwu7swiv \
--discovery-token-ca-cert-hash sha256:a57629e2342bd19e6d650a028519458b464411e60d6c0c226550172d7ad41b27
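The join token printed above is only valid for 24 hours by default. If it has expired by the time a worker node joins, a fresh join command can be generated on the master:
# Print a new kubeadm join command with a fresh token
kubeadm token create --print-join-command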
# kubeadm pulls the images of the required components automatically
[root@k8s-master ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.aliyuncs.com/google_containers/kube-proxy v1.17.3 ae853e93800d 4 weeks ago 116MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.17.3 b0f1517c1f4b 4 weeks ago 161MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.17.3 90d27391b780 4 weeks ago 171MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.17.3 d109c0821a2b 4 weeks ago 94.4MB
registry.aliyuncs.com/google_containers/coredns 1.6.5 70f311871ae1 4 months ago 41.6MB
registry.aliyuncs.com/google_containers/etcd 3.4.3-0 303ce5db0e90 4 months ago 288MB
registry.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 2 years ago 742kB
[root@k8s-master ~]#
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 20m v1.17.4
[root@k8s-master ~]#
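The master shows NotReady because no pod network add-on has been installed yet (the next step installs flannel); the node's Ready condition spells this out:
# Inspect the node; the Ready condition reports that the network plugin is not ready
kubectl describe node k8s-master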
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
If the URL is unreachable, manually download kube-flannel.yml from
https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml
into the current directory and then run kubectl apply -f kube-flannel.yml.
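Once the manifest is applied, flannel runs as a DaemonSet with one pod per node; their status can be checked in kube-system before expecting the nodes to turn Ready:
# The flannel pods live in the kube-system namespace
kubectl get pods -n kube-system | grep flannel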
[root@k8s-master k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 56m v1.17.4
[root@k8s-master k8s]#
kubeadm join 192.168.8.20:6443 --token bzho3l.78mn7jxsqtzi7gcw \
--discovery-token-ca-cert-hash sha256:d797dbbdd14faeb2a971dd4a07e042258a6208103a7cd7c6bfd3d3a49d904523
# List all nodes
kubectl get nodes
# -o wide shows extra details
kubectl get nodes -o wide
[root@k8s-master f]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready master 43h v1.17.4 192.168.8.20 <none> CentOS Linux 7 (Core) 3.10.0-1062.el7.x86_64 docker://19.3.8
k8s-node1 Ready <none> 9m8s v1.17.4 192.168.8.21 <none> CentOS Linux 7 (Core) 3.10.0-1062.el7.x86_64 docker://19.3.8
k8s-node2 Ready <none> 47m v1.17.4 192.168.8.22 <none> CentOS Linux 7 (Core) 3.10.0-1062.el7.x86_64 docker://19.3.8
[root@k8s-master f]#
# List the system pods; the kube-system namespace must be specified
kubectl get pods -n kube-system
# -n kube-system selects the namespace
# -o wide shows extra details
kubectl get pods -n kube-system -o wide
[root@k8s-master f]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-9d85f5447-4ls2b 1/1 Running 115 43h 10.244.0.3 k8s-master <none> <none>
coredns-9d85f5447-n7jr9 1/1 Running 118 43h 10.244.0.2 k8s-master <none> <none>
etcd-k8s-master 1/1 Running 2 43h 192.168.8.20 k8s-master <none> <none>
kube-apiserver-k8s-master 1/1 Running 4 43h 192.168.8.20 k8s-master <none> <none>
kube-controller-manager-k8s-master 1/1 Running 3 43h 192.168.8.20 k8s-master <none> <none>
kube-flannel-ds-amd64-2cfd2 1/1 Running 0 75m 192.168.8.20 k8s-master <none> <none>
kube-flannel-ds-amd64-fpscx 1/1 Running 0 18m 192.168.8.21 k8s-node1 <none> <none>
kube-flannel-ds-amd64-rx2h8 1/1 Running 0 24m 192.168.8.22 k8s-node2 <none> <none>
kube-proxy-77bnj 1/1 Running 0 18m 192.168.8.21 k8s-node1 <none> <none>
kube-proxy-dc7hs 1/1 Running 2 43h 192.168.8.20 k8s-master <none> <none>
kube-proxy-vnchf 1/1 Running 0 56m 192.168.8.22 k8s-node2 <none> <none>
kube-scheduler-k8s-master 1/1 Running 3 43h 192.168.8.20 k8s-master <none> <none>
[root@k8s-master f]#
[root@k8s-master f]# kubectl get pods
No resources found in default namespace.
[root@k8s-master f]# kubectl run --help
Create and run a particular image, possibly replicated.
Creates a deployment or job to manage the created container(s).
Examples:
# Start a single instance of nginx.
kubectl run nginx --image=nginx
# Start a single instance of hazelcast and let the container expose port 5701.
kubectl run hazelcast --image=hazelcast --port=5701
# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container.
kubectl run hazelcast --image=hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default"
# Start a single instance of hazelcast and set labels "app=hazelcast" and "env=prod" in the container.
kubectl run hazelcast --image=hazelcast --labels="app=hazelcast,env=prod"
# Start a replicated instance of nginx.
kubectl run nginx --image=nginx --replicas=5
# Dry run. Print the corresponding API objects without creating them.
kubectl run nginx --image=nginx --dry-run
# Start a single instance of nginx, but overload the spec of the deployment with a partial set of values parsed from JSON.
kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
# Start a pod of busybox and keep it in the foreground, don't restart it if it exits.
kubectl run -i -t busybox --image=busybox --restart=Never
# Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command.
kubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>
# Start the nginx container using a different command and custom arguments.
kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
# Start the perl container to compute π to 2000 places and print it out.
kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
# Start the cron job to compute π to 2000 places and print it out every 5 minutes.
kubectl run pi --schedule="0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
[root@k8s-master f]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@k8s-master f]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-6db489d4b7-r6k8d 1/1 Running 0 96m 10.244.4.2 k8s-node1 <none> <none>
[root@k8s-master f]#
[root@k8s-node2]# curl 10.244.4.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@k8s-node2]#
[root@k8s-master f]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 84m
[root@k8s-master f]#
[root@k8s-master f]# kubectl expose deployment nginx --name=nginx --port=80 --target-port=80
service/nginx exposed
[root@k8s-master f]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 45h
nginx ClusterIP 10.105.85.136 <none> 80/TCP 21s
[root@k8s-master f]#
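The ClusterIP assigned to the service is reachable from any node in the cluster through kube-proxy, so the service can be tested directly against the IP shown above:
# Request the nginx service through its ClusterIP
curl 10.105.85.136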
[root@k8s-master f]# kubectl delete pod nginx-6db489d4b7-r6k8d
pod "nginx-6db489d4b7-r6k8d" deleted
[root@k8s-master f]#
[root@k8s-master f]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-6db489d4b7-2mq56 0/1 ContainerCreating 0 13s <none> k8s-node2 <none> <none>
[root@k8s-master f]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-6db489d4b7-2mq56 1/1 Running 0 28s 10.244.3.2 k8s-node2 <none> <none>
[root@k8s-master f]#
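The pod comes back with a new name (and here on a different node) because it is owned by the nginx Deployment's ReplicaSet, which keeps the desired number of replicas running:
# List the ReplicaSet that owns the pod
kubectl get rs
# Optionally scale it through the deployment, e.g.:
# kubectl scale deployment nginx --replicas=3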
kubectl describe svc nginx
# svc is short for services
[root@k8s-master f]# kubectl describe svc nginx
Name: nginx
Namespace: default
Labels: run=nginx
Annotations: <none>
IP: 10.105.85.136
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 10.244.3.2:80
Session Affinity: None
Events: <none>
[root@k8s-master f]#
[root@k8s-master f]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-6db489d4b7-2mq56 1/1 Running 0 39m pod-template-hash=6db489d4b7,run=nginx
[root@k8s-master f]#
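The service forwards traffic to pods whose labels match its selector (run=nginx here), which is why the Endpoints field above already points at the new pod's IP 10.244.3.2. The same mapping can be read from the Endpoints object:
# The endpoints list is kept in sync with the matching pods
kubectl get endpoints nginx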
kubectl edit svc nginx
[root@k8s-master f]# kubectl edit svc nginx
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2020-03-19T06:06:25Z"
labels:
run: nginx
name: nginx
namespace: default
resourceVersion: "127217"
selfLink: /api/v1/namespaces/default/services/nginx
uid: 62d4a968-d7ce-4461-9872-5bce7e4aa3c1
spec:
clusterIP: 10.105.85.136
"/tmp/kubectl-edit-pi540.yaml" 27L, 665C
kubectl describe deployment nginx
[root@k8s-master f]# kubectl describe deployment nginx
Name: nginx
Namespace: default
CreationTimestamp: Thu, 19 Mar 2020 12:02:23 +0800
Labels: run=nginx
Annotations: deployment.kubernetes.io/revision: 1
Selector: run=nginx
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: run=nginx
Containers:
nginx:
Image: nginx
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Progressing True NewReplicaSetAvailable
Available True MinimumReplicasAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-6db489d4b7 (1/1 replicas created)
Events: <none>
[root@k8s-master f]#