Machine Preparation

  • Environment information
Hostname   Internal IP   External IP    OS                              k8s version   Docker version
master01   10.0.0.106    192.168.1.9    CentOS Linux release 7.8.2003   v1.15.2       18.09.7
node01     10.0.0.107    192.168.1.11   CentOS Linux release 7.8.2003   v1.15.2       18.09.7
node02     10.0.0.108    192.168.1.15   CentOS Linux release 7.8.2003   v1.15.2       18.09.7
  • Configure hostnames
# Set the hostname on each node
hostnamectl set-hostname master01
hostnamectl set-hostname node01
hostnamectl set-hostname node02
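A quick way to confirm the new name took effect on each node (not in the original steps):

hostnamectl status | grep 'Static hostname'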

# Add name resolution entries on every node
cat <<EOF >>/etc/hosts
10.0.0.106 master01
10.0.0.107 node01
10.0.0.108 node02
EOF
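To confirm that name resolution works from every node, a minimal check (assumes ICMP is permitted between the machines):

# Each name should resolve and answer one ping
for h in master01 node01 node02; do ping -c 1 $h; done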
  • Check the firewall
systemctl status firewalld

# If firewalld is running, stop and disable it

systemctl disable firewalld
systemctl stop firewalld


  • Check that SELinux is disabled

[root@node02 ~]# getenforce
Disabled

# setenforce only lasts until the next reboot
[root@master ~]# setenforce 0
# Disable permanently
[root@master ~]# vi /etc/selinux/config
SELINUX=disabled
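The temporary and permanent steps can also be done non-interactively; a small sketch assuming the stock /etc/selinux/config file:

# Disable SELinux now and across reboots in one shot
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config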


  • Disable swap

# Disable swap immediately, and comment it out of fstab so it stays off after reboot
swapoff -a; sed -i '/swap/s/^/#/' /etc/fstab

[root@node02 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           1837         102        1571           8         164        1590
Swap:          2047           0        2047
[root@node02 ~]# swapoff -a
[root@node02 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           1837         101        1572           8         164        1591
Swap:             0           0           0
[root@node02 ~]#

[root@node02 ~]# grep swap /etc/fstab
/dev/mapper/centos-swap swap swap defaults 0 0
[root@node02 ~]# sed -i '/swap/s/^/#/' /etc/fstab
[root@node02 ~]# grep swap /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
[root@node02 ~]#


  • Kernel settings

# Create the k8s.conf file
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl -p /etc/sysctl.d/k8s.conf
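On a stock CentOS 7 box the net.bridge.* keys only exist once the br_netfilter kernel module is loaded, so the sysctl step can fail with "No such file or directory". Loading the module first (and on every boot) avoids that; this is an addition to the original steps:

# Load br_netfilter now, and automatically at boot
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf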


  • Docker installation

# Install Docker from the Aliyun mirror
yum -y install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io

# Set the cgroup driver to systemd (kubelet and Docker must use the same cgroup driver)
[root@node01 ~]# cat <<EOF >/etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Start Docker, enable it at boot, and confirm the cgroup driver
[root@node01 ~]# systemctl restart docker;systemctl enable docker;docker info | grep Cgroup
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
Cgroup Driver: systemd


  • kubeadm installation

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install the packages
yum -y makecache
yum -y install kubelet-1.15.2 kubeadm-1.15.2 kubectl-1.15.2

# Check that the packages installed
rpm -aq kubeadm kubelet kubectl
# Enable kubelet at boot
systemctl enable kubelet
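At this point kubelet sits in a restart loop because there is no cluster configuration yet; that is expected and resolves itself after kubeadm init/join. A quick check that the unit is at least enabled:

systemctl is-enabled kubelet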


  • kubeadm configuration

# Initialize the first (control-plane) node on master01
kubeadm init --kubernetes-version=v1.15.2 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12
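--pod-network-cidr=10.244.0.0/16 matches flannel's default network, which is why the flannel manifest works unmodified later on. As the init output below notes, the images can also be fetched ahead of time; a sketch of that optional pre-pull, using the same flags:

# Optional: pre-pull the control-plane images before kubeadm init
kubeadm config images pull --kubernetes-version=v1.15.2 --image-repository registry.aliyuncs.com/google_containers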

# Output:
[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.9]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master01 localhost] and IPs [192.168.1.9 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master01 localhost] and IPs [192.168.1.9 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 39.007909 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[kubelet-check] Initial timeout of 40s passed.
[bootstrap-token] Using token: xknie1.dm76a39ntgnwkyid
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.9:6443 --token xknie1.dm76a39ntgnwkyid \
--discovery-token-ca-cert-hash sha256:76896f39087f6fa66a43a0c336c081649ae65a781c80d140ba492b57bb038df9

# Configure kubectl access as the output instructs
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# The images pulled during init:
[root@master01 ~]# docker image ls
REPOSITORY                                                         TAG       IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy                 v1.15.2   167bbf6c9338   12 months ago   82.4MB
registry.aliyuncs.com/google_containers/kube-apiserver             v1.15.2   34a53be6c9a7   12 months ago   207MB
registry.aliyuncs.com/google_containers/kube-controller-manager    v1.15.2   9f5df470155d   12 months ago   159MB
registry.aliyuncs.com/google_containers/kube-scheduler             v1.15.2   88fa9cb27bd2   12 months ago   81.1MB
registry.aliyuncs.com/google_containers/coredns                    1.3.1     eb516548c180   19 months ago   40.3MB
registry.aliyuncs.com/google_containers/etcd                       3.3.10    2c4adeb21b4f   20 months ago   258MB
registry.aliyuncs.com/google_containers/pause                      3.1       da86e6ba6ca1   2 years ago     742kB
[root@master01 ~]#

# Check the nodes
[root@master01 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE     VERSION
master01   NotReady   master   3m19s   v1.15.2
[root@master01 ~]#

# The node turns Ready once the flannel network add-on is installed
# Download the manifest (requires ***)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Create the flannel network
[root@master01 ~]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@master01 ~]#

# The flannel image pull fails:
[root@master01 ~]# kubectl get pod -n kube-system
NAME                               READY   STATUS              RESTARTS   AGE
coredns-bccdc95cf-pfgls            0/1     Pending             0          15m
coredns-bccdc95cf-qcb4d            0/1     Pending             0          15m
etcd-master01                      1/1     Running             0          14m
kube-apiserver-master01            1/1     Running             0          14m
kube-controller-manager-master01   1/1     Running             0          15m
kube-flannel-ds-amd64-jdmjs        0/1     Init:ErrImagePull   0          93s
kube-proxy-bx8jv                   1/1     Running             0          15m
kube-scheduler-master01            1/1     Running             0          15m
[root@master01 ~]#

# kubectl describe shows the pull error; we will switch the image and retry:
Error response from daemon: Get https://quay.io/v2/coreos/flannel/manifests/v0.12.0-amd64: net/http: TLS handshake timeout
Normal Pulling 52s (x3 over 2m25s) kubelet, master01 Pulling image "quay.io/coreos/flannel:v0.12.0-amd64"
Warning Failed 39s (x3 over 119s) kubelet, master01 Error: ErrImagePull
Warning Failed 39s kubelet, master01 Failed to pull image "quay.io/coreos/flannel:v0.12.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://quay.io/v2/: net/http: TLS handshake timeout
Normal BackOff 1s (x5 over 118s) kubelet, master01 Back-off pulling image "quay.io/coreos/flannel:v0.12.0-amd64"
Warning Failed 1s (x5 over 118s) kubelet, master01 Error: ImagePullBackOff
[root@master01 ~]#

# Fix: pull the same image from an Aliyun mirror
docker pull registry.cn-hangzhou.aliyuncs.com/chentging/flannel:v0.12.0-amd64
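The flannel DaemonSet still references quay.io/coreos/flannel:v0.12.0-amd64, so the mirror image needs to be re-tagged under that name, and the pull + tag repeated on each node (the DaemonSet runs everywhere). The re-tag step is implied by the image list shown on node02 further down:

# Re-tag so the image matches the DaemonSet's image reference
docker tag registry.cn-hangzhou.aliyuncs.com/chentging/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64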

# flannel is now running:
[root@master01 ~]# kubectl get pod -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
coredns-bccdc95cf-pfgls            1/1     Running   0          23m
coredns-bccdc95cf-qcb4d            1/1     Running   0          23m
etcd-master01                      1/1     Running   0          22m
kube-apiserver-master01            1/1     Running   0          22m
kube-controller-manager-master01   1/1     Running   0          22m
kube-flannel-ds-amd64-jdmjs        1/1     Running   0          8m44s
kube-proxy-bx8jv                   1/1     Running   0          23m
kube-scheduler-master01            1/1     Running   0          22m

# The master node has become Ready:
[root@master01 ~]# kubectl get node
NAME       STATUS   ROLES    AGE   VERSION
master01   Ready    master   23m   v1.15.2
[root@master01 ~]#


  • Join the worker nodes

# Join node01 and node02 (run as root on each)
kubeadm join 192.168.1.9:6443 --token xknie1.dm76a39ntgnwkyid \
--discovery-token-ca-cert-hash sha256:76896f39087f6fa66a43a0c336c081649ae65a781c80d140ba492b57bb038df9

# Output:
[root@node01 ~]# kubeadm join 192.168.1.9:6443 --token xknie1.dm76a39ntgnwkyid \
    --discovery-token-ca-cert-hash sha256:76896f39087f6fa66a43a0c336c081649ae65a781c80d140ba492b57bb038df9

[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:

  • Certificate signing request was sent to apiserver and a response was received.
  • The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@node01 ~]#
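The bootstrap token from kubeadm init expires after 24 hours by default; if it has lapsed before a node joins, generate a fresh join command on the master:

# Print a new join command with a fresh token
kubeadm token create --print-join-command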

# Check the status
[root@master01 ~]# kubectl get node
NAME       STATUS   ROLES    AGE     VERSION
master01   Ready    master   33m     v1.15.2
node01     Ready    <none>   7m30s   v1.15.2
node02     Ready    <none>   5m20s   v1.15.2
[root@master01 ~]#
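An empty (<none>) ROLES column for workers is normal; kubeadm only labels the control plane. If you prefer the workers to show a role, a label can be added by hand (optional, not part of the original steps):

kubectl label node node01 node-role.kubernetes.io/worker=
kubectl label node node02 node-role.kubernetes.io/worker=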

# After a successful join, the node has pulled the images it needs
[root@node02 ~]# docker images
REPOSITORY                                           TAG             IMAGE ID       CREATED         SIZE
quay.io/coreos/flannel                               v0.12.0-amd64   4e9f801d2217   4 months ago    52.8MB
registry.aliyuncs.com/google_containers/kube-proxy   v1.15.2         167bbf6c9338   12 months ago   82.4MB
registry.aliyuncs.com/google_containers/pause        3.1             da86e6ba6ca1   2 years ago     742kB
[root@node02 ~]#


  • Verification

# Create a test deployment
[root@master01 ~]# kubectl create deployment nginx --image=nginx

[root@master01 ~]# kubectl get pod -owide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
nginx-554b9c67f9-bbrm5   1/1     Running   0          89s   10.244.1.2   node01   <none>           <none>

# Curl the pod IP; the default nginx welcome page confirms everything works
[root@master01 ~]# curl http://10.244.1.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
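The pod IP above is only routable from machines attached to the flannel network (the cluster nodes). To reach nginx from outside the cluster, the deployment can be exposed as a NodePort service; a sketch, not part of the original steps (the port is whatever Kubernetes allocates):

kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get svc nginx    # note the allocated port, e.g. 80:3xxxx/TCP
curl http://10.0.0.106:<allocated-nodeport>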

  • Dashboard (web UI) installation

Download the manifest (requires ***): https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml

[root@master01 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@master01 ~]#

[root@master01 ~]# kubectl get pod -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-76679bc5b9-2psr7   1/1     Running   0          52s
kubernetes-dashboard-65bb64d6cb-wvw5m        1/1     Running   0          52s
[root@master01 ~]#

# Run the API server proxy:
[root@master01 ~]# kubectl proxy
Starting to serve on 127.0.0.1:8001
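Through kubectl proxy the Dashboard is reachable on localhost via the API server's service-proxy path (the documented URL for Dashboard v2):

http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/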


  • Web access

https://10.0.0.106:30000/#!/login
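Note: recommended.yaml creates the kubernetes-dashboard Service as ClusterIP, so the NodePort URL above only works after the Service is switched to NodePort. A sketch using kubectl patch (port numbers taken from the stock manifest, nodePort 30000 assumed):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30000}]}}'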

# Create a service account and bind it to the cluster-admin role
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

# Get the login token
[root@master01 ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name:         dashboard-admin-token-4m5bz
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 026851ee-43da-4de4-9b36-a1a739dc2fc5

Type: kubernetes.io/service-account-token

Data
====
ca.crt: 1025 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNG01YnoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMDI2ODUxZWUtNDNkYS00ZGU0LTliMzYtYTFhNzM5ZGMyZmM1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.N54WwjiD4Acho_7GStW0jxJ3aQLhRXOOQOT-EMyGF93Hu1hJpR4H3jpyArPeS-zP4BKYyv9aDd5OTSvVOQGh6t0jSKMiOm0ZK6HNFanQNf4AxgAdDtV1dFRwHozn5MtVorPrdgeiVzh3wkbj55fISdWn3Q5E2BF5PubKG1vXZETK8XuCvkSxmuiDZtCR45majTEg-axnUO33uUnfxlxtPCsVaxsj5vNhZfzU_901yFytAKDfdLNNI1Qz3fC7BjQcTGKxAdLj-F64gKo_Dx6xADcHYyyfBdKAhUytK0WMGH-eVBfuruZNXi1R2kCTaCefUAx8j-bq81YImphYxswPBQ
[root@master01 ~]#