| Role | Spec | IP | Installed components | Action performed | On failure (reset the config) |
|---|---|---|---|---|---|
| master | 1.5 GB RAM | 172.27.25.33 | docker, kubelet, kubeadm, kubectl | kubeadm init … | kubeadm reset |
| node1 | 1 GB RAM | 172.27.25.31 | docker, kubelet, kubeadm, kubectl | kubeadm join xx --token… | kubeadm reset |
| node2 | 1 GB RAM | 172.27.25.32 | docker, kubelet, kubeadm, kubectl | kubeadm join xx --token… | kubeadm reset |
## Set each machine's hostname according to its IP
hostnamectl set-hostname master   # on 172.27.25.33
hostnamectl set-hostname node1    # on 172.27.25.31
hostnamectl set-hostname node2    # on 172.27.25.32
## Configure /etc/hosts
cat >> /etc/hosts << EOF
172.27.25.33 master
172.27.25.31 node1
172.27.25.32 node2
EOF
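# A quick sanity check (optional): each short name should now resolve and respond
for h in master node1 node2; do ping -c 1 -W 1 "$h" > /dev/null && echo "$h ok"; done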
## Sync the system time
ntpdate time.windows.com
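# Optional: a one-off ntpdate run drifts over time; one way to keep clocks in sync
# is a cron entry (a sketch, assuming ntpdate stays installed)
(crontab -l 2>/dev/null; echo '0 */6 * * * /usr/sbin/ntpdate time.windows.com > /dev/null 2>&1') | crontab -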
## Install dependency packages
yum install -y conntrack ntpdate ipvsadm ipset jq iptables curl sysstat libseccomp wget net-tools git
## Switch the firewall to iptables and flush its rules
systemctl stop firewalld && systemctl disable firewalld
yum install -y iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
## Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
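# Verify: swap usage should be all zeros and SELinux should no longer be Enforcing
free -m | grep -i swap
getenforce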
## Tune kernel parameters
# Load br_netfilter now, and on every boot via rc.local
modprobe br_netfilter
cat >> /etc/rc.d/rc.local << EOF
modprobe br_netfilter
EOF
chmod +x /etc/rc.d/rc.local
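# Confirm the module is loaded
lsmod | grep br_netfilter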
cat > kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 1048576
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
EOF
cp kubernetes.conf /etc/sysctl.d/
sysctl -p /etc/sysctl.d/kubernetes.conf
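# Spot-check that the key values took effect
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward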
## Stop services the cluster does not need
systemctl stop postfix && systemctl disable postfix
## Configure rsyslogd and systemd journald (persistent logging)
mkdir -p /var/log/journal          # persistent storage directory
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=200M
MaxRetentionSec=2week
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
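# Optional: confirm journald is now writing to /var/log/journal and see how much space it uses
journalctl --disk-usage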
## Upgrade the system kernel to 4.4 (elrepo kernel-lt)
rpm -Uvh https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
## Set the newly installed kernel as the default boot entry
## (the exact menu entry title comes from `grep menuentry /boot/grub2/grub.cfg`, shown below)
grub2-set-default 'CentOS Linux (4.4.215-1.el7.elrepo.x86_64) 7 (Core)'
reboot
## After the reboot, check that the running kernel is 4.4
uname -r
## List the bootable kernel entries
grep menuentry /boot/grub2/grub.cfg
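# Alternatively, print only the entry titles (the format grub2-set-default expects):
awk -F\' '/^menuentry/ {print $2}' /boot/grub2/grub.cfg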
## Load the ipvs kernel modules used by kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
## Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
mkdir /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://6kx4zyno.mirror.aliyuncs.com", "http://hub-mirror.c.163.com", "https://registry.docker-cn.com"]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
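# Verify the cgroup driver matches what kubelet expects
docker info 2>/dev/null | grep -i 'cgroup driver'   # should print: Cgroup Driver: systemd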
## Install kubeadm, kubelet, and kubectl
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.15.11 kubeadm-1.15.11 kubectl-1.15.11
systemctl enable kubelet && systemctl start kubelet
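# Note: until `kubeadm init`/`kubeadm join` writes its configuration, kubelet restarts
# in a loop; an "activating (auto-restart)" status here is expected at this stage
systemctl status kubelet --no-pager | head -n 5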
# Enable auto-completion for kubectl: install and configure bash-completion
yum install -y bash-completion
echo 'source /usr/share/bash-completion/bash_completion' >> /etc/profile
source /etc/profile
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
# --pod-network-cidr=10.244.0.0/16 is required for the flannel or canal network add-on
kubeadm init \
--apiserver-advertise-address=172.27.25.33 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version=1.15.11 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
# After the master initializes successfully, run the following:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
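# The API server should now answer; the master stays NotReady until a pod network is installed
kubectl get nodes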
# Appendix: output of a successful master init; record the final "kubeadm join ..." line
[init] Using Kubernetes version: v1.15.11
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.1.0.1 172.27.25.33]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.27.25.33 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.27.25.33 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.002168 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 6p3ek8.cwgwt3ldhhm32wez
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.27.25.33:6443 --token 6p3ek8.cwgwt3ldhhm32wez \
--discovery-token-ca-cert-hash sha256:27f162384d930a8b1736d56043989a5627a8c50f34e8d8d34453731d26903ea6
[root@k8s-master ~]# mkdir k8s
# Install flannel (replace quay.io with a reachable mirror first)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
sed -i 's#quay.io#quay-mirror.qiniu.com#g' ./kube-flannel.yml
kubectl apply -f ./kube-flannel.yml
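# Watch the flannel daemonset pods come up (app=flannel is the label the upstream
# manifest uses; Ctrl-C to stop watching)
kubectl get pods -n kube-system -l app=flannel -o wide -w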
# Alternatively, install the canal network; see https://v1-15.docs.kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/canal.yaml
# On each worker node, run the join command recorded from the init output:
kubeadm join 172.27.25.33:6443 --token 6p3ek8.cwgwt3ldhhm32wez \
--discovery-token-ca-cert-hash sha256:27f162384d930a8b1736d56043989a5627a8c50f34e8d8d34453731d26903ea6
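# If the token from the init output has expired (tokens last 24h by default),
# generate a fresh join command on the master:
kubeadm token create --print-join-command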
# On success, the join output looks like this:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# Back on the master, check the nodes and system pods
kubectl get node
kubectl get pod -n kube-system -o wide
# Deploy an nginx application to test that the cluster works
kubectl create deployment nginx --image=nginx:latest
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
# Verify with curl against <node-ip>:<NodePort exposed by the service>
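# For example (reads the NodePort from the service instead of hard-coding it;
# 172.27.25.31 is node1 from the table above):
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://172.27.25.31:${NODE_PORT}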
# Install the Kubernetes Dashboard (v1.10.1)
wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
# Swap the image registry in the yaml for a Docker Hub mirror (k8s.gcr.io is unreachable).
sed -i 's/k8s.gcr.io/loveone/g' kubernetes-dashboard.yaml
# Use NodePort mode to map the Dashboard to port 30001 on every k8s host.
sed -i '/targetPort:/a\ \ \ \ \ \ nodePort: 30001\n\ \ type: NodePort' kubernetes-dashboard.yaml
# Deploy the Dashboard
kubectl apply -f kubernetes-dashboard.yaml
# After creation, check the state of the related services
kubectl get deployment kubernetes-dashboard -n kube-system
NAME                   READY   UP-TO-DATE   AVAILABLE   AGE
kubernetes-dashboard   1/1     1            1           3m
kubectl get services -n kube-system
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
kube-dns               ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   40m
kubernetes-dashboard   NodePort    10.99.190.175   <none>        443:30001/TCP            4m
# Create an admin ServiceAccount for the Dashboard and bind it to cluster-admin
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Log in to the Dashboard with the token printed by the command below
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
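# To grab just the token (for pasting into the login page at https://<any-node-ip>:30001,
# accepting the self-signed certificate):
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') -o jsonpath='{.data.token}' | base64 -d; echo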
# Install the Helm client
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
# Install the server side, Tiller
helm init
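# helm init creates a tiller-deploy Deployment in kube-system; wait for it to roll out
kubectl -n kube-system rollout status deploy/tiller-deploy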
# Check that both the client and Tiller installed successfully
[root@master helm]# helm version
Client: &version.Version{SemVer:"v2.16.3", GitCommit:"1ee0254c86d4ed6887327dabed7aa7da29d7eb0d", GitTreeState:"clean"}
Server: &version.Version{SemVer:"v2.16.3", GitCommit:"1ee0254c86d4ed6887327dabed7aa7da29d7eb0d", GitTreeState:"clean"}
# If you see the error below, the Tiller service account lacks permissions and must be granted access
[root@master kubernetes-study]# helm list --all
Error: configmaps is forbidden: User "system:serviceaccount:kube-system:default" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
# Grant the authorization on the master node
[root@master kubernetes-study]# kubectl create serviceaccount --namespace kube-system tiller
serviceaccount/tiller created
[root@master kubernetes-study]# kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created
[root@master kubernetes-study]# kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
deployment.extensions/tiller-deploy patched
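# With the tiller service account in place, the earlier command should now succeed
# (an empty list on a fresh cluster):
helm list --all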
## 6.1 Since the Google registries are unreachable, pull the ingress-nginx dependency images from another registry and re-tag them
# Pull the images
docker pull registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/defaultbackend-amd64:1.5
docker pull registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/nginx-ingress-controller:0.20.0
# Re-tag them
docker tag registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/defaultbackend-amd64:1.5 k8s.gcr.io/defaultbackend-amd64:1.5
docker tag registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/nginx-ingress-controller:0.20.0 quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0
# Remove the original tags
docker rmi registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/defaultbackend-amd64:1.5
docker rmi registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/nginx-ingress-controller:0.20.0
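# Confirm both re-tagged images are present locally
docker images | grep -E 'defaultbackend-amd64|nginx-ingress-controller'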
## 6.2 Install the ingress-nginx service:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.20.0/deploy/mandatory.yaml
kubectl apply -f mandatory.yaml
## 6.3 Add a Service for the ingress-nginx controller, exposing it outside the cluster via NodePort
[root@master ingress-nginx]# cat service-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
      nodePort: 32080  # http
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
      nodePort: 32443  # https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
[root@master ingress-nginx]# kubectl apply -f service-nodeport.yaml
## 6.4 After installation, check the namespace, pods, and services
[root@master ingress-nginx]# kubectl get ns
NAME STATUS AGE
ingress-nginx Active 12m
[root@master ingress-nginx]# kubectl get pod -n ingress-nginx -o wide
NAME                                        READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
default-http-backend-c858dffd-l97bf         1/1     Running   0          13m   10.244.2.27   node2   <none>           <none>
nginx-ingress-controller-7cbb74c44b-fq9w5   1/1     Running   0          13m   10.244.1.23   node1   <none>           <none>
[root@master ingress-nginx]# kubectl get services -n ingress-nginx
NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
default-http-backend   ClusterIP   10.1.251.214   <none>        80/TCP                       13h
ingress-nginx          NodePort    10.1.229.23    <none>        80:30080/TCP,443:30443/TCP   55m
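# A minimal end-to-end check (a sketch: the host nginx.test.local and the Ingress below
# are illustrative, routing to the nginx Service created during the cluster test above):
cat <<EOF | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-test
spec:
  rules:
  - host: nginx.test.local
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 80
EOF
# Read the controller's http NodePort from its Service and curl through it with the Host header
HTTP_PORT=$(kubectl -n ingress-nginx get svc ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
curl -H 'Host: nginx.test.local' http://172.27.25.31:${HTTP_PORT}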