目录
1.安装k8s依赖环境
2.安装docker
3.安装k8s
# kubelet refuses to start while swap is enabled: turn it off now AND make it
# permanent.  `swapoff -a` alone does not survive the reboot below, so the
# swap entry in fstab must be commented out as well.
swapoff -a
sed -ri 's/^[^#].*\sswap\s.*/#&/' /etc/fstab
free -h
# Disable SELinux immediately (setenforce) and permanently (config file).
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
reboot
# After the reboot, this should print "Disabled".
getenforce
# Install the ELRepo repository to get a newer long-term-support kernel
# (the stock CentOS 7 3.10 kernel is too old for IPVS/overlay2).
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installing, check that /boot/grub2/grub.cfg contains a menuentry for
# the new kernel; if it does not, run the install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
#grep menuentry /boot/grub2/grub.cfg
grub2-set-default "CentOS linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
#grub2-set-default 'CentOS Linux (5.4.197-1.el7.elrepo.x86_64) 7 (Core)'
reboot
# Verify the new kernel is running.
uname -r
# Alternative: download and install a specific kernel RPM from the Aliyun mirror.
wget http://mirrors.aliyun.com/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-4.4.184-1.el7.elrepo.x86_64.rpm
yum install kernel-lt-4.4.184-1.el7.elrepo.x86_64.rpm
# List boot entries with their indices.
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
# Using the kernel index found above, set the default boot entry.
grub2-set-default 0
yum install -y ipvsadm
yum install -y conntrack
# Kernel modules kube-proxy (IPVS mode) needs.  NOTE: the original appended
# `modprobe` lines to /etc/profile, which only runs for interactive login
# shells — the modules would NOT be loaded at boot before kube-proxy starts,
# and re-running the script duplicates the lines.  systemd's modules-load.d
# is the reliable, idempotent mechanism.
cat > /etc/modules-load.d/k8s-ipvs.conf << EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Load them now as well, so the settings below work without waiting for boot.
for mod in br_netfilter ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do
  modprobe -- "$mod"
done
#modprobe -- nf_conntrack_ipv4
# On kernels >= 5.2 the ipv4 suffix was removed; use nf_conntrack instead.
# Verify after reboot:
reboot
lsmod |grep -e ip_vs -e nf_conntrack
# Reboot first so the earlier changes take effect — otherwise some of these
# sysctls fail to apply (the bridge-nf keys require br_netfilter to be loaded).
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
#内核4.x以上去掉了这个参数
#net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
EOF
# Apply the new settings immediately.
sysctl -p /etc/sysctl.d/k8s.conf
# Configure the Docker daemon: systemd cgroup driver (must match the kubelet),
# bounded json-file logs, overlay2 storage, and a China registry mirror.
# The original line `cat > /etc/docker/daemon.json <` was a malformed heredoc
# and the JSON was missing its opening brace — fixed below.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
systemctl start docker
systemctl enable docker
# If it prints "Hello from Docker!" the installation is complete.
docker run hello-world
# To wipe Docker completely (uninstall only): this DELETES all images,
# containers and volumes — do NOT run it as part of the installation.
#rm -rf /var/lib/docker
# Container logs live at:
#   /var/lib/docker/containers/<container id>/<container id>-json.log
# Add the Aliyun Kubernetes yum repository.  The original `cat <` was a
# malformed heredoc that wrote to no file; the repo definition must land in
# /etc/yum.repos.d/ for yum to see it.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[k8s]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
yum clean all && yum makecache
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
# Enable on boot (kubelet cannot start yet — the cluster is not initialized).
systemctl enable kubelet
########################################################################################
↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
以上操作,所有节点都要操作
↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
#只在主节点操作
#推荐导出配置文件,然后指定配置文件完成初始化,这样可以保留初始化参数。
#默认有许多参数,只需要调整主节点ip,pod网络段,以及kubeproxy支持ipvs模式,版本信息即可。
#初始化时候,会读取hosts去识别主机名字,改了名字记得在这里解析,不然无法启动。
#虚拟机如果cpu只有1核要改2,不然也报错。
# Export the default init configuration, then edit it: master advertise IP,
# pod network CIDR, kube-proxy ipvs mode, and the kubernetes version.
kubeadm config print init-defaults > kubeadm-config.yaml
# Initialize the master; this pulls many images, so it takes a while.
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs |tee kubeadm-init.log
# If init fails:
#  - set "exec-opts": ["native.cgroupdriver=systemd"] in the docker config, or
#  - add --cgroup-driver=systemd to the kubelet service drop-in:
#    Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cgroup-driver=systemd
#  - restart docker and kubelet, then reset the failed init and retry:
#echo y |kubeadm reset
# Set up kubectl access for this user FIRST — the flannel `kubectl apply`
# below cannot work until admin.conf is copied into ~/.kube/config
# (the original ran kubectl before this step).
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Install the flannel pod network.
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
# Run on each worker node to join the cluster.  The token and CA-cert hash
# come from kubeadm-init.log on the master; the values below are examples.
kubeadm join 192.168.18.101:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:a3a8b8e1ee899c0df1f19887942d71c6412d9de56955e8aa43191ca49d10b961
# Removing a node:
# on the master:
kubectl delete node c102
# on the node being removed — clean its credentials so it can rejoin later:
rm -f /etc/kubernetes/kubelet.conf
rm -f /etc/kubernetes/pki/ca.crt
rm -f /etc/kubernetes/bootstrap-kubelet.conf
reboot
#从节点加入成功后,可能网络、proxy 容器无法启动,可以把主节点的pause proxy 镜像导入节点
#如果还有节点的容器没有启动成功,可以重新创建它,方法如下:
# Optional: keep a copy of the pod manifest for inspection.
kubectl get pod kube-flannel-ds-g85zs -n kube-system -o yaml >kube-flannel-ds-g85zs.yaml
# A pod owned by a DaemonSet is recreated automatically when deleted, so a
# plain delete is enough.  Do NOT re-apply the dumped manifest: it carries
# resourceVersion/status/ownerReferences fields and the apply will be
# rejected or create an unmanaged duplicate.
kubectl delete pod kube-flannel-ds-g85zs -n kube-system
#如果在没网的环境,需要自己手动修改
检查证书时间:
#重新更新所有证书,期限1年
#安装metrics监控模块
用于查看pod使用的服务器的使用率和Hpa自动扩容的基础组件
安装如下所有yaml文件使用命令:kubectl apply -f ./
aggregated-metrics-reader.yaml
# ClusterRole aggregated into the built-in view/edit/admin roles so they can
# read metrics.k8s.io resources.  (Indentation restored — the pasted source
# was flattened to column 0, which is invalid YAML.)
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
auth-delegator.yaml
---
# Grants metrics-server the auth-delegator role so it can delegate token
# authentication to the kube-apiserver.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
auth-reader.yaml
# Lets metrics-server read the extension-apiserver authentication config
# (ConfigMap in kube-system) needed to authenticate incoming requests.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
metrics-apiservice.yaml
# Registers the metrics.k8s.io/v1beta1 API, served by the metrics-server
# Service in kube-system, with the aggregation layer.
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
metrics-server-deployment.yaml
# ServiceAccount and Deployment for metrics-server v0.3.6.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
        # mount in tmp so we can safely use from-scratch images and/or read-only containers
        - name: tmp-dir
          emptyDir: {}
      containers:
        - name: metrics-server
          image: k8s.gcr.io/metrics-server-amd64:v0.3.6
          imagePullPolicy: IfNotPresent
          command:
            - /metrics-server
            # Skip kubelet TLS verification and fall back through address
            # types — needed on clusters whose kubelet certs lack node IPs.
            - --kubelet-insecure-tls
            - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
            - --metric-resolution=30s
          volumeMounts:
            - name: tmp-dir
              mountPath: /tmp
metrics-server-service.yaml
# ClusterIP Service fronting the metrics-server pods on port 443.
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
    - port: 443
      protocol: TCP
      targetPort: 443
resource-reader.yaml
# ClusterRole allowing metrics-server to read pods/nodes/node stats, plus the
# binding that grants it to the metrics-server ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system