Three nodes, 2 CPUs and 2 GB RAM each.
Ways to deploy k8s:
Minikube: quickly deploys a single-node k8s locally, for trying k8s out.
kubeadm: provides kubeadm init and kubeadm join for quickly standing up a k8s cluster.
Binary packages (recommended, and used here): download the release binaries from the official site and deploy each component by hand to form the cluster.
Cluster plan:
master1 node:
hostname: k8s-master1
IP: 192.168.88.151
worker1 node:
hostname: k8s-node1
IP: 192.168.88.152
worker2 node:
hostname: k8s-node2
IP: 192.168.88.153
k8s version: 1.18
Install method: offline binaries
OS version: CentOS 7.6
Added later, master2 node:
hostname: k8s-master2
IP: 192.168.88.154
Added later, Keepalived + LB1 node:
hostname: k8s-LB1
IP: 192.168.88.156
Added later, Keepalived + LB2 node:
hostname: k8s-LB2
IP: 192.168.88.157
VIP: 192.168.88.155
Binary download: the official Kubernetes GitHub releases page
Note: every master (both the primary and the standby) must be able to resolve the hostnames of all nodes,
i.e. the /etc/hosts file on each master needs entries for every node.
Otherwise kubectl exec into containers will fail.
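For example, on each master (a sketch based on the node list above; extend it as nodes are added):
cat <<EOF >> /etc/hosts
192.168.88.151 k8s-master1
192.168.88.152 k8s-node1
192.168.88.153 k8s-node2
192.168.88.154 k8s-master2
EOF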
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
iptables -F
setenforce 0 && sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# Configure /etc/hosts
echo "192.168.88.151 k8s-master1" >> /etc/hosts
echo "192.168.88.152 k8s-node1" >> /etc/hosts
echo "192.168.88.153 k8s-node2" >> /etc/hosts
# Install Docker and enable it at boot
# curl -fsSL "https://get.docker.com/" | sh
# systemctl enable docker && systemctl start docker
# Disable swap
swapoff -a
sed -i 's#.*swap.*##g' /etc/fstab
free -m
# Set kernel parameters
# cat <<EOF > /etc/sysctl.d/k8s.conf
# net.ipv4.ip_forward = 1
# net.bridge.bridge-nf-call-ip6tables = 1
# net.bridge.bridge-nf-call-iptables = 1
# EOF
# sysctl -p /etc/sysctl.d/k8s.conf
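# Note: the bridge-nf sysctls above only exist once the br_netfilter module is
# loaded; a minimal sketch to load it now and on every boot:
modprobe br_netfilter
echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf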
echo "# k8s path" >> /etc/profile
echo "export PATH=\$PATH:/opt/kubernetes/bin/:/opt/etcd/bin/" >> /etc/profile
source /etc/profile
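# Change this host's static IP (the clone template presumably started at 192.168.88.130)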
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.151#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Set the hostname
hostnamectl set-hostname k8s-master1
# Configure time sync (this master also serves NTP to the cluster)
# sed -i 's#^server 0.*##g' /etc/chrony.conf
# sed -i 's#^server 1.*##g' /etc/chrony.conf
# sed -i 's#^server 2.*##g' /etc/chrony.conf
sed -i 's#^server 3.*#server 127.127.1.0 iburst#g' /etc/chrony.conf
sed -i 's@^#allow .*@allow 192.168.88.0/24@g' /etc/chrony.conf
sed -i 's@^#local stratum 10@local stratum 10@g' /etc/chrony.conf
systemctl restart chronyd && systemctl enable chronyd
ss -unl | grep 123
# Install the CFSSL tools
export CFSSL_URL="https://pkg.cfssl.org/R1.2"
yum install -y wget
wget "${CFSSL_URL}/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "${CFSSL_URL}/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
wget "${CFSSL_URL}/cfssl-certinfo_linux-amd64" -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
#-------------------------------------------------------
# Deploy etcd at this point (see the etcd section below), then continue here
#-------------------------------------------------------
# Create the k8s cluster certificates
mkdir ~/k8s_ssl/
# Define the k8s CA
cat <<\EOF > ~/k8s_ssl/ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cat <<\EOF > ~/k8s_ssl/ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
# The hosts field lists the IPs and domain names authorized to use this certificate; here that is the VIP, the apiserver node IPs, the kubernetes service IP and the service domain names.
# A domain must not end in '.' (e.g. kubernetes.default.svc.cluster.local. is invalid), or parsing fails with: x509: cannot parse dnsName "kubernetes.default.svc.cluster.local."
# If you use a domain other than cluster.local, e.g. opsnull.com, change the last two domains in the list to kubernetes.default.svc.opsnull and kubernetes.default.svc.opsnull.com
# The kubernetes service IP is created automatically by the apiserver; it is normally the first IP of the range given by --service-cluster-ip-range (172.18.255.1 here) and can later be checked with:
# kubectl get svc kubernetes
# Define the apiserver (server) certificate
cat <<\EOF > ~/k8s_ssl/server-csr.json
{
"CN": "kubernetes",
"hosts": [
"172.18.255.1",
"127.0.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
"192.168.88.151",
"192.168.88.152",
"192.168.88.153",
"192.168.88.154",
"192.168.88.155",
"192.168.88.156",
"192.168.88.157",
"192.168.88.158"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cat <<\EOF > ~/k8s_ssl/kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cd ~/k8s_ssl/
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
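# Optionally inspect what was issued, e.g. confirm the SANs on the server cert:
openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"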
# Create the directory layout for the master components
mkdir -p /opt/kubernetes/bin
mkdir -p /opt/kubernetes/cfg
mkdir -p /opt/kubernetes/logs
mkdir -p /opt/kubernetes/ssl
# Copy the generated certificates into the ssl directory
cp ~/k8s_ssl/ca-key.pem /opt/kubernetes/ssl/
cp ~/k8s_ssl/ca.pem /opt/kubernetes/ssl/
cp ~/k8s_ssl/server-key.pem /opt/kubernetes/ssl/
cp ~/k8s_ssl/server.pem /opt/kubernetes/ssl/
# Install the kube-apiserver, kube-controller-manager, kube-scheduler and kubectl binaries into /opt/kubernetes/bin
# Download links are under CHANGELOG/CHANGELOG-x.xx.md at https://github.com/kubernetes/kubernetes/releases
# The Client binaries tarball contains kubectl, under kubernetes/client/bin
# The Server binaries tarball contains kube-apiserver, kube-controller-manager and kube-scheduler, under kubernetes/server/bin
cd ~
tar zxvf kubernetes-client-linux-amd64.tar.gz
tar zxvf kubernetes-server-linux-amd64.tar.gz
cp kubernetes/client/bin/kubectl /opt/kubernetes/bin/
cp kubernetes/server/bin/kube-apiserver /opt/kubernetes/bin/
cp kubernetes/server/bin/kube-controller-manager /opt/kubernetes/bin/
cp kubernetes/server/bin/kube-scheduler /opt/kubernetes/bin/
# Create the token file for the user that requests certificates during bootstrap
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat <<EOF > /opt/kubernetes/cfg/token.csv
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
cat /opt/kubernetes/cfg/token.csv | awk -F "," '{print $1}'
# Create the apiserver config file
cat <<\EOF > /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.88.151:2379,https://192.168.88.152:2379,https://192.168.88.153:2379 \
--bind-address=192.168.88.151 \
--secure-port=6443 \
--advertise-address=192.168.88.151 \
--allow-privileged=true \
--service-cluster-ip-range=172.18.255.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
# Create the controller-manager config file
cat <<\EOF > /opt/kubernetes/cfg/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=172.19.0.0/16 \
--service-cluster-ip-range=172.18.255.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
EOF
# Create the scheduler config file
cat <<\EOF > /opt/kubernetes/cfg/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--address=127.0.0.1"
EOF
# Install systemd units for apiserver, controller-manager and scheduler
## apiserver
cat <<\EOF > /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
## controller-manager
cat <<\EOF > /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
## scheduler
cat <<\EOF > /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Start the master components
systemctl restart kube-apiserver && systemctl enable kube-apiserver
systemctl restart kube-scheduler && systemctl enable kube-scheduler
systemctl restart kube-controller-manager && systemctl enable kube-controller-manager
# Configure TLS bootstrapping so worker certificates are issued automatically
# Authorize the bootstrap user recorded in /opt/kubernetes/cfg/token.csv to request certificates
/opt/kubernetes/bin/kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
# Check the processes are up; this should print 4 (the three components plus the grep itself)
ps aux | grep kube | wc -l
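# A more direct health check; all three components should report Healthy:
/opt/kubernetes/bin/kubectl get cs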
#-------------------------------------------------------
# worker1 (run the following on k8s-node1)
#-------------------------------------------------------
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.152#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Set the hostname
hostnamectl set-hostname k8s-node1
# Configure time sync (point chronyd at the master)
sed -i 's#^server 1.*##g' /etc/chrony.conf
sed -i 's#^server 2.*##g' /etc/chrony.conf
sed -i 's#^server 3.*##g' /etc/chrony.conf
sed -i 's#^server 0.*#server 192.168.88.151 iburst#g' /etc/chrony.conf
systemctl restart chronyd && systemctl enable chronyd
chronyc sources
# Install Docker and enable it at boot
yum -y update
yum -y install deltarpm
yum -y install curl
curl -fsSL "https://get.docker.com/" | sh
systemctl enable docker && systemctl start docker
docker info
# Create the directory layout for the worker components
mkdir -p /opt/kubernetes/bin
mkdir -p /opt/kubernetes/cfg
mkdir -p /opt/kubernetes/logs
mkdir -p /opt/kubernetes/ssl
# Install the kubelet and kube-proxy binaries into /opt/kubernetes/bin
# Download links are under CHANGELOG/CHANGELOG-x.xx.md at https://github.com/kubernetes/kubernetes/releases
# The Node binaries tarball contains kubelet and kube-proxy, under kubernetes/node/bin
cd ~
tar zxvf ~/kubernetes-node-linux-amd64.tar.gz
cp kubernetes/node/bin/kubelet /opt/kubernetes/bin/
cp kubernetes/node/bin/kube-proxy /opt/kubernetes/bin/
# Create the kubelet and kube-proxy config files
# --------------------------------------------------- first fetch the bootstrap token on the master:
# cat /opt/kubernetes/cfg/token.csv | awk -F "," '{print $1}'
# kubelet
# kubelet.kubeconfig starts out empty; it is filled in automatically once the CSR is approved
cat <<\EOF > /opt/kubernetes/cfg/kubelet.kubeconfig
EOF
cat <<\EOF > /opt/kubernetes/cfg/bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority: /opt/kubernetes/ssl/ca.pem
server: https://192.168.88.151:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubelet-bootstrap
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
user:
token: 96848d76c92077410dfa608e4df9930e
EOF
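# The token above must match the first field of the master's token.csv; e.g.
# (TOKEN is a placeholder for the value fetched from the master):
# sed -i "s#token: .*#token: TOKEN#" /opt/kubernetes/cfg/bootstrap.kubeconfig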
cat <<\EOF > /opt/kubernetes/cfg/kubelet-config.yml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 172.18.255.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
cat <<\EOF > /opt/kubernetes/cfg/kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=HOST_NAME \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
EOF
sed -i "s#HOST_NAME#`hostname`#g" /opt/kubernetes/cfg/kubelet.conf
cat /opt/kubernetes/cfg/kubelet.conf
# kube-proxy
cat <<\EOF > /opt/kubernetes/cfg/kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority: /opt/kubernetes/ssl/ca.pem
server: https://192.168.88.151:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kube-proxy
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
user:
client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
EOF
cat <<EOF > /opt/kubernetes/cfg/kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: `hostname`
clusterCIDR: 172.19.0.0/16
mode: ipvs
ipvs:
scheduler: "rr"
iptables:
masqueradeAll: true
EOF
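# ipvs mode needs the IPVS kernel modules loaded, otherwise kube-proxy falls
# back to iptables; a sketch for the CentOS 7 (3.10) kernel:
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $m; done
yum -y install ipvsadm   # optional, for inspecting the rules with ipvsadm -Ln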
cat <<\EOF > /opt/kubernetes/cfg/kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF
# Install systemd units for kubelet and kube-proxy
# kubelet
cat <<\EOF > /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# kube-proxy
cat <<\EOF > /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Copy ca.pem, kube-proxy.pem and kube-proxy-key.pem from the master into /opt/kubernetes/ssl
cp k8s_ssl/ca.pem /opt/kubernetes/ssl/
cp k8s_ssl/kube-proxy-key.pem /opt/kubernetes/ssl/
cp k8s_ssl/kube-proxy.pem /opt/kubernetes/ssl/
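# The cp above assumes the files were already pushed to this node's home
# directory, e.g. from the master (a sketch, here for node1):
# scp -r ~/k8s_ssl [email protected]:~/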
# Start kube-proxy and kubelet
systemctl start kube-proxy && systemctl enable kube-proxy
systemctl start kubelet && systemctl enable kubelet
# On the master, approve the worker's certificate request
/opt/kubernetes/bin/kubectl get csr | grep Pending | awk '{print $1}'
/opt/kubernetes/bin/kubectl certificate approve `/opt/kubernetes/bin/kubectl get csr | grep Pending | awk '{print $1}'`
/opt/kubernetes/bin/kubectl get csr
/opt/kubernetes/bin/kubectl get node
# Install the CNI plugins on the worker node
grep "cni" /opt/kubernetes/cfg/kubelet.conf
mkdir -pv /opt/cni/bin /etc/cni/net.d
# Download the CNI plugins from https://github.com/containernetworking/plugins/releases/
tar xf ~/cni-plugins-linux-* -C /opt/cni/bin
# On the master, apply the YAML below to deploy and start the flannel network plugin on the worker nodes
# Source: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
cat <<\EOF > ~/kube-flannel.yaml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.19.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.13.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.13.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
EOF
/opt/kubernetes/bin/kubectl apply -f ~/kube-flannel.yaml
# Wait until the flannel pod shows STATUS Running
/opt/kubernetes/bin/kubectl get pods -n kube-system
# The nodes change from NotReady to Ready
/opt/kubernetes/bin/kubectl get nodes
# Authorize the apiserver to access the kubelet (needed for kubectl exec/logs)
cat <<\EOF > ~/apiserver-to-kubelet-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
- pods/log
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes
EOF
/opt/kubernetes/bin/kubectl apply -f ~/apiserver-to-kubelet-rbac.yaml
kubectl label node k8s-node1 node-role.kubernetes.io/worker=worker
#-------------------------------------------------------
# worker2 (repeat the worker steps above on k8s-node2 with the values below)
#-------------------------------------------------------
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.153#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Set the hostname
hostnamectl set-hostname k8s-node2
Common etcd configuration (run on all three etcd nodes)
mkdir -p /opt/etcd/cfg
mkdir -p /opt/etcd/bin
mkdir -p /opt/etcd/ssl
# Generate the etcd certificates on the master
mkdir -p ~/etcd_ssl
# Define the etcd CA
cat <<\EOF > ~/etcd_ssl/ca-csr.json
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen"
}
]
}
EOF
cat <<\EOF > ~/etcd_ssl/ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
# Define the etcd server certificate
cat <<\EOF > ~/etcd_ssl/server-csr.json
{
"CN": "etcd",
"hosts": [
"192.168.88.151",
"192.168.88.152",
"192.168.88.153"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen"
}
]
}
EOF
cd ~/etcd_ssl/
# Create the etcd CA and issue the etcd server certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
ls *pem
# Copy ca.pem, server.pem and server-key.pem into /opt/etcd/ssl
cp ~/etcd_ssl/ca.pem /opt/etcd/ssl
cp ~/etcd_ssl/server-key.pem /opt/etcd/ssl
cp ~/etcd_ssl/server.pem /opt/etcd/ssl
# Install etcd: put the etcd and etcdctl binaries into /opt/etcd/bin
# Binaries: https://github.com/etcd-io/etcd/releases
cd ~
tar zxvf ~/etcd-*.tar.gz
cp ~/etcd-*/etcd /opt/etcd/bin
cp ~/etcd-*/etcdctl /opt/etcd/bin
etcd-1 configuration (on k8s-master1)
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.151#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Write the etcd config
cat <<\EOF > /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.88.151:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.88.151:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.88.151:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.88.151:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.88.151:2380,etcd-2=https://192.168.88.152:2380,etcd-3=https://192.168.88.153:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
etcd-2 configuration (on k8s-node1)
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.152#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Write the etcd config
cat <<\EOF > /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.88.152:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.88.152:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.88.152:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.88.152:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.88.151:2380,etcd-2=https://192.168.88.152:2380,etcd-3=https://192.168.88.153:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
etcd-3 configuration (on k8s-node2)
sed -i 's#IPADDR=192.168.88.130#IPADDR=192.168.88.153#g' /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
# Write the etcd config
cat <<\EOF > /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.88.153:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.88.153:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.88.153:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.88.153:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.88.151:2380,etcd-2=https://192.168.88.152:2380,etcd-3=https://192.168.88.153:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
Start all etcd nodes at roughly the same time (the cluster needs a quorum to come up)
# Install the etcd systemd unit
cat <<\EOF > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl start etcd && systemctl enable etcd
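# Verify the cluster is healthy (paths and endpoints as configured above):
ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
--cacert=/opt/etcd/ssl/ca.pem \
--cert=/opt/etcd/ssl/server.pem \
--key=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.88.151:2379,https://192.168.88.152:2379,https://192.168.88.153:2379" \
endpoint health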
# Based on the reference file at:
# https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed
cat <<\EOF > ~/coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local 172.19.0.0/16 {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.0
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 172.18.255.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
kubectl apply -f ~/coredns.yaml
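# Verify cluster DNS resolves service names (busybox 1.28 has a well-behaved nslookup):
kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup kubernetes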
# https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dashboard/dashboard.yaml
cat <<\EOF > ~/dashboard.yaml
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: EnsureExists
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.1
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
EOF
# Installs into the kubernetes-dashboard namespace by default
kubectl apply -f ~/dashboard.yaml
# To remove:
# kubectl delete -f ~/dashboard.yaml
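# Logging in to the dashboard still needs a token. A sketch that creates an
# admin ServiceAccount and prints its token (the names are illustrative):
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')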
kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml
kubectl apply -f https://addons.kuboard.cn/metrics-server/0.3.7/metrics-server.yaml
kubectl get pods -l k8s.kuboard.cn/name=kuboard -n kube-system
# NAME READY STATUS RESTARTS AGE
# kuboard-54c9c4f6cb-6lf88 1/1 Running 0 45s
# Generate the token used to log in to Kuboard
# If you installed Kubernetes following the www.kuboard.cn docs, run this on the first master node
echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)
By default only the master node can manage the cluster; the steps below enable kubectl from a remote management machine.
# On the master node
# Copy the kubectl binary (/opt/kubernetes/bin/kubectl) to the remote management machine
cd ~/k8s_ssl
# Define the admin client certificate
cat <<\EOF > ~/k8s_ssl/admin-csr.json
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "GuangDong",
"ST": "Shenzhen",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# Issue the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Generate the kubeconfig file
# Set the cluster parameters
kubectl config set-cluster kubernetes \
--server=https://192.168.88.151:6443 \
--certificate-authority=ca.pem \
--embed-certs=true \
--kubeconfig=config
# Set the client credentials
kubectl config set-credentials cluster-admin \
--embed-certs=true \
--client-key=admin-key.pem \
--client-certificate=admin.pem \
--kubeconfig=config
# Set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=config
# Make it the active context, otherwise the file has no current-context
kubectl config use-context default --kubeconfig=config
# Send the generated config file to the remote management machine (~/k8s_ssl/config)
# On the remote management machine, use the config file:
kubectl get nodes --kubeconfig=config
# Create ~/.kube and move the config file into it
mkdir ~/.kube && mv ~/config ~/.kube/
# Now the cluster can be managed without --kubeconfig
kubectl get nodes
#-------------------------------------------------------
# master2 (run on k8s-master2 unless noted)
#-------------------------------------------------------
# Configure /etc/hosts
echo "192.168.88.154 k8s-master2" >> /etc/hosts
# Set the hostname
hostnamectl set-hostname k8s-master2
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# Disable swap
swapoff -a
sed -i 's#.*swap.*##g' /etc/fstab
free -m
# Copy files from the existing master to the new master (run on k8s-master1)
scp -r /opt/kubernetes/ [email protected]:/opt
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service [email protected]:/usr/lib/systemd/system/
scp -r /opt/etcd [email protected]:/opt
# On the new master, point /opt/kubernetes/cfg/kube-apiserver.conf at its own address
sed -i 's#--bind-address=.*\\#--bind-address=192.168.88.154 \\#g' /opt/kubernetes/cfg/kube-apiserver.conf
sed -i 's#--advertise-address=.*\\#--advertise-address=192.168.88.154 \\#g' /opt/kubernetes/cfg/kube-apiserver.conf
# Start the services
systemctl start kube-apiserver && systemctl enable kube-apiserver
systemctl start kube-controller-manager && systemctl enable kube-controller-manager
systemctl start kube-scheduler && systemctl enable kube-scheduler
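# Verify the new master sees the same cluster (kubectl talks to the local apiserver here):
/opt/kubernetes/bin/kubectl get cs
/opt/kubernetes/bin/kubectl get nodes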
echo "# k8s path" >> /etc/profile
echo "export PATH=\$PATH:/opt/kubernetes/bin/" >> /etc/profile
source /etc/profile
#-------------------------------------------------------
# Load balancers, option 1: nginx stream proxy (run on k8s-LB1 and k8s-LB2)
#-------------------------------------------------------
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
cat <<\EOF > /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
EOF
yum list && yum install nginx -y
cat <<\EOF > /etc/nginx/nginx.conf
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
proxy_connect_timeout 1;
# If a server fails 2 times within 4s it is judged unavailable and
# removed from the pool; after 4s it is added back and tried again
upstream k8s-apiserver {
server 192.168.88.151:6443 max_fails=2 fail_timeout=4s weight=1;
server 192.168.88.154:6443 max_fails=2 fail_timeout=4s weight=1;
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
EOF
# Check the config
nginx -t
systemctl start nginx && systemctl enable nginx
# keepalived health-check script: stop keepalived when nginx dies so the VIP fails over
cat <<\EOF > /etc/keepalived/check.sh
#!/bin/bash
count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check.sh
#-------------------------------------------------------
# Load balancers, option 2: nginx built with an active health-check module
#-------------------------------------------------------
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# Download the nginx source
# Download the health-check module
# Patch the nginx source
# Build and install
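# A hedged sketch of that build (versions, paths and the patch file name are
# illustrative; the check directive used below comes from
# https://github.com/yaoweibin/nginx_upstream_check_module):
# wget http://nginx.org/download/nginx-1.18.0.tar.gz && tar zxvf nginx-1.18.0.tar.gz
# git clone https://github.com/yaoweibin/nginx_upstream_check_module.git
# cd nginx-1.18.0
# patch -p1 < ../nginx_upstream_check_module/check_1.16.1+.patch
# ./configure --with-stream --add-module=../nginx_upstream_check_module
# make && make install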
cat <<\EOF > /etc/nginx/nginx.conf
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
proxy_connect_timeout 1;
# check interval: health-check interval, in milliseconds
# rise: add the server back into the pool after this many successful checks
# fall: remove the server from the pool after this many failed checks
# timeout: per-check timeout, in milliseconds
upstream k8s-apiserver {
server 192.168.88.151:6443;
server 192.168.88.154:6443;
check interval=3000 rise=2 fall=3 timeout=1000 type=tcp;
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
EOF
# Check the config
nginx -t
systemctl start nginx && systemctl enable nginx
# keepalived health-check script: stop keepalived when nginx dies so the VIP fails over
cat <<\EOF > /etc/keepalived/check.sh
#!/bin/bash
count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check.sh
#-------------------------------------------------------
# Load balancers, option 3: haproxy
#-------------------------------------------------------
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
yum -y install haproxy
cat <<\EOF > /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
stats socket /var/lib/haproxy/stats
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend main *:6443
mode tcp
default_backend k8s-apiserver
backend k8s-apiserver
mode tcp
balance roundrobin
server master1 192.168.88.151:6443 check inter 500 rise 1 fall 2
server master2 192.168.88.154:6443 check inter 500 rise 1 fall 2
EOF
systemctl start haproxy && systemctl enable haproxy
# keepalived health-check script: stop keepalived when haproxy dies so the VIP fails over
cat <<\EOF > /etc/keepalived/check.sh
#!/bin/bash
count=$(ps -ef | grep haproxy | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check.sh
#-------------------------------------------------------
# Keepalived, MASTER (run on k8s-LB1)
#-------------------------------------------------------
hostnamectl set-hostname k8s-LB1
cat <<\EOF > /etc/keepalived/keepalived.conf
global_defs {
router_id k8s-LB1
}
vrrp_script check {
script "/etc/keepalived/check.sh" ##检测脚本的路径
}
vrrp_instance VI_1 {
state MASTER
interface ens32 ## NIC to bind the VIP to; match your host (the other nodes in this doc use ens33)
virtual_router_id 51
priority 100 ## priority
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.88.155/24 ## the virtual IP
}
track_script {
check
}
}
EOF
systemctl start keepalived && systemctl enable keepalived
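# Verify the VIP landed on the MASTER node (interface name as configured above):
ip addr show ens32 | grep 192.168.88.155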
#-------------------------------------------------------
# Keepalived, BACKUP (run on k8s-LB2)
#-------------------------------------------------------
hostnamectl set-hostname k8s-LB2
cat <<\EOF > /etc/keepalived/keepalived.conf
global_defs {
router_id k8s-LB2
}
vrrp_script check {
script "/etc/keepalived/check.sh" ## 检测脚本的路径
}
vrrp_instance VI_1 {
state BACKUP
interface ens32 ## NIC to bind the VIP to; match your host
virtual_router_id 51
priority 90 ## lower priority than the MASTER
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.88.155/24 ## the virtual IP
}
track_script {
check
}
}
EOF
systemctl start keepalived && systemctl enable keepalived
#-------------------------------------------------------
# On every worker node, repoint the apiserver address at the VIP, 192.168.88.151 --> 192.168.88.155, in three files:
# 1. /opt/kubernetes/cfg/bootstrap.kubeconfig
# 2. /opt/kubernetes/cfg/kubelet.kubeconfig
# 3. /opt/kubernetes/cfg/kube-proxy.kubeconfig
cat /opt/kubernetes/cfg/bootstrap.kubeconfig | grep "192.168.88.151"
cat /opt/kubernetes/cfg/kubelet.kubeconfig | grep "192.168.88.151"
cat /opt/kubernetes/cfg/kube-proxy.kubeconfig | grep "192.168.88.151"
cd /opt/kubernetes/cfg
sed -i 's#server: https://192.168.88.151:6443#server: https://192.168.88.155:6443#g' bootstrap.kubeconfig
sed -i 's#server: https://192.168.88.151:6443#server: https://192.168.88.155:6443#g' kubelet.kubeconfig
sed -i 's#server: https://192.168.88.151:6443#server: https://192.168.88.155:6443#g' kube-proxy.kubeconfig
systemctl restart kubelet && systemctl restart kube-proxy
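# On the master, confirm the workers re-registered through the VIP:
kubectl get nodes
# Requests should now show up in the LB access log configured above:
tail /var/log/nginx/k8s-access.log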