etcd集群搭建
下载二进制软件包
https://github.com/coreos/etcd/releases
https://github.com/coreos/etcd/releases/
https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
解压
将两个执行文件复制到/usr/bin/
etcd和etcdctl文件复制到/usr/bin目录
配置etcd-1
vi /etc/etcd/etcd.yml
# etcd member #1 — static bootstrap: every member lists the full peer set in
# initial-cluster and starts with initial-cluster-state: new.
name: etcd-1
data-dir: /var/lib/etcd
# Client traffic (2379); also bound on loopback for local etcdctl use.
listen-client-urls: http://163.44.167.92:2379,http://127.0.0.1:2379
advertise-client-urls: http://163.44.167.92:2379,http://127.0.0.1:2379
# Peer traffic (2380). NOTE(review): plain HTTP — no TLS between peers or clients.
listen-peer-urls: http://163.44.167.92:2380
initial-advertise-peer-urls: http://163.44.167.92:2380
initial-cluster: etcd-1=http://163.44.167.92:2380,etcd-2=http://150.95.148.52:2380,etcd-3=http://133.130.122.48:2380
# Token must be identical on all three members.
initial-cluster-token: etcd-cluster-token
initial-cluster-state: new
配置etcd-2
vi /etc/etcd/etcd.yml
# etcd member #2 — identical to etcd-1 except for its own name and IP
# (150.95.148.52); initial-cluster/token/state must match the other members.
name: etcd-2
data-dir: /var/lib/etcd
listen-client-urls: http://150.95.148.52:2379,http://127.0.0.1:2379
advertise-client-urls: http://150.95.148.52:2379,http://127.0.0.1:2379
listen-peer-urls: http://150.95.148.52:2380
initial-advertise-peer-urls: http://150.95.148.52:2380
initial-cluster: etcd-1=http://163.44.167.92:2380,etcd-2=http://150.95.148.52:2380,etcd-3=http://133.130.122.48:2380
initial-cluster-token: etcd-cluster-token
initial-cluster-state: new
配置etcd-3
# etcd member #3 — identical to etcd-1 except for its own name and IP
# (133.130.122.48); initial-cluster/token/state must match the other members.
name: etcd-3
data-dir: /var/lib/etcd
listen-client-urls: http://133.130.122.48:2379,http://127.0.0.1:2379
advertise-client-urls: http://133.130.122.48:2379,http://127.0.0.1:2379
listen-peer-urls: http://133.130.122.48:2380
initial-advertise-peer-urls: http://133.130.122.48:2380
initial-cluster: etcd-1=http://163.44.167.92:2380,etcd-2=http://150.95.148.52:2380,etcd-3=http://133.130.122.48:2380
initial-cluster-token: etcd-cluster-token
initial-cluster-state: new
最后启动
etcd --config-file=/etc/etcd/etcd.yml &
查看成员
etcdctl member list
每个节点上执行查看健康状态
etcdctl cluster-health
下载二进制软件包
https://dl.k8s.io/v1.12.2/kubernetes.tar.gz
https://dl.k8s.io/v1.12.2/kubernetes-server-linux-amd64.tar.gz
https://dl.k8s.io/v1.12.2/kubernetes-client-darwin-amd64.tar.gz
https://dl.k8s.io/v1.12.2/kubernetes-node-linux-amd64.tar.gz
解压
tar -zxvf kubernetes-server-linux-amd64-12.2.tar.gz
进入命令目录
cd kubernetes/server/bin
复制文件到 /usr/bin下面
# Install the control-plane binaries from kubernetes/server/bin into the PATH.
for bin in kube-apiserver kube-controller-manager kube-scheduler kubectl; do
  cp "$bin" /usr/bin/
done
cat /lib/systemd/system/kube-apiserver.service
创建服务文件
创建kube-apiserver.service
vim /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
# '-' prefix: a missing EnvironmentFile is not a startup error.
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
# FIX: the original [Install] section was empty, so `systemctl enable
# kube-apiserver.service` created no boot-time symlink. The other kube-*
# units in this document already carry this line.
WantedBy=multi-user.target
创建kube-controller-manager.service
cat /lib/systemd/system/kube-controller-manager.service
vim /lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
# NOTE(review): no After=kube-apiserver.service ordering here — presumably
# the manager just retries until 127.0.0.1:8080 is reachable; confirm.
[Service]
# '-' prefix: a missing EnvironmentFile is not a startup error.
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
创建kube-scheduler.service
cat /lib/systemd/system/kube-scheduler.service
vim /lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
# '-' prefix: a missing EnvironmentFile is not a startup error.
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
创建kubernetes master组件配置文件
创建apiserver
mkdir -p /etc/kubernetes
cat /etc/kubernetes/apiserver
vim /etc/kubernetes/apiserver
KUBE_API_ARGS="--storage-backend=etcd3 --etcd-servers=http://127.0.0.1:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --service-cluster-ip-range=169.169.0.0/16 --service-node-port-range=1-65535 --admission-control=NamespaceLifecycle,LimitRanger,ResourceQuota --logtostderr=false --log-dir=/var/log/kubernets/log --v=2"
创建controller-manager
cat /etc/kubernetes/controller-manager
vim /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--master=http://127.0.0.1:8080 --logtostderr=true --log-dir=/var/log/kubernets/log --v=2"
创建scheduler
cat /etc/kubernetes/scheduler
vim /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--master=http://127.0.0.1:8080 --logtostderr=false --log-dir=/var/log/kubernets/log --v=2"
配置开机启动
# Register the new unit files, then enable and start the control plane.
systemctl daemon-reload
for unit in kube-apiserver kube-controller-manager kube-scheduler; do
  systemctl enable "${unit}.service"
done
for unit in kube-apiserver kube-controller-manager kube-scheduler; do
  systemctl start "${unit}.service"
done
验证Master是否安装成功
kubectl get componentstatuses
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health": "true"}
安装node节点
将kubernetes-node-linux-amd64.tar.gz复制到所有的node节点服务器上解压,将 kubectl、kubelet、kube-proxy 复制到/usr/bin/目录下
# Extract the node binaries. Filename fixed to match the artifact actually
# downloaded above (the original referenced a non-existent "-12.2" suffix).
tar -zxvf kubernetes-node-linux-amd64.tar.gz
cd kubernetes/node/bin
cp kubectl /usr/bin
cp kubelet /usr/bin
cp kube-proxy /usr/bin
创建 kubelet.service
cat /lib/systemd/system/kubelet.service
#手动创建此目录
mkdir /var/kubeletwork
cat /lib/systemd/system/kubelet.service
vim /lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
# kubelet drives the container runtime, so it is ordered after (and requires) docker.
After=docker.service
Requires=docker.service
[Service]
# '-' prefix: a missing working directory is not fatal (it is created by hand above).
WorkingDirectory=-/var/kubeletwork
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet $KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
创建 kube-proxy.service
cat /lib/systemd/system/kube-proxy.service
vim /lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.service
#Requires=network.service
[Service]
# '-' prefix: a missing EnvironmentFile is not a startup error.
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
创建 kubelet.kubeconfig
mkdir -p /etc/kubernetes
cat /etc/kubernetes/kubelet.kubeconfig
vim /etc/kubernetes/kubelet.kubeconfig
# kubeconfig pointing the kubelet at the master's insecure HTTP endpoint.
# FIX: the original had all nested keys flattened to column 0, which is not a
# valid kubeconfig document — the keys under `cluster:` / `context:` and the
# sibling `name:` entries must be indented as below.
apiVersion: v1
clusters:
- cluster:
    # NOTE(review): plain HTTP to the apiserver, TLS verification skipped —
    # acceptable only for a lab setup.
    insecure-skip-tls-verify: true
    server: http://192.168.1.251:8080
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: ""
  name: system:node:kube-master
current-context: system:node:kube-master
kind: Config
preferences: {}
users: []
创建kubelet
这里不再重复列出其他node节点的配置文件,其他节点 --hostname-override= 和 --address= 值不一样,换成对应的机器IP地址
cat /etc/kubernetes/kubelet
vim /etc/kubernetes/kubelet
# One line per node — use exactly one of these in each node's /etc/kubernetes/kubelet,
# with --hostname-override and --address set to that node's own IP.
# Node 192.168.1.247:
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --hostname-override=192.168.1.247 --logtostderr=true --log-dir=/var/log/kubernets/log --v=2 --address=192.168.1.247 --port=10250 --fail-swap-on=false --pod-infra-container-image=zengshaoyong/pod-infrastructure"
# Node 192.168.1.240:
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --hostname-override=192.168.1.240 --logtostderr=true --log-dir=/var/log/kubernets/log --v=2 --address=192.168.1.240 --port=10250 --fail-swap-on=false --pod-infra-container-image=zengshaoyong/pod-infrastructure"
创建proxy
cat /etc/kubernetes/proxy
vim /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--master=http://192.168.1.251:8080 --hostname-override=node1 --v=2 --logtostderr=true --log-dir=/var/log/kubernets/log"
配置开机启动
# Register the unit files, then enable, start and check both node services.
systemctl daemon-reload
for action in enable start status; do
  for unit in kubelet kube-proxy; do
    systemctl "$action" "$unit"
  done
done
检查节点状态
kubectl get nodes
4、安装flannel网络组件
在所有节点上安装flannel(包括master 、node节点)
下载 flannel
# Download and install flannel v0.10.0 (run on every node, master included).
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
mkdir flannel-v0.10.0-linux-amd64
tar xvf flannel-v0.10.0-linux-amd64.tar.gz -C flannel-v0.10.0-linux-amd64
mv flannel-v0.10.0-linux-amd64 /opt/flannel
# Note: brace expansion is bash-specific.
cp /opt/flannel/{flanneld,mk-docker-opts.sh} /usr/bin/
cat /usr/lib/systemd/system/flanneld.service
vim /usr/lib/systemd/system/flanneld.service
[Unit]
Description=flannel
After=network-online.target network.target
# flannel must publish its subnet lease before docker starts, so docker can
# pick up the generated DOCKER_OPTS.
Before=docker.service
[Service]
# -etcd-prefix must match the key path written with `etcdctl mk` below.
ExecStart=/opt/flannel/flanneld -etcd-endpoints=http://127.0.0.1:2379 -etcd-prefix=/flannel/network
# Generates a docker options env file from flannel's subnet lease.
# NOTE(review): the -k/-d arguments are commented out on the next line, so the
# script runs with its defaults — confirm docker's unit sources the file it
# actually writes.
ExecStartPost=/opt/flannel/mk-docker-opts.sh
#-k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
# Write flannel's network config into etcd (`mk` is etcdctl v2-API syntax).
# The key path = flanneld's -etcd-prefix (/flannel/network) + "/config".
# NOTE(review): "172.17.1.0/16" has host bits set — probably meant
# 172.17.0.0/16; verify before deploying.
etcdctl mk /flannel/network/config '{"Network":"172.17.1.0/16"}'
# Stop/disable first so the enable+restart below starts from a clean state.
systemctl stop flanneld.service
systemctl daemon-reload
systemctl disable flanneld.service
systemctl enable flanneld.service
systemctl restart flanneld.service
systemctl status flanneld.service
重启 docker、kube-apiserver、kube-scheduler、kube-controller-manager、kubelet、kube-proxy服务
# --- On the master: restart flannel, docker, then the control-plane services ---
systemctl daemon-reload
systemctl enable flanneld.service
systemctl start flanneld.service
systemctl restart docker.service
systemctl restart kube-apiserver.service
systemctl restart kube-scheduler.service
systemctl restart kube-controller-manager.service
# --- On each worker node: restart flannel, docker, kubelet and kube-proxy ---
systemctl daemon-reload
systemctl enable flanneld.service
systemctl restart flanneld.service
systemctl restart docker.service
systemctl restart kubelet.service
systemctl restart kube-proxy.service
kubectl get svc
root@master:/opt/app# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx 1/1 Running 2 3h8m 172.17.0.2 192.168.1.240
curl 192.168.1.240:30080