Section 6: K8s Overview and Master/Node Service Installation

6.1 The Best Use Case for K8s: Microservice Architecture

(figure: microservice architecture diagram)

6.2 K8s Core Components and Architecture

(figures: K8s core components and architecture diagrams)

6.3 Preparation Before Installing and Deploying K8s

The lab uses three virtual machines:
master 192.168.18.121 etcd / apiserver / controller-manager / scheduler / kubelet / kube-proxy
node1 192.168.18.122 kubelet / kube-proxy / docker
node2 192.168.18.123 kubelet / kube-proxy / docker
Virtual machine tuning

# Disable SELinux
# Stop and disable the firewalld service
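# For example (a typical sketch; adjust to your environment):
[root@master ~]# setenforce 0
[root@master ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[root@master ~]# systemctl stop firewalld.service
[root@master ~]# systemctl disable firewalld.service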
# Stop and disable NetworkManager.service
[root@master ~]# systemctl stop NetworkManager.service 
[root@master ~]# systemctl disable NetworkManager.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
# Configure the Aliyun yum repository
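# For example, one common way is to fetch Aliyun's CentOS 7 repo file and rebuild the cache:
[root@master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@master ~]# yum clean all && yum makecache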
# Install some common utilities
[root@master ~]# yum install bash-completion.noarch -y
[root@master ~]# yum install -y net-tools vim lrzsz wget tree screen lsof tcpdump
# Stop and disable the postfix.service
[root@master ~]# systemctl stop postfix.service 
[root@master ~]# systemctl disable postfix.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/postfix.service.
# Configure hosts resolution (required on all machines)
[root@master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.18.121  master
192.168.18.122  node1
192.168.18.123  node2
# Push the file to the other nodes
[root@master ~]# scp -rp /etc/hosts 192.168.18.122:/etc/hosts
The authenticity of host '192.168.18.122 (192.168.18.122)' can't be established.
ECDSA key fingerprint is SHA256:+BtfrhBtnaZlfOcA+jp7GC9MN32UwcX9l9qMSpa25uw.
ECDSA key fingerprint is MD5:e7:19:3d:34:57:53:e4:5b:88:0f:cb:1f:d1:81:b8:9d.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.18.122' (ECDSA) to the list of known hosts.
[email protected]'s password: 
hosts  
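# Push to node2 as well
[root@master ~]# scp -rp /etc/hosts 192.168.18.123:/etc/hosts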

6.4 Installing Services on the Master Host

6.4.1 Installing the etcd Service
[root@master ~]# yum install -y etcd
# Edit the etcd configuration file
[root@master ~]# vim /etc/etcd/etcd.conf 
  6 ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
 21 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.18.121:2379"
# Check the effective etcd configuration
[root@master ~]# grep -Ev "^$|^#"  /etc/etcd/etcd.conf 
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.18.121:2379"
# Start etcd and enable it at boot
[root@master ~]# systemctl start etcd.service 
[root@master ~]# systemctl enable etcd.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
# Verify that etcd is listening on ports 2379 and 2380
# 2379 serves client requests; 2380 is used for communication within the etcd cluster
[root@master ~]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      1864/etcd           
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      773/sshd            
tcp6       0      0 :::2379                 :::*                    LISTEN      1864/etcd           
tcp6       0      0 :::22                   :::*                    LISTEN      773/sshd            
[root@master ~]# 

Test

[root@master ~]# etcdctl set testdir/testkey0 0
0
[root@master ~]# etcdctl get testdir/testkey0 
0
[root@master ~]# etcdctl -C http://192.168.18.121:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://192.168.18.121:2379
cluster is healthy
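
As an extra sanity check, etcd's HTTP API can also be queried directly (optional; assumes the advertised client URL configured above):

[root@master ~]# curl http://192.168.18.121:2379/version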
6.4.2 Installing the Master Node Components
# Search the yum repository for the kubernetes-master package
[root@master ~]# yum search kubernetes-master
Loaded plugins: fastestmirror
Repository base is listed more than once in the configuration
Repository updates is listed more than once in the configuration
Repository extras is listed more than once in the configuration
Repository centosplus is listed more than once in the configuration
Repository contrib is listed more than once in the configuration
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
=================================================== N/S matched: kubernetes-master ====================================================
kubernetes-master.x86_64 : Kubernetes services for master host

  Name and summary matches only, use "search all" for everything.
# Install
[root@master ~]# yum install   -y  kubernetes-master.x86_64

Configure the apiserver

[root@master ~]# vim /etc/kubernetes/apiserver 
8 KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# Binding to 127.0.0.1 allows local access only; binding to 0.0.0.0 allows access from any machine
 10 # The port on the local server to listen on.
 11 KUBE_API_PORT="--port=8080"
# Enable listening on port 8080
 13 # Port minions listen on
 14 KUBELET_PORT="--kubelet-port=10250"
# Enable the minion (kubelet) port 10250
 16 # Comma separated list of nodes in the etcd cluster
 17 KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.18.121:2379"
# Review the apiserver configuration
[root@master ~]# grep -Ev "^#|^$"  /etc/kubernetes/apiserver 
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.18.121:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_API_ARGS=""

Configure the controller manager and scheduler (they share one file)

[root@master ~]# vim /etc/kubernetes/config 
 21 # How the controller-manager, scheduler, and proxy find the apiserver
 22 KUBE_MASTER="--master=http://192.168.18.121:8080"
[root@master ~]# grep -Ev "^#|^$"  /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.18.121:8080"

With the configuration done, start the services and enable them at boot

[root@master ~]# systemctl start kube-apiserver.service 
[root@master ~]# systemctl start kube-controller-manager.service 
[root@master ~]# systemctl start kube-scheduler.service 
[root@master ~]# systemctl enable kube-scheduler.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master ~]# systemctl enable kube-controller-manager.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master ~]# systemctl enable kube-apiserver.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

Check the health of the K8s components

[root@master ~]# kubectl get componentstatus 
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
controller-manager   Healthy   ok  
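
The apiserver's insecure port can also be probed directly as a quick sanity check (optional; assumes port 8080 as configured above):

[root@master ~]# curl http://192.168.18.121:8080/version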

6.5 Installing the Node Components

[root@node1 ~]# yum install -y kubernetes-node.x86_64
6.5.1 Configuring the Node Components on the Master
# /etc/kubernetes/config was already configured above; no changes needed
# Edit /etc/kubernetes/kubelet
[root@master ~]# vim /etc/kubernetes/kubelet 
  4 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
  5 KUBELET_ADDRESS="--address=192.168.18.121"
  6 
  7 # The port for the info server to serve on
  8 KUBELET_PORT="--port=10250"
 10 # You may leave this blank to use the actual hostname
 11 KUBELET_HOSTNAME="--hostname-override=master"
 13 # location of the api-server
 14 KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
# Review the configuration
[root@master ~]# grep -Ev "^#|^$"  /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=192.168.18.121"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=master"
KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
# Start the services and enable them at boot; kubelet will start docker automatically
[root@master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
   Active: inactive (dead)
     Docs: http://docs.docker.com
[root@master ~]# systemctl start kubelet.service 
[root@master ~]# systemctl start kube-proxy.service 
[root@master ~]# systemctl enable  kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@master ~]# systemctl enable  kube-proxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
   Active: active (running) since Thu 2022-09-15 00:58:28 CST; 55s ago
     Docs: http://docs.docker.com
 Main PID: 2734 (dockerd-current)
   CGroup: /system.slice/docker.service
           ├─2734 /usr/bin/dockerd-current --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current --default-runtime=docke...
           └─2740 /usr/bin/docker-containerd-current -l unix:///var/run/docker/libcontainerd/docker-containerd.sock --metrics-interv...

Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.241532749+08:00" level=warning msg="Docker could not e...ystem"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.280003502+08:00" level=info msg="Graph migration to co...conds"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.280865641+08:00" level=info msg="Loading containers: start."
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.326211940+08:00" level=info msg="Firewalld running: false"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.414513143+08:00" level=info msg="Default bridge (docke...dress"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.454367659+08:00" level=info msg="Loading containers: done."
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.472704447+08:00" level=info msg="Daemon has completed ...ation"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.472734973+08:00" level=info msg="Docker daemon" commit...1.13.1
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.483495766+08:00" level=info msg="API listen on /var/ru....sock"
Sep 15 00:58:28 master systemd[1]: Started Docker Application Container Engine.
Hint: Some lines were ellipsized, use -l to show in full.

Verify that the node on the master has registered automatically

[root@master ~]# kubectl get node
NAME      STATUS    AGE
master    Ready     1m

Installation on the remaining nodes

[root@node1 ~]# vim /etc/kubernetes/config 
 21 # How the controller-manager, scheduler, and proxy find the apiserver
 22 KUBE_MASTER="--master=http://192.168.18.121:8080"
[root@node1 ~]# vim /etc/kubernetes/kubelet
  4 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
  5 KUBELET_ADDRESS="--address=192.168.18.122"
  7 # The port for the info server to serve on
  8 KUBELET_PORT="--port=10250"
 10 # You may leave this blank to use the actual hostname
 11 KUBELET_HOSTNAME="--hostname-override=node1"
 13 # location of the api-server
 14 KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
[root@node2 ~]# grep -Ev "^$|^#"  /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=192.168.18.123"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=node2"
KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
# After changing the configuration, remember to start the services and enable them at boot
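# For example, on each node (same services as on the master):
[root@node1 ~]# systemctl start kubelet.service kube-proxy.service
[root@node1 ~]# systemctl enable kubelet.service kube-proxy.service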

Verify the nodes (on the master)

[root@master ~]# kubectl get node
NAME      STATUS    AGE
master    Ready     10m
node1     Ready     2m
node2     Ready     27s
# Command to delete a node (not actually run here; node-name is only a placeholder)
[root@master ~]# kubectl delete   node  node-name

6.6 Configuring the flannel Network for All Nodes

This enables communication between containers across all nodes.

6.6.1 Configuring flannel on the Master
[root@master ~]# yum install -y flannel
# Edit the flannel configuration file
[root@master ~]# vim /etc/sysconfig/flanneld 
  3 # etcd url location.  Point this to the server where etcd runs
  4 FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
  6 # etcd config key.  This is the configuration key that flannel queries
  7 # For address range assignment
  8 FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Create the corresponding key in etcd
[root@master ~]# grep -Ev  "^$|^#"  /etc/sysconfig/flanneld 
FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
 
[root@master ~]# etcdctl set /atomic.io/network/config  '{ "Network":"172.16.0.0/16" }'
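# Optionally verify the key
[root@master ~]# etcdctl get /atomic.io/network/config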

Restart the flannel and docker services

[root@master ~]# systemctl restart docker
[root@master ~]# systemctl start flanneld.service 
[root@master ~]# systemctl enable flanneld.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
# The newly added flannel interface is used for container-to-container communication
[root@master ~]# ifconfig
docker0: flags=4099  mtu 1500
        inet 172.16.49.1  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:b3:03:1a:7b  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
flannel0: flags=4305  mtu 1472
        inet 172.16.49.0  netmask 255.255.0.0  destination 172.16.49.0
        inet6 fe80::75dd:f199:d48d:e8c  prefixlen 64  scopeid 0x20
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 144 (144.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
[root@master ~]# ip add
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: enp0s3:  mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 08:00:27:f8:64:fc brd ff:ff:ff:ff:ff:ff
    inet 192.168.18.121/24 brd 192.168.18.255 scope global enp0s3
       valid_lft forever preferred_lft forever
    inet6 fe80::a00:27ff:fef8:64fc/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0:  mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:b3:03:1a:7b brd ff:ff:ff:ff:ff:ff
    inet 172.16.49.1/24 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel0:  mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none 
    inet 172.16.49.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::75dd:f199:d48d:e8c/64 scope link flags 800 
       valid_lft forever preferred_lft forever
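
flannel also records the subnet it leased under the configured etcd prefix; listing that key is a quick way to confirm registration (optional, assuming the /atomic.io/network prefix set above):

[root@master ~]# etcdctl ls /atomic.io/network/subnets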
6.6.2 Configuring flannel on the Nodes

If the configuration is not changed to point at the master's etcd IP, flanneld hangs and cannot start:

[root@node1 ~]# tail -f /var/log/messages
Sep 15 11:26:09 node1 flanneld-start: E0915 11:26:09.441937    1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:10 node1 flanneld-start: E0915 11:26:10.443056    1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:11 node1 flanneld-start: E0915 11:26:11.443681    1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:12 node1 flanneld-start: E0915 11:26:12.444417    1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused

Edit the configuration file and restart the services

[root@node1 ~]# vi /etc/sysconfig/flanneld 
[root@node2 ~]# grep -Ev "^#|^$"  /etc/sysconfig/flanneld 
FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@node1 ~]# systemctl start flanneld
[root@node1 ~]# systemctl enable flanneld
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@node1 ~]# systemctl restart docker
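# Optional quick check: flanneld records its leased subnet in /run/flannel/subnet.env,
# and after the docker restart docker0 should hold an address inside that subnet
[root@node1 ~]# cat /run/flannel/subnet.env
[root@node1 ~]# ip addr show docker0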

Test communication between containers on different hosts

# Pull the busybox image
[root@master ~]# docker pull busybox
# Or load the image from a tar archive
[root@master ~]# docker  load  -i  docker_busybox.tar.gz 
# Start a container on each host and check its IP
# Container on the master
[root@master ~]# docker run -it busybox
/ # ip add
5: eth0@if6:  mtu 1472 qdisc noqueue 
    link/ether 02:42:ac:10:31:02 brd ff:ff:ff:ff:ff:ff
    inet 172.16.49.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe10:3102/64 scope link 
       valid_lft forever preferred_lft forever
# Container on node1
[root@node1 ~]# docker run -it busybox
/ # ip add
5: eth0@if6:  mtu 1472 qdisc noqueue 
    link/ether 02:42:ac:10:5f:02 brd ff:ff:ff:ff:ff:ff
    inet 172.16.95.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe10:5f02/64 scope link 
       valid_lft forever preferred_lft forever
# Container on node2
[root@node2 ~]# docker run -it busybox
/ # ip add
5: eth0@if6:  mtu 1472 qdisc noqueue 
    link/ether 02:42:ac:10:13:02 brd ff:ff:ff:ff:ff:ff
    inet 172.16.19.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe10:1302/64 scope link 
       valid_lft forever preferred_lft forever
##############################
# From the container on the master, ping the container IPs on the other nodes
/ # ping 172.16.95.2
PING 172.16.95.2 (172.16.95.2): 56 data bytes
64 bytes from 172.16.95.2: seq=0 ttl=60 time=2.165 ms
64 bytes from 172.16.95.2: seq=1 ttl=60 time=0.607 ms
^C
--- 172.16.95.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.607/1.386/2.165 ms
/ # ping 172.16.19.2
PING 172.16.19.2 (172.16.19.2): 56 data bytes
64 bytes from 172.16.19.2: seq=0 ttl=60 time=1.187 ms
64 bytes from 172.16.19.2: seq=1 ttl=60 time=1.475 ms
64 bytes from 172.16.19.2: seq=2 ttl=60 time=1.321 ms
^C
--- 172.16.19.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 1.187/1.327/1.475 ms

If the containers cannot communicate, check the firewall rules

[root@master ~]# iptables -L -n
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0           
# The FORWARD chain policy below must be ACCEPT
Chain FORWARD (policy ACCEPT)
target     prot opt source               destination         
DOCKER-ISOLATION  all  --  0.0.0.0/0            0.0.0.0/0           
DOCKER     all  --  0.0.0.0/0            0.0.0.0/0           
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0            ctstate RELATED,ESTABLISHED
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0           
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0           

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0           
KUBE-SERVICES  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */

Chain DOCKER (1 references)
target     prot opt source               destination         

Chain DOCKER-ISOLATION (1 references)
target     prot opt source               destination         
RETURN     all  --  0.0.0.0/0            0.0.0.0/0           

Chain KUBE-FIREWALL (2 references)
target     prot opt source               destination         
DROP       all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000

Chain KUBE-SERVICES (1 references)
target     prot opt source               destination  
# Fix: set the FORWARD policy to ACCEPT
[root@master ~]# iptables -P  FORWARD ACCEPT
# Add this firewall setting to the docker unit file so it is applied every time docker starts
[root@master ~]# vim /usr/lib/systemd/system/docker.service
 18 ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
# Reload systemd and restart the docker service
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker.service 
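# After the restart, confirm that the FORWARD policy is still ACCEPT
[root@master ~]# iptables -L FORWARD -n | head -1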
