A Detailed Walkthrough of Installing, Deploying, and Using SaltStack

1. Environment Preparation

Prepare two virtual machines:

Hostname        IP          Role
linux-node1     10.0.0.7    master
linux-node2     10.0.0.8    minion

 

Install the master and the minion on node 1:

[root@linux-node1 ~]# yum install salt-master salt-minion -y

 

Install the minion on node 2:

[root@linux-node2 ~]# yum install salt-minion -y

 

Enable the services to start on boot on each node:

[root@linux-node1 ~]# chkconfig salt-master on

[root@linux-node1 ~]# chkconfig --add salt-master

[root@linux-node1 ~]# chkconfig salt-minion on

[root@linux-node1 ~]# chkconfig --add salt-minion

[root@linux-node2 ~]# chkconfig salt-minion on

[root@linux-node2 ~]# chkconfig --add salt-minion

 

Specify the master (on both minions):

vim /etc/salt/minion

master: 10.0.0.7

 

Accept the keys for node 1 and node 2 on the master:

salt-key -a 'linux*'
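
Before accepting, pending keys can be listed with salt-key -L; with both minions pointed at the master, the output should look something like this:

[root@linux-node1 ~]# salt-key -L

Accepted Keys:

Unaccepted Keys:

linux-node1

linux-node2

Rejected Keys: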

 

2. Testing

Ping node 1 and node 2:

salt '*' test.ping
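
If both keys were accepted, each minion should answer True:

linux-node1:

    True

linux-node2:

    True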

 

Run a shell command with cmd.run, e.g. check the load average:

salt '*' cmd.run 'uptime'

 

Configure the paths for SLS files:

[root@linux-node1 ~]# mkdir -p /srv/salt/base

[root@linux-node1 ~]# mkdir -p /srv/salt/test

[root@linux-node1 ~]# mkdir -p /srv/salt/prod

 

vim /etc/salt/master

file_roots:

  base:

    - /srv/salt/base

  test:

    - /srv/salt/test

  prod:

    - /srv/salt/prod

 

Restart the master:

/etc/init.d/salt-master restart

 

Write a YAML state to install Apache and manage its service:

cd /srv/salt/base   # the base file root defined above, so state.sls can find apache.sls

vim apache.sls

apache-install:

  pkg.installed:

    - names:

      - httpd

      - httpd-devel

 

apache-service:

  service.running:

    - name: httpd

    - enable: True

    - reload: True

 

Apply the state file:

salt '*' state.sls apache
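
To preview what would change without applying anything, the state can be dry-run first using the standard test flag:

salt '*' state.sls apache test=True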

 

Write the top file for highstate:

vim top.sls

base:

  'linux-node2':

    - apache

 

salt '*' state.highstate   # apply the highstate defined in top.sls

 

3. Data System: Grains

salt 'linux-node1' grains.items  # list all grain key/value pairs

 

salt 'linux-node1' grains.get fqdn # get a single grain value

 

Show all of node 1's eth0 IP addresses:

[root@linux-node1 ~]# salt 'linux-node1' grains.get ip_interfaces:eth0

linux-node1:

    - 10.0.0.7

    - fe80::20c:29ff:fe9d:57e8

 

# run a cmd.run command on minions matched by OS name

[root@linux-node1 ~]# salt -G os:CentOS cmd.run 'w'  # -G means match on grains

linux-node2:

     03:47:49 up  9:58,  2 users,  load average: 0.00, 0.00, 0.00

    USER     TTY      FROM              LOGIN@   IDLE   JCPU   PCPU WHAT

    root     pts/1    10.0.0.1         17:50    1:31m  0.14s  0.14s -bash

    root     pts/0    10.0.0.1         03:37    5:40   0.00s  0.00s -bash

linux-node1:

     03:47:49 up  1:35,  2 users,  load average: 0.00, 0.00, 0.00

    USER     TTY      FROM              LOGIN@   IDLE   JCPU   PCPU WHAT

    root     pts/0    10.0.0.1         02:13    1:01m  0.08s  0.01s vim top.sls

    root     pts/1    10.0.0.1         03:37    0.00s  0.52s  0.34s /usr/bin/python

 

vim /etc/salt/grains

web: nginx
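
A static grain in /etc/salt/grains is only read when the minion loads its grains, so refresh them before targeting; one way (restarting the salt-minion service also works):

salt '*' saltutil.sync_grains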

salt -G web:nginx cmd.run 'w'

 

4. Data System: Pillar

Configure the path for pillar files:

vim /etc/salt/master

pillar_roots:

  base:

    - /srv/pillar

 

mkdir /srv/pillar # create the default pillar directory

 

/etc/init.d/salt-master restart

vim /srv/pillar/apache.sls  # uses the Jinja template language

{% if grains['os'] == 'CentOS' %}

apache: httpd

{% elif grains['os'] == 'Debian' %}

apache: apache2

{% endif %}

 

vim /srv/pillar/top.sls

base:

  '*':

    - apache

 

[root@linux-node1 ~]# salt '*' pillar.items

linux-node2:

    ----------

    apache:

        httpd

linux-node1:

    ----------

    apache:

        httpd

 

After changing pillar data, refresh it on the minions for it to take effect:

[root@linux-node1 ~]# salt '*' saltutil.refresh_pillar

[root@linux-node1 ~]#  salt -I 'apache:httpd' test.ping

linux-node2:

    True

linux-node1:

    True

 

http://docs.saltstack.cn/topics/index.html    # SaltStack documentation site (Chinese)

SaltStack remote execution consists of three parts: targeting, modules, and returners.

 

Access control at the module level:

[root@linux-node1 ~]# vim /etc/salt/master

client_acl:

  oldboy:                      # the oldboy user may only use test.ping and all network.* functions

    - test.ping

    - network.*

  user01:                    # user01 may only run test.ping, and only on minions matching linux-node1*

    - linux-node1*:

      - test.ping

 

Set permissions so non-root users can publish commands:

chmod 755 /var/cache/salt /var/cache/salt/master /var/cache/salt/master/jobs /var/run/salt /var/run/salt/master

 

 

[root@linux-node1 ~]# /etc/init.d/salt-master restart

[root@linux-node1 ~]# su - oldboy

[oldboy@linux-node1 ~]$ salt '*' cmd.run 'df -h'

[WARNING ] Failed to open log file, do you have permission to write to /var/log/salt/master?

Failed to authenticate! This is most likely because this user is not permitted to execute commands, but there is a small possibility that a disk error occurred (check disk/inode usage).
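
cmd.run is rejected because it is not in oldboy's ACL; a whitelisted function should still work for the same user. Expected behavior:

[oldboy@linux-node1 ~]$ salt '*' test.ping

linux-node1:

    True

linux-node2:

    True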

 

Create the database schema for the MySQL returner, three tables in total:

CREATE DATABASE `salt`

DEFAULT CHARACTER SET utf8

DEFAULT COLLATE utf8_general_ci;

USE `salt`;

 

CREATE TABLE `jids` (

`jid` varchar(255) NOT NULL,

`load` mediumtext NOT NULL,

UNIQUE KEY `jid` (`jid`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE INDEX jid ON jids(jid) USING BTREE;

 

CREATE TABLE `salt_returns` (

`fun` varchar(50) NOT NULL,

`jid` varchar(255) NOT NULL,

`return` mediumtext NOT NULL,

`id` varchar(255) NOT NULL,

`success` varchar(10) NOT NULL,

`full_ret` mediumtext NOT NULL,

`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,

KEY `id` (`id`),

KEY `jid` (`jid`),

KEY `fun` (`fun`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

 

CREATE TABLE `salt_events` (

`id` BIGINT NOT NULL AUTO_INCREMENT,

`tag` varchar(255) NOT NULL,

`data` mediumtext NOT NULL,

`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,

`master_id` varchar(255) NOT NULL,

PRIMARY KEY (`id`),

KEY `tag` (`tag`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

 

Grant the salt user access:

grant all on salt.* to salt@'10.0.0.0/255.255.255.0' identified by 'salt';

 

yum install -y MySQL-python     # syncing job data to MySQL depends on MySQL-python

vim /etc/salt/master

Add at the bottom:

master_job_cache: mysql   # with this line, job results are saved to the database automatically, no --return mysql needed

mysql.host: '10.0.0.7'

mysql.user: 'salt'

mysql.pass: 'salt'

mysql.db: 'salt'

mysql.port: 3306

/etc/init.d/salt-master restart

 

Test whether command results are synced to the database:

[root@linux-node1 ~]# salt '*' cmd.run 'ls' --return mysql
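
To confirm the result landed in MySQL, query the salt_returns table created above; something like:

mysql> USE salt;

mysql> SELECT fun, jid, success, alter_time FROM salt_returns ORDER BY alter_time DESC LIMIT 2;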

 

Install the dependency packages needed for compiling from source:

yum install gcc gcc-c++ glibc autoconf make openssl openssl-devel

 

5. Automated Deployment of a Web Cluster Architecture

5.1 Installing HAProxy

cd /usr/local/src && tar zxf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy

cd /usr/local/src/haproxy-1.7.9/examples/

vim haproxy.init

BIN=/usr/local/haproxy/sbin/$BASENAME   # change the default binary path in the init script

cp haproxy.init /srv/salt/prod/haproxy/files/

 

Write the YAML states:

mkdir /srv/salt/prod/pkg            # SLS for the source-build dependencies

mkdir /srv/salt/prod/haproxy        # SLS for the HAProxy install

mkdir /srv/salt/prod/haproxy/files    # holds the HAProxy source tarball and init script

 

Automated compile-and-install of HAProxy:

cd /srv/salt/prod/pkg

 

Automate installation of the build dependencies:

vim pkg-init.sls  

pkg-init:

  pkg.installed:                 # install every package listed under names

    - names:

      - gcc

      - gcc-c++

      - glibc

      - make

      - autoconf

      - openssl

      - openssl-devel

  

cd /srv/salt/prod/haproxy

vim install.sls   # YAML state for the automated HAProxy build and install

include:

  - pkg.pkg-init

 

haproxy-install:

  file.managed:

    - name: /usr/local/src/haproxy-1.7.9.tar.gz

    - source: salt://haproxy/files/haproxy-1.7.9.tar.gz # salt:// is relative to the environment root, /srv/salt/prod here

    - user: root

    - group: root

    - mode: 755

  cmd.run:

    - name: cd /usr/local/src && tar zxf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy

    - unless: test -d /usr/local/haproxy

    - require:

      - pkg: pkg-init

      - file: haproxy-install

 

haproxy-init:

  file.managed:

    - name: /etc/init.d/haproxy   # create the /etc/init.d/haproxy file

    - source: salt://haproxy/files/haproxy.init

    - user: root

    - group: root

    - mode: 755

    - require:

      - cmd: haproxy-install

  cmd.run:

    - name: chkconfig --add haproxy

    - unless: chkconfig --list | grep haproxy # run only when this check returns false (the opposite of onlyif); if haproxy is already registered, skip the command above

    - require:

      - file: haproxy-init

net.ipv4.ip_nonlocal_bind:   # /proc/sys/net/ipv4/ip_nonlocal_bind defaults to 0; set it to 1 to allow listening on a non-local IP

  sysctl.present:             # state function for setting kernel parameters

    - value: 1

 

haproxy-config-dir:

  file.directory:   # state function for creating a directory

    - name: /etc/haproxy  # create the /etc/haproxy directory

    - user: root

    - group: root

    - mode: 755

 

Manually apply the HAProxy install state to node 1:

salt 'linux-node1' state.sls haproxy.install env=prod # env= selects the prod file root (newer Salt releases use saltenv=prod)
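
After the state runs, the sysctl change it makes can be spot-checked on the minion:

salt 'linux-node1' cmd.run 'sysctl net.ipv4.ip_nonlocal_bind'   # expect: net.ipv4.ip_nonlocal_bind = 1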

 

Create the cluster directories:

mkdir /srv/salt/prod/cluster

mkdir /srv/salt/prod/cluster/files

cd /srv/salt/prod/cluster/files

vim haproxy-outside.cfg

global

maxconn 100000

chroot /usr/local/haproxy

uid 99

gid 99

daemon

nbproc 1

pidfile /usr/local/haproxy/logs/haproxy.pid

log 127.0.0.1 local3 info

 

defaults

option http-keep-alive

maxconn 100000

mode http

timeout connect 5000ms

timeout client  50000ms

timeout server  50000ms

 

listen stats

mode http

bind 0.0.0.0:8888

stats enable

stats uri       /haproxy-status

stats auth      haproxy:saltstack

frontend frontend_www_example_com

bind    10.0.0.11:80

mode    http

option  httplog

log global

default_backend backend_www_example_com

 

backend backend_www_example_com

option forwardfor header X-REAL-IP

option httpchk HEAD / HTTP/1.0

balance source

server web-node1        10.0.0.7:8080 check inter 2000 rise 30 fall 15

server web-node2        10.0.0.8:8080 check inter 2000 rise 30 fall 15

 

cd ..

vim haproxy-outside.sls

include:

  - haproxy.install

 

haproxy-service:

  file.managed:

    - name: /etc/haproxy/haproxy.cfg

    - source: salt://cluster/files/haproxy-outside.cfg

    - user: root

    - group: root

    - mode: 644

  service.running:

    - name: haproxy

    - enable: True

    - reload: True

    - require:

      - cmd: haproxy-init

    - watch:

      - file: haproxy-service

Edit top.sls:

cd /srv/salt/base/

vim top.sls

base:

  '*':

    - init.env_init

 

prod:

  'linux-node1':

    - cluster.haproxy-outside

  'linux-node2':

    - cluster.haproxy-outside

On node 1 and node 2, change the httpd listen port:

vim /etc/httpd/conf/httpd.conf   # change port 80 to 8080

Listen 8080  

Then restart: /etc/init.d/httpd restart

 

vim /var/www/html/index.html

linux-node1  # on node 2, write linux-node2 instead

 

Open 10.0.0.7:8888/haproxy-status in a browser to view the stats and health-check page.

Username/password: haproxy/saltstack

 

[root@linux-node1 html]# cd /srv/salt/prod/

[root@linux-node1 prod]# tree

.

|-- cluster

|   |-- files

|   |   `-- haproxy-outside.cfg

|   `-- haproxy-outside.sls

|-- haproxy

|   |-- files

|   |   |-- haproxy-1.7.9.tar.gz

|   |   `-- haproxy.init

|   `-- install.sls

`-- pkg

    `-- pkg-init.sls

 

5.2 Installing Keepalived

cd /usr/local/src && wget http://www.keepalived.org/software/keepalived-1.2.19.tar.gz && tar zxf keepalived-1.2.19.tar.gz && cd keepalived-1.2.19 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install

/usr/local/src/keepalived-1.2.19/keepalived/etc/init.d/keepalived.init # the init script

/usr/local/src/keepalived-1.2.19/keepalived/etc/keepalived/keepalived.conf # the config template

[root@linux-node1 etc]# mkdir /srv/salt/prod/keepalived

[root@linux-node1 etc]# mkdir /srv/salt/prod/keepalived/files

[root@linux-node1 etc]# cp init.d/keepalived.init /srv/salt/prod/keepalived/files/

[root@linux-node1 etc]# cp keepalived/keepalived.conf /srv/salt/prod/keepalived/files/

[root@linux-node1 keepalived]# cd /usr/local/keepalived/etc/sysconfig/

[root@linux-node1 sysconfig]# cp keepalived /srv/salt/prod/keepalived/files/keepalived.sysconfig

[root@linux-node1 etc]# cd /srv/salt/prod/keepalived/files/

[root@linux-node1 files]# vim keepalived.init

daemon /usr/local/keepalived/sbin/keepalived ${KEEPALIVED_OPTIONS}   # change the binary path loaded at startup

[root@linux-node1 files]# cp /usr/local/src/keepalived-1.2.19.tar.gz .

[root@linux-node1 files]# cd ..    

[root@linux-node1 keepalived]# vim install.sls

include:

  - pkg.pkg-init

 

keepalived-install:

  file.managed:

    - name: /usr/local/src/keepalived-1.2.19.tar.gz

    - source: salt://keepalived/files/keepalived-1.2.19.tar.gz

    - user: root

    - group: root

    - mode: 755

  cmd.run:

    - name: wget http://www.keepalived.org/software/keepalived-1.2.19.tar.gz && tar zxf keepalived-1.2.19.tar.gz && cd keepalived-1.2.19 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install

    - unless: test -d /usr/local/keepalived

    - require:

      - pkg: pkg-init

      - file: keepalived-install

 

keepalived-init:

  file.managed:

    - name: /etc/init.d/keepalived

    - source: salt://keepalived/files/keepalived.init

    - user: root

    - group: root

    - mode: 755

  cmd.run:

    - name: chkconfig --add keepalived

    - unless: chkconfig --list | grep keepalived

    - require:

      - file: keepalived-init

 

/etc/sysconfig/keepalived:

  file.managed:

    - source: salt://keepalived/files/keepalived.sysconfig

    - user: root

    - group: root

    - mode: 644

 

/etc/keepalived:

  file.directory:

    - user: root

    - group: root

    - mode: 755

 

[root@linux-node1 ~]# cd /srv/salt/prod/cluster/files/

[root@linux-node1 files]# vim haproxy-outside-keepalived.conf

! Configuration File for keepalived

global_defs {

   notification_email {

     [email protected]

   }

   notification_email_from [email protected]

   smtp_server 127.0.0.1

   smtp_connect_timeout 30

   router_id {{ ROUTEID }}

}

vrrp_instance haproxy_ha {

    state {{ STATEID }}

    interface eth0

    virtual_router_id 36

    priority {{ PRIORITYID }}

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

       10.0.0.11

    }

}

 

[root@linux-node1 cluster]# vim haproxy-outside-keepalived.sls

include:

  - keepalived.install

 

keepalived-service:

  file.managed:

    - name: /etc/keepalived/keepalived.conf

    - source: salt://cluster/files/haproxy-outside-keepalived.conf

    - user: root

    - group: root

    - mode: 644

    - template: jinja

    {% if grains['fqdn'] == 'linux-node1' %}

    - ROUTEID: haproxy_ha

    - STATEID: MASTER

    - PRIORITYID: 150

    {% elif grains['fqdn'] == 'linux-node2' %}

    - ROUTEID: haproxy_ha

    - STATEID: BACKUP

    - PRIORITYID: 100

    {% endif %}

  service.running:

    - name: keepalived

    - enable: True

    - watch:

      - file: keepalived-service

 

[root@linux-node1 cluster]# salt '*' state.sls cluster.haproxy-outside-keepalived env=prod

[root@linux-node1 base]# cd /srv/salt/base/

[root@linux-node1 base]# vim top.sls

base:

  '*':

    - init.env_init

 

prod:

  'linux-node1':

    - cluster.haproxy-outside

    - cluster.haproxy-outside-keepalived

  'linux-node2':

    - cluster.haproxy-outside

    - cluster.haproxy-outside-keepalived

Verify keepalived failover:

[root@linux-node1 prod]# ip ad li

1: lo: mtu 65536 qdisc noqueue state UNKNOWN

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:9d:57:e8 brd ff:ff:ff:ff:ff:ff

    inet 10.0.0.7/24 brd 10.0.0.255 scope global eth0

    inet 10.0.0.11/32 scope global eth0

    inet6 fe80::20c:29ff:fe9d:57e8/64 scope link

       valid_lft forever preferred_lft forever

 

[root@linux-node2 html]# ip ad li

1: lo: mtu 65536 qdisc noqueue state UNKNOWN

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

    inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

    inet6 fe80::20c:29ff:feca:4195/64 scope link

       valid_lft forever preferred_lft forever    

[root@linux-node1 prod]# /etc/init.d/keepalived stop

Stopping keepalived:                                       [  OK  ]

[root@linux-node2 html]# ip ad li

1: lo: mtu 65536 qdisc noqueue state UNKNOWN

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

    inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

    inet 10.0.0.11/32 scope global eth0

    inet6 fe80::20c:29ff:feca:4195/64 scope link

       valid_lft forever preferred_lft forever

[root@linux-node1 prod]# /etc/init.d/keepalived start

Starting keepalived:                                       [  OK  ]

[root@linux-node2 html]# ip ad li

1: lo: mtu 65536 qdisc noqueue state UNKNOWN

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

    inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

    inet6 fe80::20c:29ff:feca:4195/64 scope link

       valid_lft forever preferred_lft forever

[root@linux-node1 prod]# vim /srv/salt/prod/cluster/files/haproxy-outside.cfg    

balance roundrobin   # roundrobin means round-robin; source pins each client IP to one server

 

5.3 Installing zabbix-agent

[root@linux-node1 prod]# cd /srv/salt/base/init

[root@linux-node1 init]# vim zabbix_agent.sls

zabbix-agent-install:

  pkg.installed:

    - name: zabbix-agent

 

  file.managed:

    - name: /etc/zabbix/zabbix_agentd.conf

    - source: salt://init/files/zabbix_agent.conf

    - template: jinja

    - defaults:

      Server: {{ pillar['zabbix-agent']['Zabbix_Server'] }}

    - require:

      - pkg: zabbix-agent-install

 

  service.running:

    - name: zabbix-agent

    - enable: True

    - watch:

      - pkg: zabbix-agent-install

      - file: zabbix-agent-install

[root@linux-node1 init]# vim /etc/salt/master

pillar_roots:

  base:

    - /srv/pillar/base

[root@linux-node1 init]# mkdir /srv/pillar/base

[root@linux-node1 init]# /etc/init.d/salt-master restart

[root@linux-node1 init]# cd /srv/pillar/base/

[root@linux-node1 base]# vim top.sls

base:

  '*':

    - zabbix

[root@linux-node1 base]# vim zabbix.sls

zabbix-agent:

  Zabbix_Server: 10.0.0.7

[root@linux-node1 base]# cd /srv/salt/base/init/files

[root@linux-node1 files]# cp /etc/zabbix/zabbix_agentd.conf ./zabbix_agent.conf   # the packaged file is zabbix_agentd.conf; save it under the name the state expects

[root@linux-node1 files]# vim zabbix_agent.conf  # reference the pillar value with template syntax

Server={{ Server }}

 

[root@linux-node1 init]# vim env_init.sls

include:

  - init.dns

  - init.history

  - init.audit

  - init.sysctl

  - init.zabbix_agent

[root@linux-node1 ~]# salt '*' state.highstate

 

For the nginx + PHP and memcached installs, see:

https://github.com/a7260488/slat-test

 

percona-zabbix-templates  # templates for monitoring MySQL with Zabbix

5.4 Configuring master-syndic

Functionally it is somewhat similar to zabbix-proxy.

[root@linux-node2 ~]# yum install salt-master salt-syndic -y

[root@linux-node2 ~]# vim /etc/salt/master

syndic_master: 10.0.0.7


[root@linux-node2 ~]# /etc/init.d/salt-master start

Starting salt-master daemon:                               [  OK  ]

[root@linux-node2 ~]# /etc/init.d/salt-syndic start

Starting salt-syndic daemon:                               [  OK  ]

[root@linux-node1 ~]# vim /etc/salt/master

order_masters: True

[root@linux-node1 ~]# /etc/init.d/salt-master restart

[root@linux-node1 ~]# /etc/init.d/salt-minion stop

Stopping salt-minion daemon:                               [  OK  ]

[root@linux-node2 ~]# /etc/init.d/salt-minion stop

Stopping salt-minion daemon:                               [  OK  ]

[root@linux-node2 ~]# salt-key -D

[root@linux-node1 ~]# cd /etc/salt/pki/minion/

[root@linux-node1 minion]# rm -fr *

[root@linux-node2 ~]# cd /etc/salt/pki/minion

[root@linux-node2 minion]# rm -fr *

[root@linux-node1 salt]# vim /etc/salt/minion

master: 10.0.0.8

[root@linux-node2 salt]# vim /etc/salt/minion

master: 10.0.0.8

[root@linux-node1 salt]# /etc/init.d/salt-minion start

Starting salt-minion daemon:                               [  OK  ]

[root@linux-node2 salt]# /etc/init.d/salt-minion start

Starting salt-minion daemon:                               [  OK  ]

[root@linux-node1 minion]# salt-key -A

The following keys are going to be accepted:

Unaccepted Keys:

linux-node2

Proceed? [n/Y] y

Key for minion linux-node2 accepted.

[root@linux-node1 minion]# salt-key

Accepted Keys:

linux-node2

Denied Keys:

Unaccepted Keys:

Rejected Keys:

[root@linux-node2 salt]# salt-key

Accepted Keys:

Denied Keys:

Unaccepted Keys:

linux-node1

linux-node2

Rejected Keys:

[root@linux-node2 salt]# salt-key -A

The following keys are going to be accepted:

Unaccepted Keys:

linux-node1

linux-node2

Proceed? [n/Y] y

Key for minion linux-node1 accepted.

Key for minion linux-node2 accepted.

 

5.5 SaltStack Auto-Scaling

Zabbix monitoring ---> action ---> create a VM / Docker container ---> deploy services ---> deploy code ---> test status ---> join the cluster ---> add to monitoring ---> notify

Download the etcd binary release:

https://github.com/coreos/etcd/releases/download/v2.2.1/etcd-v2.2.1-linux-amd64.tar.gz

rz etcd-v2.2.1-linux-amd64.tar.gz   # upload the binary tarball

[root@linux-node1 src]# tar zxf etcd-v2.2.1-linux-amd64.tar.gz

[root@linux-node1 src]# cd etcd-v2.2.1-linux-amd64

[root@linux-node1 etcd-v2.2.1-linux-amd64]# cp etcd etcdctl /usr/local/bin/

[root@linux-node1 etcd-v2.2.1-linux-amd64]# ./etcd &

Or start it like this:

nohup etcd --name auto_scale --data-dir /data/etcd/ \
--listen-peer-urls 'http://10.0.0.7:2380,http://10.0.0.7:7001' \
--listen-client-urls 'http://10.0.0.7:2379,http://10.0.0.7:4001' \
--advertise-client-urls 'http://10.0.0.7:2379,http://10.0.0.7:4001' &
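
A quick sanity check that etcd is up and answering on its client port:

curl -s http://10.0.0.7:2379/version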

Set a key's value:

[root@linux-node1 wal]# curl -s http://localhost:2379/v2/keys/message -XPUT -d value="Hello world" | python -m json.tool      

{

    "action": "set",

    "node": {

        "createdIndex": 8,

        "key": "/message",

        "modifiedIndex": 8,

        "value": "Hello world"

    },

    "prevNode": {

        "createdIndex": 7,

        "key": "/message",

        "modifiedIndex": 7,

        "value": "Hello world"

    }

}

Get the key's value:

[root@linux-node1 wal]# curl -s http://localhost:2379/v2/keys/message | python -m json.tool
{

    "action": "get",

    "node": {

        "createdIndex": 8,

        "key": "/message",

        "modifiedIndex": 8,

        "value": "Hello world"

    }

}

Delete the key:

[root@linux-node1 wal]# curl -s http://localhost:2379/v2/keys/message -XDELETE |python -m json.tool      

{

    "action": "delete",

    "node": {

        "createdIndex": 8,

        "key": "/message",

        "modifiedIndex": 9

    },

    "prevNode": {

        "createdIndex": 8,

        "key": "/message",

        "modifiedIndex": 8,

        "value": "Hello world"

    }

}

Fetching the key again after deletion returns "Key not found":

[root@linux-node1 wal]# curl -s http://localhost:2379/v2/keys/message | python -m json.tool
{

    "cause": "/message",

    "errorCode": 100,

    "index": 9,

    "message": "Key not found"

}

Set a key with a TTL; after 5 seconds it expires and further lookups return "Key not found":

[root@linux-node1 wal]# curl -s http://localhost:2379/v2/keys/ttl_use -XPUT -d value="Hello world 1" -d ttl=5 | python -m json.tool

{

    "action": "set",

    "node": {

        "createdIndex": 10,

        "expiration": "2017-11-17T12:59:41.572099187Z",

        "key": "/ttl_use",

        "modifiedIndex": 10,

        "ttl": 5,

        "value": ""

    }

}

 

[root@linux-node1 ~]# vim /etc/salt/master  # add at the end of the file

etcd_pillar_config:

  etcd.host: 10.0.0.7

  etcd.port: 4001

 

ext_pillar:

  - etcd: etcd_pillar_config root=/salt/haproxy/

 

[root@linux-node1 ~]# /etc/init.d/salt-master restart

[root@linux-node1 ~]# curl -s http://localhost:2379/v2/keys/salt/haproxy/backend_www_oldboyedu_com/web-node1 -XPUT -d value="10.0.0.7:8080" | python -m json.tool       

{

    "action": "set",

    "node": {

        "createdIndex": 10,

        "key": "/salt/haproxy/backend_www_oldboyedu_com/web-node1", #添加一个web-node1的节点

        "modifiedIndex": 10,

        "value": "10.0.0.7:8080"

    }

}

[root@linux-node1 ~]# pip install python-etcd   # required by the etcd ext_pillar

[root@linux-node1 etcd-v2.2.1-linux-amd64]# salt '*' pillar.items

linux-node2:

    ----------

    backend_www_oldboyedu_com:

        ----------

        web-node1:

            10.0.0.7:8080

    zabbix-agent:

        ----------

        Zabbix_Server:

            10.0.0.7

linux-node1:

    ----------

    backend_www_oldboyedu_com:

        ----------

        web-node1:

            10.0.0.7:8080

    zabbix-agent:

        ----------

        Zabbix_Server:

            10.0.0.7
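
Scaling out is now just another key write: the Jinja loop added below renders one server line per key. A hypothetical second backend would be registered like this, followed by a pillar refresh:

curl -s http://localhost:2379/v2/keys/salt/haproxy/backend_www_oldboyedu_com/web-node2 -XPUT -d value="10.0.0.8:8080" | python -m json.tool

salt '*' saltutil.refresh_pillar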

 

[root@linux-node1 ~]# vi /srv/salt/prod/cluster/files/haproxy-outside.cfg  # at the end, replace the static server lines with this loop

{% for web,web_ip in pillar.backend_www_oldboyedu_com.iteritems() -%}

server {{ web }} {{ web_ip }} check inter 2000 rise 30 fall 15

{% endfor %}
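
With the two backend keys above in etcd, the rendered section of the config would come out roughly as:

server web-node1 10.0.0.7:8080 check inter 2000 rise 30 fall 15

server web-node2 10.0.0.8:8080 check inter 2000 rise 30 fall 15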

vim /srv/salt/prod/cluster/haproxy-outside.sls   # add the following under the file.managed block

    - template: jinja

Restart the master.

Apply the highstate: salt '*' state.highstate

Reposted from: https://www.cnblogs.com/benjamin77/p/7868451.html
