SaltStack
Introduction
The name comes from Salt Lake City
Written in Python
Run modes (examples below):
- local (masterless)
- master/minion
- salt-ssh
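A quick illustration of the three run modes (the targets are examples):
salt-call --local test.ping   # local/masterless mode, run directly on a minion
salt '*' test.ping            # master/minion mode, run from the master
salt-ssh '*' test.ping        # salt-ssh mode, agentless, driven by a roster file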
Features:
- Remote execution
- Configuration management
- Cloud management
Hands-on
Environment:
- One master, two minions
- CentOS 6
- EPEL repository configured
Installation
- Install with yum
yum install salt-master salt-minion -y   # on the master
yum install salt-minion -y               # on the minions
chkconfig salt-master on
chkconfig salt-minion on
/etc/init.d/salt-master start
Edit the minion config file and set master and id, for example:
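A minimal /etc/salt/minion sketch (the master IP and minion id below are assumed values for this environment):
master: 10.0.0.7
id: linux-node1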
/etc/init.d/salt-minion start
- Key authentication
# Authentication uses PKI key pairs, generated at service start under:
/etc/salt/pki/{master,minion}
# salt-key manages the keys; wildcards are supported
salt-key -a '*'   # accept pending keys
salt-key -h
- Remote execution examples:
salt '*' test.ping
salt '*' cmd.run 'uptime'
# Install Apache
## master config file (YAML: spaces only, no tabs)
file_roots:
  base:
    - /srv/salt
mkdir /srv/salt && cd /srv/salt
vim apache.sls
apache-install:
  pkg.installed:
    - names:
      - httpd
      - httpd-devel

apache-service:
  service.running:
    - name: httpd
    - enable: True
    - reload: True
salt '*' state.sls apache
## Entry state file: top.sls
base:
  '*':
    - apache
salt '*' state.highstate
salt-call --version
SaltStack data systems
Grains
Pillar
Grains
Collected and stored on the minion side
salt 'linux-node1' grains.ls
salt 'linux-node1' grains.items
salt 'linux-node1' grains.item fqdn
salt 'linux-node1' grains.get fqdn
salt 'linux-node1' grains.get ip_interfaces:eth0
salt -G os:CentOS cmd.run 'w'
vi /etc/salt/minion   # custom grains can be defined here; restart the minion service to take effect (example below)
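A sketch of the grains section in /etc/salt/minion (the roles value is an assumption matching the command below):
grains:
  roles:
    - memcache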
salt -G 'roles:memcache' cmd.run 'echo hehe'
vi /etc/salt/grains
web: nginx
salt -G 'web:nginx' cmd.run 'echo hehe'
vi /srv/salt/top.sls
base:
  'web:nginx':
    - match: grain
    - apache
Pillar
Used to assign data to specific minions
salt '*' pillar.items   # the built-in pillar data is disabled by default
Enable the built-in pillar data (shows master config options in pillar):
vi /etc/salt/master
pillar_opts: True
/etc/init.d/salt-master restart
Define pillar_roots:
vi /etc/salt/master
pillar_roots:
  base:
    - /srv/pillar
Pillar also has a top file
mkdir /srv/pillar
vi /srv/pillar/apache.sls
{% if grains['os'] == 'CentOS' %}
apache: httpd
{% elif grains['os'] == 'Debian' %}
apache: apache2
{% endif %}
Specify which minions can see it:
vi /srv/pillar/top.sls
base:
  '*':
    - apache
salt '*' pillar.items
salt '*' saltutil.refresh_pillar   # refresh before the new pillar data can be used
salt -I 'apache:httpd' test.ping
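States typically consume pillar values through Jinja; a minimal sketch (the state id is an assumption):
apache-install:
  pkg.installed:
    - name: {{ pillar['apache'] }}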
Remote execution
https://docs.saltstack.com/en/latest/
https://docs.saltstack.com/en/latest/topics/execution/index.html
Configuration management
Built on top of remote execution
https://docs.saltstack.com/en/latest/topics/states/index.html
YAML syntax used:
a colon plus two-space indentation expresses hierarchy
a hyphen denotes a list item, followed by one space
Jinja2 templates (combined sketch below):
Declare the template engine in the state:
- template: jinja
Variables:
- defaults:
    DNS_SERVER: 10.0.0.2
{{ }} references a variable:
nameserver {{ DNS_SERVER }}
{{ grains['fqdn_ip4'] }}
File management:
/etc/resolv.conf:
  file.managed:
    - source: salt://files/resolv.conf
    - user: root
    - group: root
    - mode: 644
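Putting the Jinja options and file.managed together, a minimal sketch (the paths and DNS value are assumptions):
/etc/resolv.conf:
  file.managed:
    - source: salt://files/resolv.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - defaults:
        DNS_SERVER: 10.0.0.2
# and in files/resolv.conf on the master:
nameserver {{ DNS_SERVER }}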
Case study:
Approach:
- System initialization
- Function modules
- Business modules
State requisites and conditions (sketch below):
onlyif: run the state only if the command returns true (exit code 0)
unless: run the state only if the command returns false (non-zero exit code)
require: depend on another state
watch: watch another state and respond to its changes
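A minimal sketch of these requisites (state ids, package and file names are assumptions; nginx-install and the config file state would be defined elsewhere):
nginx-conf-backup:
  cmd.run:
    - name: cp /etc/nginx/nginx.conf /tmp/nginx.conf.bak
    - onlyif: test -f /etc/nginx/nginx.conf   # only run when the config exists
    - unless: test -f /tmp/nginx.conf.bak     # skip if the backup already exists

nginx-service:
  service.running:
    - name: nginx
    - require:          # nginx must be installed before the service state runs
      - pkg: nginx-install
    - watch:            # restart/reload when the config file state changes
      - file: /etc/nginx/nginx.conf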
SaltStack executes on minions in parallel; Ansible executes serially
- Configure the base environment, then do a dry run:
salt '*' state.highstate test=True
# grep -Ev "^$|^#" /etc/salt/master
file_roots:
  base:
    - /srv/salt/base
  dev:
    - /srv/salt/dev
  prod:
    - /srv/salt/prod
# service salt-master restart
# mkdir /srv/salt/base
# cd /srv/salt/base/
# mkdir files init
# cat init/dns.sls
/etc/resolv.conf:
  file.managed:
    - source: salt://files/resolv.conf
    - user: root
    - group: root
    - mode: 644
# cp /etc/resolv.conf files/
# cat init/history.sls
/etc/profile:
  file.append:
    - text:
      - export HISTTIMEFORMAT="%F %T `whoami`"
# cat init/audit.sls
/etc/bashrc:
  file.append:
    - text:
      - export PROMPT_COMMAND='{ msg=$(history 1|{ read x y; echo $y; });logger "[euid=$(whoami)]":$(who am i):[`pwd`]"$msg";}'
# cat init/sysctl.sls
vm.swappiness:
  sysctl.present:
    - value: 0
net.ipv4.ip_local_port_range:
  sysctl.present:
    - value: 10000 65000
fs.file-max:
  sysctl.present:
    - value: 100000
# cat init/env_init.sls
include:
  - init.dns
  - init.history
  - init.audit
  - init.sysctl
# cat top.sls
base:
  '*':
    - init.env_init
# tree
.
├── files
│   └── resolv.conf
├── init
│   ├── audit.sls
│   ├── dns.sls
│   ├── env_init.sls
│   ├── history.sls
│   └── sysctl.sls
└── top.sls
- Base environment for compiling from source
# mkdir -p /srv/salt/prod/pkg
# cd /srv/salt/prod/
# cat pkg/pkg-init.sls
pkg-init:
  pkg.installed:
    - names:
      - gcc
      - gcc-c++
      - glibc
      - make
      - autoconf
      - openssl
      - openssl-devel
- HAProxy module; dry-run with
salt 'linux-node1' state.sls haproxy.install env=prod test=True
# mkdir -p /srv/salt/prod/haproxy{,/files}
# tree /srv/salt/prod/
/srv/salt/prod/
├── haproxy
│ └── files
└── pkg
# curl -o haproxy/files/haproxy-1.6.2.tar.gz http://www.haproxy.org/download/1.6/src/haproxy-1.6.2.tar.gz
# cd /srv/salt/prod/haproxy/
# cat install.sls
include:
  - pkg.pkg-init

haproxy-install:
  file.managed:
    - name: /usr/local/src/haproxy-1.6.2.tar.gz
    - source: salt://haproxy/files/haproxy-1.6.2.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /usr/local/src && tar zxf haproxy-1.6.2.tar.gz && cd haproxy-1.6.2 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy
    - unless: test -d /usr/local/haproxy
    - require:
      - pkg: pkg-init
      - file: haproxy-install
net.ipv4.ip_nonlocal_bind:
  sysctl.present:
    - value: 1

haproxy-config-dir:
  file.directory:
    - name: /etc/haproxy
    - user: root
    - group: root
    - mode: 755

haproxy-init:
  file.managed:
    - name: /etc/init.d/haproxy
    - source: salt://haproxy/files/haproxy.init   # see the note below on obtaining this script
    - user: root
    - group: root
    - mode: 755
    - require:
      - cmd: haproxy-install
  cmd.run:
    - name: chkconfig --add haproxy
    - unless: chkconfig --list | grep haproxy
    - require:
      - file: haproxy-init
      - cmd: haproxy-install
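The haproxy.init script referenced above has to be placed in haproxy/files/ on the master; one option (assuming the 1.6.x source layout, where the script ships as examples/haproxy.init) is to copy it out of the tarball:
cd /srv/salt/prod/haproxy
tar zxf files/haproxy-1.6.2.tar.gz -C /tmp
cp /tmp/haproxy-1.6.2/examples/haproxy.init files/haproxy.init
chmod +x files/haproxy.init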
- Create the cluster configuration
# mkdir -p /srv/salt/prod/cluster{,/files}
# cat haproxy-outside.sls
include:
  - haproxy.install

haproxy-service:
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://cluster/files/haproxy-outside.cfg   # example contents below
    - user: root
    - group: root
    - mode: 644
  service.running:
    - name: haproxy
    - enable: True
    - reload: True
    - require:
      - cmd: haproxy-init
    - watch:
      - file: haproxy-service
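A minimal sketch of cluster/files/haproxy-outside.cfg (the VIP 10.0.0.11, backend addresses, and ports are assumptions, not from these notes):
global
    maxconn 100000
    daemon
    pidfile /var/run/haproxy.pid

defaults
    mode http
    maxconn 100000
    timeout connect 5000ms
    timeout client  50000ms
    timeout server  50000ms

listen stats
    mode http
    bind 0.0.0.0:8888
    stats enable
    stats uri /haproxy-status

frontend frontend_www
    bind 10.0.0.11:80
    mode http
    default_backend backend_www

backend backend_www
    balance roundrobin
    server web-node1 10.0.0.7:8080 check inter 2000 rise 3 fall 3
    server web-node2 10.0.0.8:8080 check inter 2000 rise 3 fall 3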
# cat /srv/salt/base/top.sls
base:
  '*':
    - init.env_init
prod:
  '*':
    - cluster.haproxy-outside
- Keepalived installation
# mkdir -p /srv/salt/prod/keepalived/files
# curl -o /srv/salt/prod/keepalived/files/keepalived-1.2.19.tar.gz http://www.keepalived.org/software/keepalived-1.2.19.tar.gz
# cat keepalived/install.sls
include:
  - pkg.pkg-init

keepalived-install:
  file.managed:
    - name: /usr/local/src/keepalived-1.2.19.tar.gz
    - source: salt://keepalived/files/keepalived-1.2.19.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /usr/local/src && tar xzf keepalived-1.2.19.tar.gz && cd keepalived-1.2.19 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install
    - unless: test -d /usr/local/keepalived
    - require:
      - pkg: pkg-init
      - file: keepalived-install

keepalived-init:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived.init   # see the note below on obtaining this script
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: chkconfig --add keepalived
    - unless: chkconfig --list | grep keepalived
    - require:
      - file: keepalived-init

/etc/sysconfig/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived.sysconfig
    - user: root
    - group: root
    - mode: 644

/etc/keepalived:
  file.directory:
    - user: root
    - group: root
    - mode: 755
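keepalived.init and keepalived.sysconfig referenced above also need to be placed in keepalived/files/ on the master; one option (the in-tree paths below are from the 1.2.19 source and should be verified) is to copy them out of the tarball:
cd /srv/salt/prod/keepalived
tar xzf files/keepalived-1.2.19.tar.gz -C /tmp
cp /tmp/keepalived-1.2.19/keepalived/etc/init.d/keepalived.init files/keepalived.init
cp /tmp/keepalived-1.2.19/keepalived/etc/init.d/keepalived.sysconfig files/keepalived.sysconfig
chmod +x files/keepalived.init
# adjust the daemon path inside keepalived.init to /usr/local/keepalived/sbin/keepalived if it points elsewhere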
- Referencing Keepalived from the business (cluster) layer
# cat cluster/haproxy-outside-keepalived.sls
include:
  - keepalived.install

keepalived-service:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://cluster/files/haproxy-outside-keepalived.conf   # assumed template path; sketch below
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    {% if grains['fqdn'] == 'linux-node1' %}
    - ROUTEID: haproxy_ha
    - STATEID: MASTER
    - PRIORITYID: 150
    {% elif grains['fqdn'] == 'linux-node2' %}
    - ROUTEID: haproxy_ha
    - STATEID: BACKUP
    - PRIORITYID: 100
    {% endif %}
  service.running:
    - name: keepalived
    - enable: True
    - watch:
      - file: keepalived-service
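A sketch of the Jinja template cluster/files/haproxy-outside-keepalived.conf that consumes the variables passed above (the interface, virtual_router_id, auth_pass, and VIP 10.0.0.11 are assumptions):
! Configuration File for keepalived
global_defs {
   router_id {{ ROUTEID }}
}

vrrp_instance haproxy_ha {
    state {{ STATEID }}
    interface eth0
    virtual_router_id 36
    priority {{ PRIORITYID }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.11
    }
}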
# cat ../base/top.sls
base:
  '*':
    - init.env_init
prod:
  '*':
    - cluster.haproxy-outside
    - cluster.haproxy-outside-keepalived
- Zabbix agent state
# pwd
/srv/salt/base
# cat zabbix_agent.sls
zabbix-agent-install:
  pkg.installed:
    - name: zabbix-agent
  file.managed:
    - name: /etc/zabbix/zabbix_agentd.conf
    - source: salt://init/files/zabbix_agentd.conf   # see the template note below
    - template: jinja
    - defaults:
        Server: {{ pillar['zabbix-agent']['Zabbix_Server'] }}
    - require:
      - pkg: zabbix-agent-install
  service.running:
    - name: zabbix-agent
    - enable: True
    - watch:
      - pkg: zabbix-agent-install
      - file: zabbix-agent-install
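Inside the zabbix_agentd.conf template, the Server default passed above is consumed as a Jinja variable; the relevant line would look like:
Server={{ Server }}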
# grep -Ev "^$|^#" /etc/salt/master
file_roots:
  base:
    - /srv/salt/base
  dev:
    - /srv/salt/dev
  prod:
    - /srv/salt/prod
pillar_roots:
  base:
    - /srv/pillar/base
# mkdir -p /srv/pillar/base
# cd /srv/pillar/base
# vi zabbix.sls
# cat top.sls
base:
  '*':
    - zabbix
# cat zabbix.sls
zabbix-agent:
  Zabbix_Server: 10.0.0.7
To do:
- Use SaltStack to automate the installation and configuration of Nginx + PHP + Memcached
https://github.com/unixhot/saltbook-code
The transport layer is based on ZeroMQ; the master listens on ports 4505 and 4506
Minions keep a persistent (long-lived) connection to the master
Add etcd to enable automated scale-out
based on the etcd ext_pillar module (sketch below)
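A sketch of wiring etcd into pillar through ext_pillar in /etc/salt/master (requires the python-etcd library on the master; the host, port, and root path are assumptions):
my_etcd_config:
  etcd.host: 10.0.0.7
  etcd.port: 4001

ext_pillar:
  - etcd: my_etcd_config root=/salt/haproxy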