The telnet service, and inspecting its traffic with a packet capture
yum install -y telnet-server
systemctl restart telnet.socket
telnet 10.0.0.61 23    (telnet takes a host and a port, not user@host)
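The capture itself isn't shown in the notes; a minimal sketch, assuming the server's interface is eth0 (an assumption). It demonstrates that telnet sends everything, including the password, in cleartext, which is the motivation for ssh:

# on the server: dump telnet traffic with payloads (eth0 is an assumption)
tcpdump -i eth0 -nn -X port 23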
ssh client commands
ssh -p22 root@10.0.0.61 whoami
123456    (the password, typed at the prompt)
The sshd server configuration file in detail
[root@m01 ~]# egrep -i '^port|^permitroot' /etc/ssh/sshd_config
Port 52113
PermitRootLogin no
[root@m01 ~]# systemctl reload sshd
[root@m01 ~]# ss -lntup |grep ssh
tcp LISTEN 0 128 *:52113 *:* users:(("sshd",pid=7129,fd=3))
tcp LISTEN 0 128 :::52113 :::* users:(("sshd",pid=7129,fd=4))
[root@m01 ~]# grep -in ^listenaddress /etc/ssh/sshd_config
20:ListenAddress 172.16.1.61
Example: multiple subnets, multiple ports
External (10.0.0.61, port 52113)      Internal (172.16.1.61, port 22)
ssh  -p 52113 10.0.0.61               ssh  -p 22 172.16.1.61
scp  -P 52113 10.0.0.61               scp  -P 22 172.16.1.61
sftp -P 52113 10.0.0.61               sftp -P 22 172.16.1.61
[root@m01 ~]# grep -in ^listenaddress /etc/ssh/sshd_config
20:ListenAddress 10.0.0.61:52113
21:ListenAddress 172.16.1.61:22
[root@m01 ~]# systemctl reload sshd
[root@m01 ~]# ss -lntup |grep sshd
tcp LISTEN 0 128 10.0.0.61:52113 *:* users:(("sshd",pid=7129,fd=4))
tcp LISTEN 0 128 172.16.1.61:22 *:* users:(("sshd",pid=7129,fd=3))
[root@m01 ~]# ssh -p52113 10.0.0.61 hostname
root@10.0.0.61's password:
m01
[root@m01 ~]# ssh -p22 172.16.1.61 hostname
root@172.16.1.61's password:
m01
yum install -y sshpass pssh
error (sshpass and pssh are not in the CentOS base repositories)
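A likely fix, assuming the install failed because the packages were not found: on CentOS 7 both tools ship in the EPEL repository.

yum install -y epel-release
yum install -y sshpass pssh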
Setting up key-based authentication:
Create the key pair
[root@m01 ~]# ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/root/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_dsa.
Your public key has been saved in /root/.ssh/id_dsa.pub.
The key fingerprint is:
SHA256:VW1UamyTZ0YDIHQxi00U7DrtJAX/BN0k5cbuhNRRA58 root@m01
The key's randomart image is:
+---[DSA 1024]----+
| .ooO*BB=|
| .+ooO==|
| .=o.oBE+|
| . +.++= |
| S + o. o |
| + o .o |
| = . |
| . |
| |
+----[SHA256]-----+
Inspect the key files
[root@m01 ~]# ll ~/.ssh/
total 12
-rw------- 1 root root 668 May 27 12:13 id_dsa
-rw-r--r-- 1 root root 598 May 27 12:13 id_dsa.pub
-rw-r--r-- 1 root root 695 May 27 11:22 known_hosts
Push the public key to the remote host
[root@m01 ~]# ssh-copy-id -i ~/.ssh/id_dsa.pub 172.16.1.41
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_dsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@172.16.1.41's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh '172.16.1.41'"
and check to make sure that only the key(s) you wanted were added.
Test it
[root@m01 ~]# ssh 172.16.1.41 hostname
backup
Running commands on many hosts in parallel
[root@m01 ~]# cat hosts.txt
root@172.16.1.7:22
root@172.16.1.41:22
[root@m01 ~]# pssh -Ph hosts.txt hostname
172.16.1.41: backup
[1] 12:42:51 [SUCCESS] root@172.16.1.41:22
172.16.1.7: web01
[2] 12:42:51 [SUCCESS] root@172.16.1.7:22
[root@m01 ~]# prsync -A -azh hosts.txt /etc/hostname /tmp/
Warning: do not enter your password if anyone else has superuser
privileges or access to your account.
Password:
[1] 12:52:11 [SUCCESS] root@172.16.1.41:22
[2] 12:52:11 [SUCCESS] root@172.16.1.7:22
[root@m01 ~]# pssh -A -Ph hosts.txt cat /tmp/hostname
Warning: do not enter your password if anyone else has superuser
privileges or access to your account.
Password:
172.16.1.41: m01
[1] 12:52:32 [SUCCESS] root@172.16.1.41:22
172.16.1.7: m01
[2] 12:52:32 [SUCCESS] root@172.16.1.7:22
ssh "$@" "
exec sh -c '
cd ;
umask 077 ;
mkdir -p .ssh &&
cat >> .ssh/authorized_keys || exit 1 ; if type restore
con >/dev/null 2>&1 ;
then restorecon -F .ssh .ssh/authorized_keys ;
fi'"
Passwordless connections
sshpass -p123456 ssh 172.16.1.7 hostname
ssh-keygen -t dsa -f ~/.ssh/id_dsa -P ''
-t  key type (dsa, rsa, ...)
-f  private key file location
-P  passphrase (empty string = no passphrase)
sshpass -p 123456 ssh -oStrictHostKeyChecking=no 172.16.1.7 hostname
web01
sshpass -p123456 ssh-copy-id -oStrictHostKeyChecking=no 172.16.1.7
for loops
Syntax:
for variable in list
do
    commands
done
for ip in 7 41 {1..6}
do
echo 172.16.1.$ip
done
Exercise: write a for loop that distributes the key to 172.16.1.7 and 172.16.1.41 in one go.
Create the key:
ssh-keygen -t dsa -f ~/.ssh/id_dsa -P ''
[root@m01 ~]# vim /server/scripts/fenfa.sh
#!/bin/bash
#1. make key pair
ssh-keygen -t dsa -f ~/.ssh/id_dsa -P ''
#2. fenfa (distribute) the public key
for ip in 7 41 31
do
sshpass -p123456 ssh-copy-id -oStrictHostKeyChecking=no 172.16.1.$ip
done
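Run the script, then verify that every host answers without a password prompt (a sketch over the same host list):

sh /server/scripts/fenfa.sh
for ip in 7 41 31
do
    ssh 172.16.1.$ip hostname
done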
Ansible (installed with: yum install -y ansible, from the EPEL repository). Key paths:
/etc/ansible
/etc/ansible/ansible.cfg
/etc/ansible/hosts      (the inventory of managed hosts)
/etc/ansible/roles
[root@m01 ~]# tail -3 /etc/ansible/hosts
[oldboy]
172.16.1.7
172.16.1.41
[root@m01 ~]# ansible oldboy -m ping
172.16.1.41 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": false,
"ping": "pong"
}
172.16.1.7 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": false,
"ping": "pong"
}
[root@m01 ~]# ansible oldboy -m command -a 'hostname '
172.16.1.41 | CHANGED | rc=0 >>
backup
172.16.1.7 | CHANGED | rc=0 >>
web01
command is the default module, so -m command can be omitted:
[root@m01 ~]# ansible oldboy -a 'hostname'
172.16.1.41 | CHANGED | rc=0 >>
backup
172.16.1.7 | CHANGED | rc=0 >>
web01
[root@m01 ~]# ansible 172.16.1.7 -a 'hostname '
172.16.1.7 | CHANGED | rc=0 >>
web01
[root@m01 ~]# ansible all -a 'hostname '
172.16.1.7 | CHANGED | rc=0 >>
web01
172.16.1.41 | CHANGED | rc=0 >>
backup
[root@m01 ~]# #ansible all -m copy -a 'src=/etc/hostname dest=/tmp/'
[root@m01 ~]# ansible all -a 'cat /tmp/hostname '
172.16.1.7 | CHANGED | rc=0 >>
m01
172.16.1.41 | CHANGED | rc=0 >>
m01
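The copy module also takes ownership, permission, and backup parameters; for example (the values below are illustrative):

ansible all -m copy -a 'src=/etc/hostname dest=/tmp/ owner=root group=root mode=0644 backup=yes'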
ansible-doc -s <module>    shows that module's help and parameters
script module (transfers a local script to the remote hosts and runs it there)
[root@m01 ~]# #ansible all -m script -a "/server/scripts/yum.sh"
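The notes never show /server/scripts/yum.sh itself; a minimal sketch consistent with the ipvsadm check below (hypothetical contents):

#!/bin/bash
#hypothetical /server/scripts/yum.sh, executed on each managed host
yum install -y ipvsadm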
[root@m01 ~]# ansible all -a 'rpm -qa ipvsadm'
[WARNING]: Consider using the yum, dnf or zypper module rather than running 'rpm'. If you need to use command because
yum, dnf or zypper is insufficient you can add 'warn: false' to this command task or set 'command_warnings=False' in
ansible.cfg to get rid of this message.
172.16.1.41 | CHANGED | rc=0 >>
ipvsadm-1.27-7.el7.x86_64
172.16.1.7 | CHANGED | rc=0 >>
ipvsadm-1.27-7.el7.x86_64
yum module
ansible all -m yum -a 'name=sl state=present'
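state selects the action: present/installed installs, latest installs or upgrades, absent/removed uninstalls. For example:

ansible all -m yum -a 'name=sl state=latest'
ansible all -m yum -a 'name=sl state=absent'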
file module
[root@m01 ~]# #ansible all -m file -a 'path=/tmp/a/b/c/d/e/f/g state=directory '
[root@m01 ~]# #ansible all -m file -a 'path=/tmp/a/b/c/d/e/f/g/oldboy.txt state=touch '
[root@m01 ~]# ansible all -a 'tree /tmp/ '
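Besides state=directory and state=touch, the file module can also delete and create links; for example (paths reuse the ones created above, the link name is illustrative):

ansible all -m file -a 'path=/tmp/a/b/c/d/e/f/g/oldboy.txt state=absent'
ansible all -m file -a 'src=/etc/hostname path=/tmp/hostname.link state=link'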
for n in {1..10}
do
echo $n
done
Sync the system time every 5 minutes
*/5 * * * * ntpdate ntp1.aliyun.com >>/tmp/ntpdate.log 2>&1
(crontab fields: minute  hour  day-of-month  month  day-of-week  command)
#sync time
*/5 * * * * /sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
Cron runs jobs with a minimal PATH (by default /usr/bin and /bin), so commands like ntpdate need their absolute path, /sbin/ntpdate.
The cron module (scheduled tasks). Target crontab entry:
#sync time
*/5 * * * * /sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
ansible all -m yum -a 'name=ntpdate state=present'
ansible all -m cron -a 'name="sync time" minute="*/5" job="/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1"'
ansible all -m cron -a 'name="zwav time" state=absent '
ansible 172.16.1.7 -m mount -a "src=172.16.1.31:/data path=/backup fstype=nfs opts=defaults state=present"
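Note the mount module states: present only writes the /etc/fstab entry, mounted writes fstab and mounts immediately, unmounted unmounts but keeps the entry, absent unmounts and removes the entry. For example:

ansible 172.16.1.7 -m mount -a 'path=/backup state=unmounted'
ansible 172.16.1.7 -m mount -a 'path=/backup state=absent'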
On nfs01, /etc/exports:
/nfs 172.16.1.0/24(rw,all_squash,anonuid=888,anongid=888)
[root@nfs01 ~]# showmount -e 172.16.1.31
Export list for 172.16.1.31:
/nfs 172.16.1.0/24
/upload 172.16.1.0/24
On web01, mount the /nfs share exported by nfs01 at /upload:
[root@m01 ~]# #mount -t nfs 172.16.1.31:/nfs /upload
[root@m01 ~]# ansible 172.16.1.7 -m mount -a 'fstype=nfs src=172.16.1.31:/nfs path=/upload state=mounted'
172.16.1.7 | CHANGED => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": true,
"dump": "0",
"fstab": "/etc/fstab",
"fstype": "nfs",
"name": "/upload",
"opts": "defaults",
"passno": "0",
"src": "172.16.1.31:/nfs"
}
[root@m01 ~]# ansible 172.16.1.7 -a 'df -h'
172.16.1.7 | CHANGED | rc=0 >>
Filesystem Size Used Avail Use% Mounted on
172.16.1.31:/nfs 99G 1.7G 98G 2% /upload
[root@m01 ~]# ansible 172.16.1.7 -a 'grep upload /etc/fstab'
172.16.1.7 | CHANGED | rc=0 >>
172.16.1.31:/nfs /upload nfs defaults 0 0
playbook
[root@m01 /etc/ansible]# cat touch.yml
---
- hosts: all
tasks:
- name: make av
command: mkdir -p /tmp/a/b/c/d/e/f/g/
ansible-playbook -C touch.yml    # -C/--check: dry run, report what would change
ansible-playbook touch.yml
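Because command runs mkdir unconditionally, every run reports CHANGED; a sketch of a more idempotent variant using the file module instead:

---
- hosts: all
  tasks:
    - name: make dir tree
      file:
        path: /tmp/a/b/c/d/e/f/g
        state: directory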
[root@m01 /etc/ansible]# ansible all -a 'crontab -l'
172.16.1.7 | CHANGED | rc=0 >>
#backup
00 00 * * * sh /server/scripts/bak.sh >/dev/null 2>&1
#Ansible: sync time
*/5 * * * * /sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
172.16.1.41 | CHANGED | rc=0 >>
#Ansible: sync time
*/5 * * * * /sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
[root@m01 /etc/ansible]# cat cron-time.yml
---
- hosts: all
  tasks:
    - name: sync time
      cron: name="sync time" minute="*/5" job="/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1" state=present
Exercise: use an Ansible playbook to deploy the NFS service and mount the share on web01 and web02.
#!/bin/bash
#one key install nfs
#1.package install
yum install -y rpcbind nfs-utils
#2.configure
cp /etc/exports{,.bak}
cat >/etc/exports<<EOF
#nfs01
/nfs 172.16.1.0/24(rw,all_squash)
EOF
#3.dir owner
mkdir -p /nfs
chown nfsnobody.nfsnobody /nfs
#4.start rpcbind nfs
systemctl start rpcbind nfs
systemctl enable rpcbind nfs

Deploy the NFS service, then mount it on the [web] group hosts (web01 web02: 172.16.1.7, 172.16.1.8, 172.16.1.9).
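A sketch of a playbook for this exercise, assuming inventory groups [nfs] (nfs01, 172.16.1.31) and [web] as above; it mirrors the shell script and omits refinements such as a handler to re-export on config changes:

---
- hosts: nfs
  tasks:
    - name: install nfs packages
      yum:
        name: rpcbind,nfs-utils
        state: present
    - name: write exports
      copy:
        content: "/nfs 172.16.1.0/24(rw,all_squash)\n"
        dest: /etc/exports
        backup: yes
    - name: shared directory
      file:
        path: /nfs
        state: directory
        owner: nfsnobody
        group: nfsnobody
    - name: start and enable rpcbind and nfs
      systemd:
        name: "{{ item }}"
        state: started
        enabled: yes
      with_items:
        - rpcbind
        - nfs

- hosts: web
  tasks:
    - name: mount the nfs share
      mount:
        src: 172.16.1.31:/nfs
        path: /upload
        fstype: nfs
        state: mounted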
Exercise: on nfs01 and backup, create the directory /backup/lidao and archive /etc into /backup/lidao/etc.tar.gz (dir=/backup/lidao).

Using variables in Ansible
[root@m01 /etc/ansible]# cat 01-var.yml
---
- hosts: all
vars:
ip: 10.0.0.200
tasks:
- name: mkdir
file:
path: /oldboy/{{ ip }}
state: directory
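Run the play and confirm the variable was expanded in the path:

ansible-playbook 01-var.yml
ansible all -a 'ls -d /oldboy/10.0.0.200'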
Store the directory name in a variable:
Shell equivalent:
mkdir -p /backup/lidao
tar zcf /backup/lidao/xxx.tar.gz /etc

---
- hosts: all
vars:
dir: /backup/lidao
tasks:
- name: mkdir
file:
path: "{{dir}}"
state: directory
- name: tar
archive:
path: /etc
dest: "{{dir}}/etc.tar.gz "
Shell equivalent of capturing output in a variable:
ip=`hostname -I|awk '{print $NF}'`
[root@m01 /etc/ansible]# cat 03-var-reg.yml
---
- hosts: all
tasks:
- name: ip
shell: hostname -I|awk '{print $NF}'
register: ipaddr
- name: print ip
shell: echo {{ipaddr}}>/tmp/ip.txt
Contents of the ipaddr variable (the entire registered result object was written to the file):
[root@nfs01 ~]# cat /tmp/ip.txt
{stderr_lines: []
uchanged: True
uend: u2019-05-31 11:24:45.080356
failed: False
ustdout: u172.16.1.31,
ucmd: uhostname -I|awk '{print }',
urc: 0,
ustart: u2019-05-31 11:24:45.073817,
ustderr: u,
udelta: u0:00:00.006539,
stdout_lines: [u172.16.1.31]
}
stdout = standard output; stderr = standard error.
The debug module prints just the field you want:
[root@m01 /etc/ansible]# cat 03-var-reg.yml
---
- hosts: all
tasks:
- name: ip
shell: hostname -I|awk '{print $NF}'
register: ipaddr
- name: debug ipaddr
debug: msg={{ipaddr.stdout}}
[root@m01 /etc/ansible]# cat 04-var-reg-tar.yml
---
- hosts: all
tasks:
- name: ip
shell: hostname -I|awk '{print $NF}'
register: ipaddr
- name: date
shell: date +%F
register: time
- name: mkdir dir
file:
path: /backup/{{ipaddr.stdout}}
state: directory
- name: tar
archive:
path: /etc
dest: /backup/{{ipaddr.stdout}}/etc-{{time.stdout}}.tar.gz
[root@manager ~]# cat f7.yml
---
- hosts: all
remote_user: root
tasks:
- name: Installed Pkg
yum: name={{ item }} state=present
with_items:
- wget
- tree
- lrzsz
Shell equivalent:
for item in wget tree lrzsz
do
yum install -y $item
done
[root@manager ~]# cat f7.yml
- hosts: all
remote_user: root
tasks:
- name: Add Users
user: name={{ item.name }} groups={{ item.groups }} state=present
with_items:
- { name: 'testuser1', groups: 'bin' }
- { name: 'testuser2', groups: 'root' }
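Verify with ad-hoc commands; as a side note, since Ansible 2.5 the same iteration can be written with the loop keyword instead of with_items (both work here):

ansible all -a 'id testuser1'
ansible all -a 'id testuser2'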