[root@proxy ~]# partprobe
[root@proxy ~]# lsblk
....................................................
vdb 252:16 0 20G 0 disk
[root@proxy ~]# yum -y install targetcli.noarch
[root@proxy ~]# yum -y info targetcli
[root@proxy ~]# parted /dev/vdb mklabel gpt
[root@proxy ~]# parted /dev/vdb mkpart primary 1 100%
Create a single partition spanning the whole disk
[root@proxy ~]# lsblk /dev/vdb
[root@proxy ~]# targetcli
/> backstores/block create store /dev/vdb1
create a block-device backstore
/> iscsi/ create iqn.2018-01.cn.tedu:proxy
/> iscsi/iqn.2018-01.cn.tedu:proxy/tpg1/acls create iqn.2018-01.cn.tedu:client    allow this initiator IQN to access the target
/> iscsi/iqn.2018-01.cn.tedu:proxy/tpg1/luns create /backstores/block/store    export the backstore as a LUN
/> saveconfig    save the configuration
/> exit    quit targetcli
[root@proxy ~]# systemctl restart target
[root@proxy ~]# systemctl enable target
Layout notes -- one target server can export several backstores (the second target is sketched below):
backstore:
    1. store  /dev/vdb1  20G
    2. db     /dev/vdc1  80G
iscsi:
    iqn-2018
        acl  (client IQN whitelist)
        lun  store
    iqn-2019
        acl
        lun  db
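A sketch of how that second target could be built in targetcli; the full IQN names below are placeholders (the notes only give the shorthand iqn-2019), and /dev/vdc1 is assumed to be partitioned like /dev/vdb1:
/> backstores/block create db /dev/vdc1
/> iscsi/ create iqn.2019-01.cn.tedu:proxy
/> iscsi/iqn.2019-01.cn.tedu:proxy/tpg1/acls create iqn.2019-01.cn.tedu:client
/> iscsi/iqn.2019-01.cn.tedu:proxy/tpg1/luns create /backstores/block/db
/> saveconfig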
[root@proxy ~]# ss -utlnp | grep :3260
[root@client ~]# yum -y install iscsi-initiator-utils.i686
[root@client ~]# vim /etc/iscsi/initiatorname.iscsi
InitiatorName=iqn.2018-01.cn.tedu:client
[root@client ~]# iscsiadm --mode discoverydb --type sendtargets --portal 192.168.4.5 --discover
discoverydb: ask the target server what it shares
login: attach a discovered share    logout: detach it    (a manual login is sketched below; restarting the iscsi service logs in automatically)
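For reference, a manual login would look roughly like this (target name and portal taken from the discovery above); the notes instead rely on restarting the iscsi service:
[root@client ~]# iscsiadm --mode node --targetname iqn.2018-01.cn.tedu:proxy --portal 192.168.4.5:3260 --login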
[root@client ~]# systemctl restart iscsi
[root@client ~]# systemctl daemon-reload
[root@client ~]# systemctl restart iscsi
[root@client ~]# systemctl enable iscsi
[root@client ~]# lsblk
...........................................................
sda 8:0 0 20G 0 disk
[root@client ~]# parted /dev/sda mklabel gpt
[root@client ~]# parted /dev/sda mkpart primary 1 100%
[root@client ~]# mkfs.xfs /dev/sda1
[root@client ~]# mount /dev/sda1 /mnt/
[root@client ~]# echo "aaa" > /mnt/a.txt
[root@client ~]# cat /mnt/a.txt
aaa
Scenario: the same iSCSI LUN appears as sda on both web1 (an FTP server using /var/ftp) and web2.
    web1 (ftp, /var/ftp)  <-- iscsi sda -->  web2
    mount /dev/sda1 /var/ftp        (on whichever server is active)
If web1 fails, web2 can mount the same LUN on /var/ftp -- the data is not lost.
[root@web1 ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.4.100/24 connection.autoconnect yes
[root@web1 ~]# nmcli connection up eth0
[root@web1 ~]# iscsiadm --mode discoverydb --type sendtargets --portal 192.168.2.5 --discover
[root@web1 ~]# lsblk
Deploying a Multipath (multi-path I/O) environment
[root@web1 ~]# yum -y install device-mapper-multipath
[root@web1 ~]# cd /usr/share/doc/device-mapper-multipath-0.4.9/
[root@web1 device-mapper-multipath-0.4.9]# cp multipath.conf /etc/
[root@web1 ~]# /usr/lib/udev/scsi_id --whitelisted --device=/dev/sda
360014058e9e3a373bc34ee8bc19daf9e    copy this WWID
[root@web1 ~]# vim /etc/multipath.conf
multipaths {
    multipath {
        wwid  360014058e9e3a373bc34ee8bc19daf9e
        alias yellow
    }
}
[root@web1 ~]# systemctl restart multipathd.service
[root@web1 ~]# systemctl enable multipathd.service
[root@web1 ~]# multipath -rr
[root@web1 ~]# multipath -ll
[root@web1 ~]# ifconfig eth0 down
[root@web1 ~]# nmcli connection up eth0
[root@web1 ~]# multipath -rr
[root@web1 ~]# multipath -ll
[root@web1 ~]# systemctl stop multipathd
[root@web1 ~]# iscsiadm --mode node --targetname iqn.2018-01.cn.tedu:proxy --portal 192.168.4.5:3260 --logout
[root@web1 ~]# iscsiadm --mode node --targetname iqn.2018-01.cn.tedu:proxy --portal 192.168.2.5:3260 --logout
[root@web1 ~]# rm -rf /etc/target/saveconfig.json
[root@web1 ~]# systemctl stop target
Recap -- server side:
    iscsi target
    targetcli
        backstore
        iscsi target (IQN)
            acl
            lun
Recap -- client side (the steps that are easy to forget):
    vim /etc/iscsi/initiatorname.iscsi    (the InitiatorName must match the target's ACL)
    iscsiadm ... --discover
    systemctl start iscsi
Configuring and accessing an NFS share
[root@proxy ~]# vim /etc/exports
/root 192.168.2.100(rw)
/usr/src 192.168.2.0/24(ro)
[root@proxy ~]# systemctl restart nfs
[root@web1 ~]# showmount -e 192.168.2.5
[root@web1 ~]# mount 192.168.2.5:/root /mnt/
[root@web1 ~]# ls /mnt/
ls: cannot open directory /mnt/: Permission denied
root ---> nfsnobody    (root_squash maps the remote root user to nfsnobody)
[root@proxy ~]# chmod 777 /root/
[root@proxy ~]# vim /etc/exports
/root 192.168.2.100(rw,no_root_squash)
/usr/src 192.168.2.0/24(ro)
[root@proxy ~]# systemctl restart nfs
[root@proxy ~]# systemctl status rpcbind
ftp: 20,21    httpd: 80    smtp: 25    pop3: 110
NFS uses random ports; the client simply mounts, e.g. mount 192.168.4.4:/mnt/
rpcbind: port 111, the port-registration service the NFS daemons register with (see the rpcinfo check below)
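To see which ports the NFS-related daemons actually registered with rpcbind, rpcinfo can query port 111; a quick check might look like this:
[root@web1 ~]# rpcinfo -p 192.168.2.5    # lists portmapper (111), nfs (2049) and the randomly assigned mountd/lockd ports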
[root@proxy ~]# rpm -q nfs-utils rpcbind
Writing udev rules
[root@proxy ~]# uname -a
[root@proxy ~]# uname -r
udev appeared around kernel 2.6; this system runs 3.10
udev device management:
    historically every device file sat statically under /dev/
    udev manages devices dynamically through /sys and /dev
    it handles device naming
    decides which device files or symlinks to create
    decides how to set attributes (owner, group, mode)
    decides which events to trigger (RUN+=)
[root@proxy ~]# cd /etc/udev/rules.d/
[root@proxy rules.d]# ls
70-persistent-ipoib.rules
[root@room9pc01 ~]# udevadm monitor --property
run the command first, then plug in the USB drive to watch the events
ID_VENDOR=Generic
ID_SERIAL=Generic_Flash_Disk_2D203645-0:0
DEVTYPE=partition
ACTION=add
[root@room9pc01 ~]# udevadm monitor --property
    (this time: plug in the USB drive, then run the command)
[root@room9pc01 ~]# udevadm info --query=path --name=/dev/sda
/block/sda    (the device's sysfs path)
[root@room9pc01 ~]# udevadm info --query=property --path=/block/sda    view the device's properties via its sysfs path
[root@room9pc01 ~]# which systemctl
[root@room9pc01 ~]# vim /etc/udev/rules.d/myusb.rules
ENV{ID_VENDOR}=="Generic", ACTION=="add", RUN+="/usr/bin/systemctl restart httpd"
ENV{ID_VENDOR}=="Generic", ACTION=="remove", RUN+="/usr/bin/systemctl stop httpd"
[root@room9pc01 ~]# which systemctl
[root@room9pc01 ~]# ss -ntulp | grep :80
[root@room9pc01 ~]# systemctl status httpd
[root@room9pc01 ~]# vim /etc/udev/rules.d/70-myusb.rules
ENV{ID_VENDOR}=="Generic", ACTION=="add", MODE="777", OWNER="radvd", SYMLINK+="myusbbbb"
[root@room9pc01 ~]# ll /dev/sda
LVS: high performance, few features, no regex support
nginx: medium performance, many features, regex supported
                                        | app server
user access ----> director (front end) | app server ----> MySQL
                                        | app server
Director Server
Real Server
VIP (virtual IP -- the IP the Director Server presents to users)
DIP (the IP the Director Server uses to reach the back-end servers)
RIP (a Real Server's own IP)
CIP (the client's IP)
LVS working modes:
1. NAT  -- address translation
2. DR   -- direct routing
3. TUN  -- tunneling
[root@proxy ~]# yum -y install ipvsadm.x86_64
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -A -t 192.168.4.5:80 -s rr
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:80 -r 192.168.2.100:80
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -A -t 192.168.4.5:3306 -s rr
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:3306 -r 2.2.2.2
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:3306 -r 3.3.3.3
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:3306 -r 4.4.4.4
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -E -t 192.168.4.5:80 -s wrr
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -D -t 192.168.4.5:80
[root@proxy ~]# ipvsadm -Ln
-m is NAT, -i is tunneling, -g is direct routing
if none is given, direct routing (-g) is the default
[root@proxy ~]# ipvsadm -e -t 192.168.4.5:3306 -r 2.2.2.2 -w 2 -m
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -d -t 192.168.4.5:3306 -r 4.4.4.4
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -C    clear all rules
[root@proxy ~]# ipvsadm -Ln
[root@proxy ~]# ipvsadm -A -t 1.1.1.1:80 -s rr
[root@proxy ~]# ipvsadm -a -t 1.1.1.1:80 -r 10.10.10.10
[root@proxy ~]# ipvsadm -a -t 1.1.1.1:80 -r 10.10.10.11
[root@proxy ~]# ipvsadm -a -t 1.1.1.1:80 -r 10.10.10.12
[root@proxy ~]# ipvsadm-save -n > /etc/sysconfig/ipvsadm    save the rules
[root@proxy ~]# cat /etc/sysconfig/ipvsadm
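The saved rules can be loaded back later with ipvsadm-restore, and on RHEL/CentOS 7 the ipvsadm service unit (if installed) reads this same file at boot; a sketch:
[root@proxy ~]# ipvsadm-restore < /etc/sysconfig/ipvsadm
[root@proxy ~]# systemctl enable ipvsadm    # assumes the ipvsadm unit shipped with the package is present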
Deploying an LVS-NAT cluster
[root@web1 ~]# nmcli connection down eth0
[root@web1-2 ~]# nmcli connection modify eth1 ipv4.method manual ipv4.gateway 192.168.2.5
[root@web1-2 ~]# nmcli connection up eth1
[root@web1-2 ~]# route -n
[root@web1-2 ~]# yum -y install httpd
[root@web1-2 ~]# systemctl restart httpd
[root@web1 ~]# ss -ntulp | grep :80
[root@web1 ~]# echo "192.168.2.100" > /var/www/html/index.html
[root@web2 ~]# echo "192.168.2.200" > /var/www/html/index.html
[root@proxy ~]# curl http://192.168.2.100
192.168.2.100
[root@proxy ~]# curl http://192.168.2.200
192.168.2.200
[root@proxy ~]# echo "1" > /proc/sys/net/ipv4/ip_forward    temporarily enable IP forwarding (routing)
[root@proxy ~]# echo "0" > /proc/sys/net/ipv4/ip_forward    temporarily disable it
[root@proxy ~]# vim /etc/sysctl.conf
net.ipv4.ip_forward = 1    permanently enable IP forwarding
[root@proxy ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@proxy ~]# ipvsadm -A -t 192.168.4.5:80 -s wrr
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:80 -r 192.168.2.100:80 -w 1 -m
[root@proxy ~]# ipvsadm -a -t 192.168.4.5:80 -r 192.168.2.200:80 -w 1 -m
[root@proxy ~]# ss -ntulp | grep :80
[root@client ~]# curl http://192.168.4.5
192.168.2.100
[root@client ~]# curl http://192.168.4.5
192.168.2.200
Deploying an LVS-DR cluster
CIP is the client's IP address
VIP is the IP address that serves the clients
RIP is a back-end server's real IP address
DIP is the IP the director uses to talk to the back-end servers (the VIP must sit on a virtual interface)
[root@proxy ~]# cd /etc/sysconfig/network-scripts/
the directory holding the NIC configuration files
[root@proxy network-scripts]# cp ifcfg-eth0 ifcfg-eth0:0
[root@proxy network-scripts]# vim ifcfg-eth0:0
TYPE=Ethernet            NIC type
BOOTPROTO=none           how the IP is assigned (none = static)
NAME=eth0:0              connection name
DEVICE=eth0:0            device name
ONBOOT=yes               activate on boot
IPADDR=192.168.4.15      IP address
PREFIX=24                subnet mask length
[root@proxy network-scripts]# systemctl restart network
[root@proxy network-scripts]# ifconfig eth0:0
[root@web1 ~]# ping 192.168.4.15
[root@web2 ~]# ping 192.168.4.15
[root@client ~]# ping 192.168.4.15
[root@web1 ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.4.100/24 connection.autoconnect yes
[root@web1 ~]# nmcli connection up eth0
[root@web2 ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.4.200/24 connection.autoconnect yes
[root@web2 ~]# nmcli connection up eth0
[root@web1-2 ~]# cd /etc/sysconfig/network-scripts/
[root@web1-2 network-scripts]# cp ifcfg-lo ifcfg-lo:0
[root@web1-2 network-scripts]# vim ifcfg-lo:0
DEVICE=lo:0                    device name
IPADDR=192.168.4.15            IP address
NETMASK=255.255.255.255        32-bit subnet mask
NETWORK=192.168.4.15           network address
BROADCAST=192.168.4.15         broadcast address
ONBOOT=yes                     activate on boot
NAME=lo:0                      connection name
[root@web1-2 network-scripts]# vim /etc/sysctl.conf
..................................................
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
[root@web1-2 network-scripts]# sysctl -p
    apply the kernel parameters
[root@web1-2 network-scripts]# systemctl restart network
[root@web1-2 network-scripts]# ip a s lo
[root@proxy ~]# ipvsadm -C
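A virtual service has to exist before real servers can be added to it; that step is missing from the transcript, but it would presumably look like this (the scheduler choice here is an assumption):
[root@proxy ~]# ipvsadm -A -t 192.168.4.15:80 -s rr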
[root@proxy ~]# ipvsadm -a -t 192.168.4.15:80 -r 192.168.4.100:80
[root@proxy ~]# ipvsadm -a -t 192.168.4.15:80 -r 192.168.4.200:80
[root@proxy ~]# ipvsadm -Ln
[root@client ~]# curl http://192.168.4.15
192.168.2.100
[root@client ~]# curl http://192.168.4.15
192.168.2.200
[root@web1 ~]# systemctl stop httpd
[root@client ~]# curl http://192.168.4.15
curl: (7) Failed connect to 192.168.4.15:80; 拒绝连接
[root@client ~]# curl http://192.168.4.15
192.168.2.200
ping 192.168.4.100            [the host is up but the service is down]
curl http://192.168.4.100     [the service is up but the page has been tampered with]
[root@proxy ~]# vim LVS.sh
#!/bin/bash
web1=192.168.4.100
web2=192.168.4.200
web=192.168.4.15
while :
do
    for i in $web1 $web2
    do
        curl http://$i &> /dev/null
        if [ $? -ne 0 ];then
            ipvsadm -Ln | grep $i &> /dev/null && ipvsadm -d -t ${web}:80 -r $i
        else
            ipvsadm -Ln | grep $i &> /dev/null || ipvsadm -a -t ${web}:80 -r $i
        fi
    done
    sleep 5
done
[root@proxy ~]# bash LVS.sh &
[root@web1 ~]# systemctl restart httpd
[root@proxy ~]# ipvsadm -Ln
[root@web1 ~]# systemctl stop httpd
[root@proxy ~]# ipvsadm -Ln
nginx proxy (it fetches the page on your behalf)
LVS director (it only forwards packets): LVS-NAT, LVS-DR, LVS-TUN
yum -y install ipvsadm
ipvsadm -A -t|-u VIP:port -s rr|wrr|lc|sh
ipvsadm -a -t|-u VIP:port -r RIP -g|-m|-i
ipvsadm -E,-D,-C
ipvsadm -e,-d
ipvsadm -Ln
LVS-NAT
Environment requirements:
    the web servers must not be on the same subnet as the clients
    the web servers must have a gateway (pointing back at the director)
    VIP faces the outside: eth0    DIP faces the inside: eth0:0
                               | app server  RIP,VIP
director (front end) ----------| app server  RIP,VIP
    Keepalived                 |
director (front end) ----------| app server  RIP,VIP
Keepalived was written specifically for LVS:
1. it configures the LVS rules automatically and performs health checks
2. it borrows the routers' hot-standby idea (VRRP, like Cisco's HSRP)
Installing the Keepalived software
Note: perform the same operations on both web servers.
[root@web1-2 ~]# yum -y install keepalived.x86_64
[root@web1 ~]# vim /etc/keepalived/keepalived.conf
..........................................................
global_defs {                                    global settings
   notification_email {
     [email protected]
     [email protected]
     [email protected]                     three alert recipients
   }
   notification_email_from [email protected]     sender address
   smtp_server 192.168.200.1                     mail (SMTP) server
   smtp_connect_timeout 30
   router_id web1                                router ID
...........................................................
vrrp_instance VI_1 {
    state MASTER                the primary is MASTER (change to BACKUP on the backup server)
    interface eth0              network interface to use
    virtual_router_id 50        VRID, must be identical on master and backup
    priority 100                server priority
    virtual_ipaddress {
        192.168.4.80            whichever node is master holds this VIP (changed for this lab)
    }
Then find the virtual_server 192.168.200.100 443 block and delete everything from there to the end of the file (999dd in vim).
[root@web1 ~]# scp /etc/keepalived/keepalived.conf [email protected]:/etc/keepalived/
[root@web2 ~]# vim /etc/keepalived/keepalived.conf
..........................................................
global_defs {                                    global settings
   notification_email {
     [email protected]
     [email protected]
     [email protected]                     three alert recipients
   }
   notification_email_from [email protected]     sender address
   smtp_server 192.168.200.1                     mail (SMTP) server
   smtp_connect_timeout 30
   router_id web2                                router ID
...........................................................
vrrp_instance VI_1 {
    state MASTER                change this to BACKUP, since web2 is the backup server
    interface eth0              network interface to use
    virtual_router_id 50        VRID, must be identical on master and backup
    priority 80                 lower priority than the master
    virtual_ipaddress {
        192.168.4.80            whichever node is master holds this VIP
    }
Then find the virtual_server 192.168.200.100 443 block and delete everything from there to the end of the file (999dd in vim).
[root@web1-2 ~]# systemctl restart keepalived.service
[root@web1-2 ~]# iptables -F
[root@web1 ~]# ip a s eth0
[root@proxy ~]# ping 192.168.4.80
[root@proxy ~]# curl http://192.168.4.80
192.168.2.100
[root@proxy ~]# ping 192.168.4.80
leave this ping running, don't interrupt it
[root@web1 ~]# systemctl stop keepalived.service
[root@web2 ~]# ip a s eth0
[root@web1 ~]# systemctl restart keepalived.service
[root@web2 ~]# ip a s eth0
[root@web1 ~]# ip a s eth0
[root@proxy ~]# ping 192.168.4.80
you will see the ping never stops -- the VIP failed over to the other node
[root@web1 ~]# iptables -F
every time the service is restarted, flush the firewall rules again
[root@proxy ~]# curl http://192.168.4.80
[root@web1 ~]# tailf /var/log/messages    follow the log file to watch the VRRP state changes
Configuring the network environment
Hosts: web1, web2, proxy1, proxy2, client
[root@proxy2 ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.4.6/24 connection.autoconnect yes
[root@proxy2 ~]# nmcli connection up eth0
[root@web1-2 ~]# systemctl stop keepalived.service
[root@proxy1-2 ~]# yum -y install keepalived.x86_64
[root@proxy1 ~]# ipvsadm -C
[root@proxy2 ~]# yum -y install ipvsadm.x86_64
[root@proxy2 ~]# ipvsadm -C
[root@proxy1 ~]# rm -rf /etc/sysconfig/network-scripts/ifcfg-eth0:0
[root@proxy1 ~]# systemctl restart network
[root@proxy1 ~]# systemctl enable network
[root@proxy1 ~]# ip a s eth0
[root@proxy1 ~]# vim /etc/keepalived/keepalived.conf
router_id LVS1                  change the router ID
192.168.4.15                    change the virtual IP
virtual_server 192.168.4.15 80
    lb_kind DR
    # persistence_timeout 50    comment this line out
Note: the virtual_server block is the equivalent of  ipvsadm -A -t 192.168.4.15:80 -s wrr
real_server {
    TCP_CHECK { port check }
    HTTP_GET  { web page check
        url /a.html
        digest <md5sum of the page>
    }
    SSL_GET   { web page check over HTTPS }
}
real_server 192.168.4.100 80 {
    weight 1                    set the weight to 1
    TCP_CHECK {                 health check against the real server
        connect_timeout 3       3-second connection timeout
        nb_get_retry 3          retry 3 times
        delay_before_retry 3    wait 3 seconds between checks
    }
}
Delete the rest of the file (999dd in vim).
[root@proxy1 ~]# systemctl restart keepalived.service
[root@proxy1 ~]# ipvsadm -Ln
[root@proxy1 ~]# ip a s eth0
[root@proxy1 ~]# iptables -F
[root@proxy1 ~]# scp /etc/keepalived/keepalived.conf [email protected]:/etc/keepalived/keepalived.conf
[root@proxy2 ~]# vim /etc/keepalived/keepalived.conf
router_id LVS2
state BACKUP
priority 80
[root@proxy2 ~]# systemctl restart keepalived.service
[root@proxy2 ~]# iptables -F
[root@proxy2 ~]# ip a s eth0
[root@proxy2 ~]# ipvsadm -Ln
[root@client ~]# curl http://192.168.4.15
192.168.2.100
[root@client ~]# curl http://192.168.4.15
192.168.2.200
[root@web1 ~]# systemctl stop httpd
[root@proxy1-2 ~]# ipvsadm -Ln
[root@web1 ~]# systemctl restart httpd
[root@proxy1-2 ~]# ipvsadm -Ln
Note: the virtual_server block equals  ipvsadm -A -t 192.168.4.15:80 -s wrr
real_server {
    TCP_CHECK { port check }
    HTTP_GET  { web page check
        url {
            path /a.html
            digest sj1232323fgfg1232323    an MD5 value (see the genhash sketch below)
        }
    }
    SSL_GET   { web page check over HTTPS }
}
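The digest for an HTTP_GET/SSL_GET check is normally produced with the genhash tool shipped with keepalived; a sketch, using the /a.html example above:
[root@proxy1 ~]# genhash -s 192.168.4.100 -p 80 -u /a.html    # prints the MD5 digest to paste into the digest line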
Configuring an HAProxy load-balancing cluster
Nginx, LVS, HAProxy, F5 BIG-IP
Speed: F5 > LVS > HAProxy > Nginx
F5 is a hardware appliance
LVS: layer-4 scheduling only, no layer-7 scheduling
HAProxy's regex support is weaker than nginx's
HAProxy: layer-4 and layer-7 scheduling
nginx: layer-4 and layer-7 scheduling
[root@web1-2 ~]# rm -rf /etc/sysconfig/network-scripts/ifcfg-lo:0
[root@web1-2 ~]# vim /etc/sysctl.conf
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_announce = 2
[root@web1-2 ~]# nmcli connection down eth0
[root@web1-2 ~]# systemctl restart network
[root@proxy1-2 ~]# systemctl stop keepalived.service
[root@proxy1-2 ~]# ipvsadm -Ln
Deploying the HAProxy server
[root@proxy1 ~]# yum -y install haproxy.x86_64
(the name HAProxy = HA "high availability" + proxy)
[root@proxy1 ~]# vim /etc/haproxy/haproxy.cfg
Notes on haproxy.cfg structure:
global      global settings
    maxconn 2000        at most 2000 concurrent connections
defaults    default settings
    maxconn 1000        at most 1000 concurrent connections
main frontend / listen    per-cluster settings
    maxconn 500         at most 500 concurrent connections
Two ways to define a cluster:
    frontend + backend    (front end + back end)
    listen
daemon      run the program in the background after start
Method 1:   frontend abc *:80
                use_backend xxx
            backend xxx
                server ip1
                server ip2
Method 2:   listen xxx *:80
                server ip1
                server ip2
(method 1 is spelled out in the sketch below; this lab uses method 2)
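A rough haproxy.cfg sketch of the frontend + backend style; the section names are made up for illustration, and default_backend is used because there is no ACL condition to feed use_backend:
frontend webfront
    bind *:80
    default_backend webservers
backend webservers
    balance roundrobin
    server web1 192.168.2.100:80
    server web2 192.168.2.200:80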
In /etc/haproxy/haproxy.cfg, find the "# main frontend which proxys to the backends" section and delete everything from there to the end of the file (999dd in vim), then append:
listen webx *:80
    balance roundrobin
    server web1 192.168.2.100:80
    server web2 192.168.2.200:80
[root@proxy1 ~]# systemctl restart haproxy
[root@client ~]# curl http://192.168.4.5
[root@proxy1 ~]# cd /usr/share/doc/haproxy-1.5.18/
[root@proxy1 haproxy-1.5.18]# vim configuration.txt
/balance                        search in vim
balance <algorithm>             described around line 1584
Available algorithms include:
    roundrobin
    static-rr
    leastconn                   least-connections
[root@proxy1 ~]# vim /etc/haproxy/haproxy.cfg
listen status 0.0.0.0:1080
    stats refresh 30s           the monitoring page auto-refreshes every 30 seconds
    stats uri /stats            the page is served at /stats
    stats realm Haproxy Manage  enable the management page (realm text shown at login)
    stats auth admin:admin      username:password
[root@proxy1 ~]# systemctl restart haproxy.service
Browse to http://192.168.4.5:1080/stats
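The stats page can also be checked from the command line with curl, using the stats auth credentials above:
[root@client ~]# curl -u admin:admin http://192.168.4.5:1080/stats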
[root@client ~]# ab -c 100 -n 10000 http://192.168.4.5/
[root@proxy1 ~]# vim /etc/haproxy/haproxy.cfg
listen webs 0.0.0.0:80
balance roundrobin
server web1 192.168.2.100:80 check inter 2000 rise 2 fall 5
server web2 192.168.2.200:80 check inter 2000 rise 2 fall 5
[root@proxy1 ~]# systemctl restart haproxy
Keepalived+LVS
Haproxy+keepalived
nginx+keepalived
Website architecture:
    user ----> internet ----> director ----> app server /var/www/html
               director                      app server /var/www/html
                                             app server /usr/local/nginx/html
                                             app server /usr/local/nginx/html
    shared storage: NFS, samba        database
    mount ip:/nfs1 /var/www/html
    mount ip:/nfs2 /var/www/html
Distributed storage:
1. data is spread across nodes
2. reads and writes happen in parallel
3. as with RAID, a single file is striped across devices
4. Ceph keeps 3 replicas of all data
Capacity scale: PB -> TB -> GB
Components: ceph-osd, ceph-mon (both required), ceph-mds (file system), ceph-radosgw (object storage)
Client side: ceph-common
Traditional access is either block-based or filesystem-based:
    iscsi [block]         a disk
    NFS  [file system]    a directory
Ceph provides all three: block, file system and object storage
Environment:
create node1, node2, node3
on the 192.168.4.0/24 segment: .11, .12, .13
[root@room9pc01 ~]# yum -y install vsftpd
[root@room9pc01 ~]# mount /iso/rhcs2.0-rhosp9-20161113-x86_64.iso /var/ftp/Ceph/
[root@room9pc01 ~]# systemctl restart vsftpd
[root@room9pc01 ~]# systemctl enable vsftpd
[root@room9pc01 ~]# cd /var/ftp/Ceph/
[root@room9pc01 Ceph]# ls
rhceph-2.0-rhel-7-x86_64
rhscon-2.0-rhel-7-x86_64
rhel-7-server-openstack-9-rpms
[root@room9pc01 Ceph]# cd rhceph-2.0-rhel-7-x86_64/
[root@room9pc01 rhceph-2.0-rhel-7-x86_64]# ls
[root@client ~]# vim /etc/yum.repos.d/Ceph.repo
[MON]
name=MON
baseurl=ftp://192.168.4.254/Ceph/rhceph-2.0-rhel-7-x86_64/MON/
gpgcheck=0
[Tools]
name=Tools
baseurl=ftp://192.168.4.254/Ceph/rhceph-2.0-rhel-7-x86_64/Tools/
gpgcheck=0
[OSD]
name=OSD
baseurl=ftp://192.168.4.254/Ceph/rhceph-2.0-rhel-7-x86_64/OSD/
gpgcheck=0
[root@client yum.repos.d]# for i in 11 12 13
> do
> scp /etc/yum.repos.d/Ceph.repo 192.168.4.$i:/etc/yum.repos.d/
> done
Clear the yum cache on all four machines (for example as below)
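A minimal way to do that, run on client, node1, node2 and node3 in turn:
[root@client ~]# yum clean all && yum repolist    # repeat on node1, node2, node3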
[root@client ~]# vim /etc/hosts
192.168.4.10 client
192.168.4.11 node1
192.168.4.12 node2
192.168.4.13 node3
[root@client ~]# for i in 11 12 13
> do
> scp /etc/hosts 192.168.4.$i:/etc/
> done
Configure passwordless SSH (including from a host to itself)
[root@node1 ~]# ssh-keygen -N '' -f /root/.ssh/id_rsa
[root@node1 ~]# for i in 10 11 12 13
> do
> ssh-copy-id 192.168.4.$i
> done
[root@node1 ~]# for i in client node1 node2 node3
> do
> ssh-copy-id $i
> done
[root@room9pc01 ~]# yum -y install chrony
[root@client ~]# vim /etc/chrony.conf
server 192.168.4.254 iburst
[root@client ~]# systemctl restart chronyd
[root@client ~]# systemctl enable chronyd
[root@client ~]# for i in client node2 node3
> do
> scp /etc/chrony.conf $i:/etc/
> done
[root@client ~]# for i in client node2 node3
> do
> ssh $i "systemctl restart chronyd"
> ssh $i "systemctl enable chronyd"
> done
[root@client ~]# vim /etc/bashrc
alias 10='ssh -X [email protected]'
alias 11='ssh -X [email protected]'
alias 12='ssh -X [email protected]'
alias 13='ssh -X [email protected]'
[root@client ~]# for i in 11 12 13
> do
> scp /etc/bashrc 192.168.4.$i:/etc/bashrc
> done
[root@client ~]# for i in client node1 node2 node3
> do
> ssh $i "source /etc/bashrc"
> done
Attach three disks to each of node1, node2 and node3 (three disks per virtual machine).
[root@node1-2-3 ~]# lsblk
Deploying the ceph cluster
    install the deployment tool ceph-deploy
    create the ceph cluster
    prepare the journal disk partitions
    create the OSD storage space
    check and verify the ceph status
[root@node1 ~]# yum -y install ceph-deploy
[root@node1 ~]# ceph-deploy --help
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster
[root@node1 ceph-cluster]# ceph-deploy new node1 node2 node3
install the ceph packages on the nodes remotely
[root@node1 ceph-cluster]# ceph-deploy install node1 node2 node3
[root@node1-2-3 ~]# rpm -qa | grep ceph
initialize the monitors and start their services remotely
[root@node1 ceph-cluster]# ceph-deploy mon create-initial
check the cluster status
[root@node1 ceph-cluster]# ceph -s
Creating the OSDs
    node1    node2    node3
    vdb      vdb      vdb
    vdc      vdc      vdc
    vdd      vdd      vdd
    each disk is 20G (60G of disks per node)
    vdb1 serves as the journal/cache for vdc, vdb2 for vdd
[root@node1-2-3 ~]# parted /dev/vdb mklabel gpt
[root@node1-2-3 ~]# parted /dev/vdb mkpart primary 1 50%
[root@node1-2-3 ~]# parted /dev/vdb mkpart primary 50% 100%
[root@node1-2-3 ~]# chown ceph.ceph /dev/vdb1
[root@node1-2-3 ~]# chown ceph.ceph /dev/vdb2
[root@node1-2-3 ~]# vim /etc/udev/rules.d/70-vdb.rules
ENV{DEVNAME}=="/dev/vdb1",OWNER="ceph",GROUP="ceph"
ENV{DEVNAME}=="/dev/vdb2",OWNER="ceph",GROUP="ceph"
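To make the ownership rules take effect without a reboot, udev can be reloaded and the block devices retriggered; a sketch (the chown commands above already cover the current boot):
[root@node1-2-3 ~]# udevadm control --reload
[root@node1-2-3 ~]# udevadm trigger --subsystem-match=block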
Initialize (zap) the disks, wiping any existing data
[root@node1 ceph-cluster]# ceph-deploy disk zap node1:vdc node1:vdd
[root@node1 ceph-cluster]# ceph-deploy disk zap node2:vdc node2:vdd
[root@node1 ceph-cluster]# ceph-deploy disk zap node3:vdc node3:vdd
Create the OSD storage space
[root@node1 ceph-cluster]# ceph-deploy osd create node1:vdc:/dev/vdb1 node1:vdd:/dev/vdb2
[root@node1 ceph-cluster]# ceph-deploy osd create node2:vdc:/dev/vdb1 node2:vdd:/dev/vdb2
[root@node1 ceph-cluster]# ceph-deploy osd create node3:vdc:/dev/vdb1 node3:vdd:/dev/vdb2
Check the status
[root@node1 ceph-cluster]# systemctl status [email protected]
[root@node1 ceph-cluster]# df -h
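With all three nodes prepared there should be six OSDs in total; a quick sanity check could be:
[root@node1 ceph-cluster]# ceph osd tree    # should list osd.0 through osd.5 as "up"
[root@node1 ceph-cluster]# ceph -s          # health should settle at HEALTH_OK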
Creating Ceph block storage
Ceph can provide three kinds of sharing: block, file system and object storage
List the storage pools
[root@node3 ~]# ceph osd lspools
Create and list images
[root@node3 ~]# rbd help create
[root@node3 ~]# rbd create demo-image --image-feature layering --size 5G
[root@node3 ~]# rbd create rbd/image --image-feature layering --size 5G
[root@node3 ~]# rbd list
[root@node3 ~]# rbd info demo-image
the images are visible from node1, node2 and node3 alike
Resizing on the fly
Shrink the image
[root@node3 ~]# rbd resize --size 250M image --allow-shrink
[root@node3 ~]# rbd info image
Grow the image
[root@node3 ~]# rbd resize --size 750M image
[root@node3 ~]# rbd info image
node1        node2        node3
ceph-mon     ceph-mon     ceph-mon
ceph-osd     ceph-osd     ceph-osd
rbd create image    the image lives in the cluster, so any node (or client) can map it
[root@node3 ~]# rbd map demo-image
/dev/rbd0
[root@node3 ~]# lsblk
rbd0 251:0 0 5G 0 disk
Client access through KRBD (the kernel RBD driver)
[root@client ~]# yum -y install ceph-common.x86_64
[root@node1 ceph-cluster]# cd /etc/ceph/
[root@node1 ceph]# scp ceph.conf client:/etc/ceph/
[root@node1 ceph]# cat ceph.client.admin.keyring
[client.admin]    the user name
key = AQAfpqVcm+DOJRAAoeYOyWznmNPvFslLu2Hg8A==    the key (acts as the password)
[root@node1 ceph]# scp ceph.client.admin.keyring client:/etc/ceph/
[root@client ~]# rbd map image    map the image
/dev/rbd1
[root@client ~]# lsblk
rbd0 251:0 0 750M 0 disk
[root@client ~]# rbd unmap /dev/rbd1    unmap it again
[root@client ~]# lsblk
[root@client ~]# rbd showmapped
[root@client ~]# mkfs.xfs /dev/rbd0
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# echo "AAAA" > /mnt/a.txt
Creating image snapshots
Snapshots use COW (copy-on-write).
Example: a 100G original disk holds a.txt (content 111) and b.txt (content 222).
    If a.txt is later modified or removed (rm -rf a.txt), only the changed blocks are copied,
    so the snapshot may occupy only ~10M while still showing a.txt:111 and b.txt:222.
Even a 10T database can be snapshotted in 4-5 seconds, instead of locking it for a long cp/tar backup.
[root@node1 ceph-cluster]# rbd snap ls image
[root@node1 ceph-cluster]# rbd list
demo-image
image
[root@node1 ceph-cluster]# rbd snap create image --snap image-snap1    create a snapshot
[root@node1 ceph-cluster]# rbd snap ls image
list the snapshots
[root@client ~]# rm -rf /mnt/a.txt
[root@client ~]# umount /mnt/
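For the deleted file to reappear after remounting, the image has to be rolled back to the snapshot first; that step is missing from the transcript, but it would look roughly like this on node1:
[root@node1 ceph-cluster]# rbd snap rollback image --snap image-snap1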
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# ls /mnt
a.txt
[root@client ~]# mkfs.xfs -f /dev/rbd0
if mkfs refuses because a filesystem already exists, add -f to force it
[root@node1 ceph-cluster]# rbd snap create image --snap image-lsc2    create another snapshot
[root@node1 ceph-cluster]# rbd snap protect image --snap image-lsc2    protect the snapshot
[root@node1 ceph-cluster]# rbd clone image --snap image-snap1 image-clone --image-feature layering
    clone a snapshot into a new image
[root@node1 ceph-cluster]# rbd list
demo-image
image          (parent)
image-clone    (child clone)
[root@node1 ~]# rbd info image-clone
[root@node1 ~]# rbd snap rm image --snap image-clone
[root@node1 ~]# rbd rm image    delete the snapshots and the image
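Note that a protected snapshot (image-lsc2 above) cannot be removed until it is unprotected, and a clone has to be flattened or deleted before its parent snapshot can go; a rough clean-up order would be:
[root@node1 ~]# rbd rm image-clone                           # or: rbd flatten image-clone
[root@node1 ~]# rbd snap unprotect image --snap image-lsc2
[root@node1 ~]# rbd snap purge image                         # removes all remaining snapshots
[root@node1 ~]# rbd rm image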
KVM virtual machines
/var/lib/libvirt/images/    disk image files
/etc/libvirt/qemu/          XML definition files
[root@room9pc01 ~]# cd /var/lib/libvirt/images/
[root@room9pc01 images]# du -sh Node1-1.qcow2
11G Node1-1.qcow2    the image file already uses 11G
The manual way to clone a VM:
cp xx.qcow2 new.qcow2
cp xx.xml new.xml
[root@room9pc01 images]# ls .rh7_template.img
    the (hidden) template disk image
[root@room9pc01 images]# ls .rhel7.xml
    the template configuration file
[root@room9pc01 images]# man qemu-img
[root@room9pc01 images]# qemu-img create -f qcow2 -b .rh7_template.img lsc.qcow2    create a new VM disk backed by the template
[root@room9pc01 images]# cp .rhel7.xml /etc/libvirt/qemu/lsc.xml    copy the configuration file
[root@room9pc01 images]# vim /etc/libvirt/qemu/lsc.xml
    change the VM name to SSS on line 9, and the disk path on line 39
[root@room9pc01 images]# virsh define /etc/libvirt/qemu/lsc.xml
[root@room9pc01 images]# virsh console SSS
Physical host
    VM1 (a file on the physical host)    /var/lib/libvirt/images
    VM2 (a file on the physical host)
Physical host
    VM1 (ceph block image 1)    accessed via ceph ip:port
    VM2 (ceph block image 2)
Physical host
    VM1 (ceph block image 1)    accessed via ceph ip:port
    VM2 (ceph block image 2)
Create the disk images
[root@node1 ceph-cluster]# rbd create vm1-image --image-feature layering --size 10G
[root@node1 ceph-cluster]# rbd create vm2-image --image-feature layering --size 10G
[root@node1 ceph-cluster]# rbd list
[root@room9pc01 ~]# yumdownloader ceph-common.x86_64
[root@room9pc01 ~]# yum -y install ceph-common.x86_64
[root@room9pc01 ceph]# scp 192.168.4.11:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
Create the account (secret) definition file
[root@room9pc01 ~]# vim server.xml
Create a temporary file with content along the following lines (sketched below); the important string is:
client.admin secret
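The transcript does not show the full file; a typical libvirt ceph secret definition looks roughly like this, with the usage name matching the "client.admin secret" string that virsh secret-list prints later:
<secret ephemeral='no' private='no'>
    <usage type='ceph'>
        <name>client.admin secret</name>
    </usage>
</secret>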
Define the secret from the XML file
[root@room9pc01 ~]# virsh --help | grep secret
[root@room9pc01 ~]# virsh secret-list
    list the defined secrets
[root@room9pc01 ~]# virsh secret-undefine <uuid>
    delete a secret
[root@room9pc01 ~]# virsh secret-define --file server.xml
Secret a5851b84-26de-493a-9e54-62d8b672d42e created
    a random UUID; the account information will be attached to this UUID
[root@room9pc01 ~]# virsh secret-list
a5851b84-26de-493a-9e54-62d8b672d42e ceph client.admin secret
Attach the account key to the secret
[root@room9pc01 ~]# cat /etc/ceph/ceph.client.admin.keyring
[root@room9pc01 ~]# virsh secret-set-value --secret a5851b84-26de-493a-9e54-62d8b672d42e --base64 AQAfpqVcm+DOJRAAoeYOyWznmNPvFslLu2Hg8A==
[root@room9pc01 ~]# cat /etc/libvirt/qemu/lsc.xml
[root@node1 ceph]# rbd list
vm1-image
[root@room9pc01 ~]# virsh edit SSS
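Inside virsh edit SSS the VM's disk is pointed at the rbd image; a sketch of what the <disk> element could look like, reusing the UUID generated above and node1 as the monitor (the exact XML is not shown in the transcript):
<disk type='network' device='disk'>
    <driver name='qemu' type='raw'/>
    <auth username='admin'>
        <secret type='ceph' uuid='a5851b84-26de-493a-9e54-62d8b672d42e'/>
    </auth>
    <source protocol='rbd' name='rbd/vm1-image'>
        <host name='192.168.4.11' port='6789'/>
    </source>
    <target dev='vda' bus='virtio'/>
</disk>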
The Ceph file system (CephFS)
What is a file system?
A device becomes a file system once it is formatted.
NTFS, fat32 and xfs are all file systems.
Cluster layout:
    osd  osd  osd
    osd  osd  osd
    mon  mon  mon    + mds    (the mds stores the metadata, i.e. the inode side; file data goes to blocks on the OSDs)
Node4 192.168.4.14/24
Create the storage pools
[root@node1 ~]# cd /root/ceph-cluster/
[root@node1 ceph-cluster]# ceph-deploy mds create node3
[root@node1 ceph-cluster]# ceph osd lspools
[root@node1 ceph-cluster]# ceph osd pool create cephfs_data 128    create the data pool with 128 PGs
[root@node1 ceph-cluster]# ceph osd pool create cephfs_metadata 128    create the metadata pool with 128 PGs
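Creating the two pools by itself is not enough; the file system still has to be assembled from them before a client can mount it. Roughly (the name myfs1 is just an example):
[root@node1 ceph-cluster]# ceph fs new myfs1 cephfs_metadata cephfs_data
[root@node1 ceph-cluster]# ceph fs ls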
[root@node1 ceph-cluster]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQAfpqVcm+DOJRAAoeYOyWznmNPvFslLu2Hg8A==
[root@client ~]# mount -t ceph 192.168.4.11:6789:/ /mnt/ -o name=admin,secret=AQAfpqVcm+DOJRAAoeYOyWznmNPvFslLu2Hg8A==
[root@client ~]# df -h
A disk, once formatted, is divided into inodes and blocks: the inode stores a file's metadata, the blocks store its data.
Storage schemes ceph supports:
1. block (a disk)         osd + rbd map
2. file system            mds + mount
3. object storage         rgw (radosgw)
Object storage requires developers to write programs against an API;
a typical use is a cloud drive such as yunpan.baidu.com
[root@node1 ceph-cluster]# ceph-deploy rgw create node3    deploy and start the RGW service remotely
[root@node3 ~]# systemctl restart [email protected]
[root@node3 ~]# systemctl status [email protected]
RGW's default service port is 7480
[root@node3 ~]# ss -ntulp | grep 7480
[root@node3 ~]# vim /etc/ceph/ceph.conf
[client.rgw.node3]    node3 is the hostname
host = node3
rgw_frontends = "civetweb port=8000"
    civetweb is the web server built into RGW
[root@node3 ~]# systemctl start [email protected]
[root@node3 ~]# ss -ntulp | grep 8000
Client-side tests
Test with curl
[root@client ~]# curl 192.168.4.13:8000
Access with third-party (S3) software
Log in to node3 (the RGW host) and create an account
[root@node3 ~]# radosgw-admin user create --uid="testuser" --display-name="First User"
testuser is the user name; the access_key/secret_key in the output are the account's access credentials (usable by an S3 client, sketched below)
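As one possible third-party client, s3cmd could be pointed at the gateway with the keys printed above; a rough sketch (the keys are placeholders, and an s3cmd package is assumed to be available):
[root@client ~]# yum -y install s3cmd
[root@client ~]# vim /root/.s3cfg
access_key = <testuser access_key>
secret_key = <testuser secret_key>
host_base = 192.168.4.13:8000
host_bucket = %(bucket)s.192.168.4.13:8000
use_https = False
[root@client ~]# s3cmd ls    # lists buckets, empty at first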