Preface: Ceph is an open-source project that provides a software-defined, unified storage solution. It is a massively scalable, high-performance distributed storage system with no single point of failure. From the very beginning it has been designed to run on general-purpose commodity hardware, and its capacity can scale to the exabyte level and beyond.
[root@localhost ~]# hostnamectl set-hostname ceph01
[root@localhost ~]# hostnamectl set-hostname ceph02
[root@localhost ~]# hostnamectl set-hostname ceph03
[root@ceph01 ~]# ifconfig
ens33: flags=4163 mtu 1500
inet 192.168.9.128 netmask 255.255.255.0 broadcast 192.168.9.255
ens36: flags=4163 mtu 1500
inet 192.168.124.130 netmask 255.255.255.0 broadcast 192.168.124.255
[root@ceph02 ~]# ifconfig
ens33: flags=4163 mtu 1500
inet 192.168.9.133 netmask 255.255.255.0 broadcast 192.168.9.255
ens36: flags=4163 mtu 1500
inet 192.168.124.131 netmask 255.255.255.0 broadcast 192.168.124.255
[root@ceph03 ~]# ifconfig
ens33: flags=4163 mtu 1500
inet 192.168.9.133 netmask 255.255.255.0 broadcast 192.168.9.255
ens36: flags=4163 mtu 1500
inet 192.168.124.133 netmask 255.255.255.0 broadcast 192.168.124.255
[root@ceph01 ~]# vi /etc/hosts
192.168.124.130 ceph01
192.168.124.131 ceph02
192.168.124.133 ceph03
// Configure the other two nodes the same way (see the sketch below)
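The same host mappings are needed on ceph02 and ceph03. Instead of editing the file by hand on each node, it can be copied out from ceph01; a minimal sketch (scp will still prompt for the root password here, since the SSH keys are only distributed in a later step):
scp /etc/hosts root@ceph02:/etc/hosts    # copy the mapping to ceph02
scp /etc/hosts root@ceph03:/etc/hosts    # copy the mapping to ceph03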
[root@ceph01 ~]# systemctl stop firewalld.service
[root@ceph01 ~]# systemctl disable firewalld.service
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@ceph01 ~]# setenforce 0
[root@ceph01 ~]# vi /etc/selinux/config
SELINUX=disabled
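Note that setenforce 0 only puts SELinux into permissive mode for the running system; the SELINUX=disabled setting takes effect after the next reboot. A quick sketch to confirm that SELinux and the firewall are really out of the way on each node:
getenforce                      # should print Permissive (or Disabled after a reboot)
systemctl is-active firewalld   # should print inactive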
[root@ceph01 ~]# ssh-keygen // generate a key pair; just press Enter at every prompt
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:nFxzejhiyTSleK7xl8Xe/uBAEE5O86pJSIRwosWPh4I root@ceph01
The key's randomart image is:
+---[RSA 2048]----+
| .+.... * |
| o.o.. . B + |
|o + o = * o |
|E o o. O = O |
| . . o S = = |
| * + B . |
| . + o o o |
| . + . |
| o..|
+----[SHA256]-----+
[root@ceph01 ~]# ssh-copy-id root@ceph01
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'ceph01 (192.168.124.130)' can't be established.
ECDSA key fingerprint is SHA256:lkgQj7t5I4b1dE9wSNQ1Wr842aOvd+uf+gp0wi17/xY.
ECDSA key fingerprint is MD5:e4:9b:7a:56:c8:ef:5c:64:7e:fb:c9:95:62:3f:e7:d1.
Are you sure you want to continue connecting (yes/no)? yes // type yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@ceph01's password: // enter the root password
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@ceph01'"
and check to make sure that only the key(s) you wanted were added.
[root@ceph01 ~]# ssh-copy-id root@ceph02
[root@ceph01 ~]# ssh-copy-id root@ceph03
... output omitted
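Before continuing, it is worth confirming that passwordless SSH now works from ceph01 to every node; a minimal sketch:
for node in ceph01 ceph02 ceph03; do
    ssh root@$node hostname    # each hostname should print without a password prompt
done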
[root@ceph01 ~]# vi /etc/yum.conf
keepcache=1 // enable package caching
[root@ceph01 ~]# yum install wget curl vim net-tools bash-completion -y
// Same on the other two nodes
[root@ceph01 ~]# cd /etc/yum.repos.d/
[root@ceph01 yum.repos.d]# ls
CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo
CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo
[root@ceph01 yum.repos.d]# mkdir bak
[root@ceph01 yum.repos.d]# mv C* bak
[root@ceph01 yum.repos.d]# ls
bak
[root@ceph01 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo // download the base repo
[root@ceph01 yum.repos.d]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo // download the EPEL repo
[root@ceph01 yum.repos.d]# vi local.repo // configure the Ceph (Mimic) repo
[ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
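The other two nodes need the same repo setup. One option is to copy the repo files from ceph01 and rebuild the yum cache remotely; a sketch (assuming the original repo files on ceph02 and ceph03 have been moved aside in the same way):
for node in ceph02 ceph03; do
    scp /etc/yum.repos.d/{CentOS-Base.repo,epel.repo,local.repo} root@$node:/etc/yum.repos.d/
    ssh root@$node "yum clean all && yum makecache"
done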
[root@ceph01 yum.repos.d]# yum update -y // update packages
[root@ceph01 yum.repos.d]# yum install ntpdate ntp -y // already included in the Aliyun repos, so this step may be unnecessary
[root@ceph01 yum.repos.d]# ntpdate ntp1.aliyun.com // sync time from an Aliyun NTP server
30 Mar 22:11:07 ntpdate[103922]: adjust time server 120.25.115.20 offset 0.012028 sec
[root@ceph01 yum.repos.d]# clock -w // write the current system time to the hardware clock (CMOS)
[root@ceph01 yum.repos.d]# vi /etc/ntp.conf
## Line 8: change to restrict default nomodify
## Line 17: change to restrict 192.168.124.0 mask 255.255.255.0 nomodify notrap
## Delete lines 21 through 24:
21 server 0.centos.pool.ntp.org iburst
22 server 1.centos.pool.ntp.org iburst
23 server 2.centos.pool.ntp.org iburst
24 server 3.centos.pool.ntp.org iburst
### and insert the following in their place (use the local clock as the time source) ###
server 127.127.1.0
fudge 127.127.1.0 stratum 10
[root@ceph01 yum.repos.d]# systemctl start ntpd // start the service
[root@ceph01 yum.repos.d]# systemctl enable ntpd // enable it at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
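Whether ntpd has accepted its time source can be checked with ntpq; a short sketch:
ntpq -p    # on ceph01 the local clock LOCAL(0) / 127.127.1.0 should be listed as a peer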
// ceph02 and ceph03 sync their clocks from ceph01 and set up a periodic cron job
[root@ceph02 yum.repos.d]# ntpdate ceph01
30 Mar 22:57:58 ntpdate[105916]: adjust time server 192.168.124.130 offset 0.023610 sec
[root@ceph02 yum.repos.d]# crontab -e
*/2 * * * * /usr/bin/ntpdate 192.168.124.130 >> /var/log/ntpdate.log
[root@ceph02 yum.repos.d]# systemctl restart crond
[root@ceph02 yum.repos.d]# crontab -l
*/2 * * * * /usr/bin/ntpdate 192.168.124.130 >> /var/log/ntpdate.log
[root@ceph03 yum.repos.d]# ntpdate ceph01
30 Mar 22:58:55 ntpdate[27637]: adjust time server 192.168.124.130 offset 0.005745 sec
[root@ceph03 yum.repos.d]# systemctl restart crond
[root@ceph03 yum.repos.d]# crontab -l
*/2 * * * * /usr/bin/ntpdate 192.168.124.130 >> /var/log/ntpdate.log
[root@ceph01 yum.repos.d]# yum install python-setuptools ceph-deploy -y
// install the deployment tool on the admin node
[root@ceph01 yum.repos.d]# mkdir /etc/ceph
[root@ceph01 yum.repos.d]# yum -y install ceph
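ceph02 and ceph03 also need the ceph package before monitors and OSDs are created on them. A minimal sketch, run from ceph01 and assuming the same repos are configured on those nodes:
ssh root@ceph02 "yum -y install ceph"
ssh root@ceph03 "yum -y install ceph"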
[root@ceph01 yum.repos.d]# cd /etc/ceph
[root@ceph01 ceph]# ceph-deploy new ceph01 ceph02 // create the cluster on ceph01, with ceph01 and ceph02 as initial monitors
[root@ceph01 ceph]# ceph-deploy mon create-initial // initialize the monitors on ceph01 and gather the keys
[root@ceph01 ceph]# ceph -s // check the cluster status
cluster:
id: 3ef8403a-1b42-4b95-b859-f8b01111440f
health: HEALTH_OK
services:
mon: 2 daemons, quorum ceph01,ceph02
mgr: no daemons active
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
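Before creating the OSDs it can help to check that /dev/sdb on each node is visible and, if it previously held data, wipe it with ceph-deploy. A sketch (disk zap destroys everything on the named device, so double-check it):
ceph-deploy disk list ceph01            # list the disks ceph-deploy sees on ceph01
ceph-deploy disk zap ceph01 /dev/sdb    # wipe the device if it held old partitions or data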
[root@ceph01 ceph]# ceph-deploy osd create --data /dev/sdb ceph01
[root@ceph01 ceph]# ceph-deploy osd create --data /dev/sdb ceph02
[root@ceph01 ceph]# ceph -s
cluster:
id: 3ef8403a-1b42-4b95-b859-f8b01111440f
health: HEALTH_WARN
no active mgr
services:
mon: 2 daemons, quorum ceph01,ceph02
mgr: no daemons active
osd: 2 osds: 2 up, 2 in // OSDs created successfully
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[root@ceph01 ceph]# ceph osd tree // view the OSD tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 1.99799 root default
-3 0.99899 host ceph01
0 hdd 0.99899 osd.0 up 1.00000 1.00000
-5 0.99899 host ceph02
1 hdd 0.99899 osd.1 up 1.00000 1.00000
[root@ceph01 ceph]# ceph osd stat // check OSD status
2 osds: 2 up, 2 in; epoch: e9
[root@ceph01 ceph]# ceph-deploy admin ceph01 ceph02
[root@ceph01 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring // the keyring only needs to be readable
[root@ceph02 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
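With the admin keyring pushed out and readable, ceph02 should be able to query the cluster directly; a quick sketch to confirm:
ssh root@ceph02 "ceph -s"    # should print the same cluster status as on ceph01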
[root@ceph01 ceph]# ceph-deploy osd create --data /dev/sdb ceph03
[root@ceph01 ceph]# ceph -s
cluster:
id: 3ef8403a-1b42-4b95-b859-f8b01111440f
health: HEALTH_WARN
no active mgr
services:
mon: 2 daemons, quorum ceph01,ceph02
mgr: no daemons active
osd: 3 osds: 3 up, 3 in // added successfully
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[root@ceph01 ceph]# vi /etc/ceph/ceph.conf
[global]
fsid = 3ef8403a-1b42-4b95-b859-f8b01111440f
mon_initial_members = ceph01, ceph02, ceph03 // add ceph03
mon_host = 192.168.124.130,192.168.124.131,192.168.124.133 // add ceph03's IP
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 192.168.124.0/24 // add the public network used for cluster communication
[root@ceph01 ceph]# ceph-deploy --overwrite-conf admin ceph03 // push the config and admin key to ceph03
[root@ceph03 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@ceph01 ceph]# ceph-deploy mon add ceph03 // add the ceph03 mon to the cluster
[root@ceph01 ceph]# systemctl restart ceph-mon.target // restart the mon service on all three nodes
[root@ceph01 ceph]# ceph -s
cluster:
id: 3ef8403a-1b42-4b95-b859-f8b01111440f
health: HEALTH_WARN
no active mgr
services:
mon: 3 daemons, quorum ceph01,ceph02,ceph03 // joined successfully
mgr: no daemons active
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[root@ceph01 ceph]# systemctl list-unit-files | grep mon // if you are not sure of the mon unit name, you can find it with this command
accounts-daemon.service enabled
avahi-daemon.service enabled
[email protected] enabled
certmonger.service disabled
lvm2-monitor.service enabled
[email protected] static
mdmonitor-oneshot.service static
mdmonitor.service enabled
ndctl-monitor.service disabled
rtkit-daemon.service enabled
avahi-daemon.socket enabled
ceph-mon.target enabled
[root@ceph01 ceph]# ceph osd tree // view OSD information
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 2.99698 root default
-3 0.99899 host ceph01
0 hdd 0.99899 osd.0 up 1.00000 1.00000
-5 0.99899 host ceph02
1 hdd 0.99899 osd.1 up 1.00000 1.00000
-7 0.99899 host ceph03
2 hdd 0.99899 osd.2 up 1.00000 1.00000
[root@ceph01 ceph]# ceph osd out osd.2 // mark osd.2 out
marked out osd.2.
[root@ceph01 ceph]# ceph osd crush remove osd.2 // remove osd.2 from the CRUSH map
removed item id 2 name 'osd.2' from crush map
[root@ceph01 ceph]# ceph auth del osd.2 ## delete osd.2's authentication key
updated
[root@ceph01 ceph]# ceph osd tree // osd.2 no longer has a weight
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 1.99799 root default
-3 0.99899 host ceph01
0 hdd 0.99899 osd.0 up 1.00000 1.00000
-5 0.99899 host ceph02
1 hdd 0.99899 osd.1 up 1.00000 1.00000
-7 0 host ceph03
2 0 osd.2 up 0 1.00000
[root@ceph03 yum.repos.d]# systemctl restart ceph-osd.target
// restart the OSD service on ceph03
[root@ceph01 ceph]# ceph osd rm osd.2 // remove it completely
removed osd.2
[root@ceph01 ceph]# ceph osd tree // osd.2 has been removed
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 1.99799 root default
-3 0.99899 host ceph01
0 hdd 0.99899 osd.0 up 1.00000 1.00000
-5 0.99899 host ceph02
1 hdd 0.99899 osd.1 up 1.00000 1.00000
-7 0 host ceph03
// Log in to ceph03
[root@ceph03 yum.repos.d]# df -hT // check disk information
... output omitted
tmpfs tmpfs 1.9G 52K 1.9G 1% /var/lib/ceph/osd/ceph-2
[root@ceph03 yum.repos.d]# cd /var/lib/ceph/osd/ceph-2/
[root@ceph03 ceph-2]# more fsid // view the OSD's fsid
dfdd5560-a9c2-47d2-97be-a47f3db9d327
[root@ceph03 ceph-2]# ceph osd create dfdd5560-a9c2-47d2-97be-a47f3db9d327 // recover the OSD: ceph osd create <uuid>
2
[root@ceph03 ceph-2]# ceph auth add osd.2 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-2/keyring // restore its auth key and permissions
added key for osd.2
[root@ceph03 ceph-2]# ceph osd crush add 2 0.99899 host=ceph03 // 0.99899 is the weight; host is the hostname
set item id 2 name 'osd.2' weight 0.99899 at location {host=ceph03} to crush map
[root@ceph03 ceph-2]# ceph osd in osd.2
osd.2 is already in.
[root@ceph03 ceph-2]# systemctl restart ceph-osd.target // restart the service
[root@ceph01 ceph]# ceph osd tree // check the status; osd.2 has been restored
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 2.99696 root default
-3 0.99899 host ceph01
0 hdd 0.99899 osd.0 up 1.00000 1.00000
-5 0.99899 host ceph02
1 hdd 0.99899 osd.1 up 1.00000 1.00000
-7 0.99898 host ceph03
2 hdd 0.99898 osd.2 up 1.00000 1.00000
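Once the OSD rejoins, Ceph moves data back onto it; the rebalance can be watched from ceph01 with a sketch like this:
ceph osd df    # per-OSD weight, capacity and PG count
ceph -s        # overall health while backfill/recovery completes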
[root@ceph01 ceph]# ceph-deploy mgr create ceph01 ceph02 ceph03 // create the mgr daemons
[root@ceph01 ceph]# ceph -s
cluster:
id: 3ef8403a-1b42-4b95-b859-f8b01111440f
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph01,ceph02,ceph03
mgr: ceph01(active), standbys: ceph02, ceph03 // created successfully
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 3.0 TiB / 3.0 TiB avail
pgs:
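With an active mgr, the Mimic dashboard module can optionally be enabled as well; a minimal sketch, where admin/admin123 are placeholder credentials to replace:
ceph mgr module enable dashboard                      # turn on the dashboard module
ceph dashboard create-self-signed-cert                # generate a self-signed TLS certificate
ceph dashboard set-login-credentials admin admin123   # placeholder username and password
ceph mgr services                                     # prints the dashboard URL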
[root@ceph01 ceph]# ceph osd pool create cinder 64
pool 'cinder' created
[root@ceph01 ceph]# ceph osd pool create nova 64
pool 'nova' created
[root@ceph01 ceph]# ceph osd pool create glance 64
pool 'glance' created
[root@ceph01 ceph]# ceph osd pool ls
cinder
nova
glance
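Since these pools are intended for OpenStack (cinder, nova, glance), they would normally be tagged with the rbd application, and the setup can be smoke-tested with a throwaway image; a sketch, where test-img is just a hypothetical name:
ceph osd pool application enable glance rbd    # tag the pool for RBD use
rbd create --size 1024 glance/test-img         # create a 1 GiB test image
rbd ls glance                                  # should list test-img
rbd rm glance/test-img                         # clean up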
[root@ceph01 ceph]# vi ceph.conf
mon_allow_pool_delete=true // allow pools to be deleted
[root@ceph01 ceph]# ceph-deploy --overwrite-conf admin ceph02 ceph03 // push the config to the other two nodes
[root@ceph01 ceph]# systemctl restart ceph-mon.target // restart the mon service on all three nodes
[root@ceph01 ceph]# ceph osd pool rm cinder cinder --yes-i-really-really-mean-it
pool 'cinder' removed
[root@ceph01 ceph]# ceph osd pool ls
nova
glance
[root@ceph01 ceph]# ceph osd pool rename nova nova2
pool 'nova' renamed to 'nova2'
[root@ceph01 ceph]# ceph osd pool ls
nova2
glance
[root@ceph01 ceph]# ceph --help // list the available ceph subcommands
[root@ceph01 ceph]# ceph osd --help // help for the osd subcommands
[root@ceph01 ceph]# vi ceph.conf
public_network = 192.168.124.0/24
[root@ceph01 ceph]# ceph-deploy --overwrite-conf admin ceph02 ceph03 // push the file to ceph02 and ceph03
[root@ceph01 ceph]# systemctl restart ceph-mon.target
[root@ceph01 ceph]# systemctl restart ceph-osd.target
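As a final check after the configuration changes and service restarts, the cluster should report HEALTH_OK; a short verification sketch:
ceph -s             # overall cluster status
ceph health detail  # details if anything is not HEALTH_OK
ceph df             # pool and raw capacity usage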