Every Ceph cluster requires at least one monitor, and at least as many OSDs as there are copies of an object stored in the cluster. Bootstrapping the initial monitor(s) is the first step in deploying a Ceph Storage Cluster. Monitor deployment also sets important criteria for the entire cluster, such as the number of replicas for pools, the number of placement groups per OSD, the heartbeat intervals, and whether authentication is required. Most of these values are set by default, so it is useful to know about them when setting up your cluster for production.
Bootstrapping a monitor (and by extension a Ceph Storage Cluster) requires a number of things:
Unique Identifier: the fsid is the unique identifier for the cluster.
Cluster Name: the Ceph cluster name, a simple string without spaces. The default cluster name is ceph, but you may specify a different one.
Monitor Name: each monitor instance within the cluster has a unique name. Conventionally, the Ceph Monitor name is the hostname.
Monitor Map: bootstrapping the initial monitor(s) requires generating a monitor map. The monitor map requires the fsid, the cluster name (or the default), and at least one hostname and its IP address.
Monitor Keyring: monitors communicate with each other via a secret key. You must generate a keyring with a monitor secret and provide it when bootstrapping the initial monitor(s).
Administrator Keyring: to use the ceph CLI tools, you must have a client.admin user, so you must generate the admin user and keyring, and you must also add the client.admin user to the monitor keyring.
Unless otherwise noted, run the following steps on all three nodes.
#node1
hostnamectl set-hostname node1
#node2
hostnamectl set-hostname node2
#node3
hostnamectl set-hostname node3
cat >> /etc/hosts <<EOF
192.168.93.20 node1
192.168.93.21 node2
192.168.93.22 node3
EOF
# Disable the firewall and SELinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Configure time synchronization
yum install -y chrony
systemctl enable --now chronyd
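To confirm that chrony is actually synchronizing, a quick optional check:
chronyc sources -v
timedatectl | grep -i 'ntp synchronized'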
# CentOS base repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# EPEL repo
yum install -y epel-release
# Aliyun Ceph repo; the baseurl pins the Ceph release to install (Nautilus)
cat >/etc/yum.repos.d/ceph.repo <<EOF
[Ceph]
name=Ceph packages for \$basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/\$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOF
yum clean all
yum makecache
# On node1 (which will serve the offline repo over httpd), download the packages without installing them
yum install createrepo --downloadonly --downloaddir=/var/www/html/ceph
yum install httpd --downloadonly --downloaddir=/var/www/html/ceph
yum install snappy leveldb gdisk python-argparse gperftools-libs --downloadonly --downloaddir=/var/www/html/ceph
yum install ceph --downloadonly --downloaddir=/var/www/html/ceph
# Create the offline repository
createrepo /var/www/html/ceph
# Update the offline repo metadata (rerun after adding packages)
createrepo --update /var/www/html/ceph
# Serve the offline repo over httpd (still on node1)
yum install -y httpd
systemctl restart httpd
systemctl enable httpd
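Before repointing the clients, you can verify that httpd is serving the repo metadata (this assumes the default DocumentRoot of /var/www/html):
curl -s http://192.168.93.20/ceph/repodata/repomd.xml | head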
# On all nodes, point ceph.repo at the internal mirror
cat >/etc/yum.repos.d/ceph.repo <<EOF
[ceph-repo]
name=internal-ceph-repo
baseurl=http://192.168.93.20/ceph/
enabled=1
gpgcheck=0
EOF
# Refresh the repo metadata
yum clean all
yum makecache
# Install Ceph dependencies
yum install -y snappy leveldb gdisk python-argparse gperftools-libs
# Install the Ceph packages
yum install -y ceph
# Generate an fsid for the new cluster and keep it in a variable for later steps
[root@node1 ~]# uuidgen
8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
export cephuuid=8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
cat > /etc/ceph/ceph.conf <<EOF
[global]
fsid = 8d2cfd33-9132-48a7-8c00-3ef10cb5ddeb
mon initial members = node1, node2, node3
mon host = 192.168.93.20, 192.168.93.21, 192.168.93.22
public network = 192.168.93.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 128
osd pool default pgp num = 128
osd crush chooseleaf type = 1
EOF
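A quick way to sanity-check the file is to read a value back out of it with ceph-conf (optional):
ceph-conf -c /etc/ceph/ceph.conf --lookup fsid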
# Create the monitor keyring and generate a monitor secret key
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
# Create the administrator keyring and generate the client.admin user and key
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
# Create the bootstrap-osd keyring and generate the client.bootstrap-osd user and key
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'
# Import both keys into the monitor keyring, and make it readable by the ceph user
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
chown ceph:ceph /tmp/ceph.mon.keyring
# On node1, generate the initial monitor map from the hostnames, IPs, and fsid
monmaptool --create --add node1 192.168.93.20 --add node2 192.168.93.21 --add node3 192.168.93.22 --fsid $cephuuid /tmp/monmap
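Optionally, print the map to confirm the fsid and the three monitor entries before distributing it:
monmaptool --print /tmp/monmap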
scp /tmp/monmap root@node2:/tmp
scp /tmp/monmap root@node3:/tmp
scp /etc/ceph/ceph.client.admin.keyring root@node2:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring root@node3:/etc/ceph/
scp /tmp/ceph.mon.keyring root@node2:/tmp/
scp /tmp/ceph.mon.keyring root@node3:/tmp/
[root@node2 ~]# chown ceph:ceph /tmp/ceph.mon.keyring
[root@node3 ~]# chown ceph:ceph /tmp/ceph.mon.keyring
#node1
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node1
#node2
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node2
#node3
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3
#node1
sudo -u ceph ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#node2
sudo -u ceph ceph-mon --mkfs -i node2 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
#node3
sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
[root@node1 ~]# ls /var/lib/ceph/mon/ceph-node1/
keyring kv_backend store.db
#node1
systemctl restart ceph-mon@node1
systemctl enable ceph-mon@node1
#node2
systemctl restart ceph-mon@node2
systemctl enable ceph-mon@node2
#node3
systemctl restart ceph-mon@node3
systemctl enable ceph-mon@node3
ceph -s
ceph mon enable-msgr2
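With msgr2 enabled, each monitor should now advertise both a v2 (port 3300) and a v1 (port 6789) address, which you can confirm with:
ceph mon dump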
On each node that runs a ceph-mon daemon, you should also set up a ceph-mgr daemon. Create the mgr user with ceph auth get-or-create, then paste the key it prints into that daemon's keyring file (the keys below are examples from this deployment):
#node1
ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node1
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node1/keyring
[mgr.node1]
key = AQDMt+9ejx8HLhAA6IqshHKAg72p8MW/s6cdLg==
#node2
ceph auth get-or-create mgr.node2 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node2
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node2/keyring
[mgr.node2]
key = AQDSt+9e+T6kKRAAW8A6zelgtQiHbdmaNCSGag==
#node3
ceph auth get-or-create mgr.node3 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3
sudo -u ceph vim /var/lib/ceph/mgr/ceph-node3/keyring
[mgr.node3]
key = AQDYt+9e9iE2EBAAP+cyRwxGP80lDDzwc/eFGA==
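Instead of pasting keys by hand, ceph auth get-or-create can also write the keyring file directly with -o; a sketch for node1 (repeat with the matching name on each node):
ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-node1/keyring
chown ceph:ceph /var/lib/ceph/mgr/ceph-node1/keyring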
#node1
systemctl restart ceph-mgr@node1
systemctl enable ceph-mgr@node1
#node2
systemctl restart ceph-mgr@node2
systemctl enable ceph-mgr@node2
#node3
systemctl restart ceph-mgr@node3
systemctl enable ceph-mgr@node3
ceph status
Reference: https://ceph.readthedocs.io/en/latest/rados/operations/add-or-rm-osds/
After you initialize the monitors, the cluster has a default CRUSH map; however, the CRUSH map does not yet have any Ceph OSD daemons mapped to a Ceph node.
Before the cluster can reach the active + clean state, you must add enough OSDs to handle the number of object replicas (e.g., osd pool default size = 2 requires at least two OSDs).
Ceph provides the ceph-volume utility, which can prepare logical volumes, disks, or partitions for use with Ceph. Run ceph-volume -h for help.
# Copy the bootstrap-osd keyring from node1 to the other nodes
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@node2:/var/lib/ceph/bootstrap-osd/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@node3:/var/lib/ceph/bootstrap-osd/
[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 70G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 69G 0 part
├─centos-root 253:0 0 60.1G 0 lvm /
├─centos-swap 253:1 0 3.9G 0 lvm [SWAP]
└─centos-home 253:2 0 5G 0 lvm /home
sdb 8:16 0 20G 0 disk
sr0 11:0 1 1G 0 rom
# Run on every node to create an OSD on /dev/sdb (this yields osd.0 on node1, osd.1 on node2, osd.2 on node3)
ceph-volume lvm create --data /dev/sdb
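ceph-volume prints the id of the OSD it creates; you can also review what was prepared on each node with:
ceph-volume lvm list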
#node1
systemctl restart ceph-osd@0
systemctl enable ceph-osd@0
#node2
systemctl restart ceph-osd@1
systemctl enable ceph-osd@1
#node3
systemctl restart ceph-osd@2
systemctl enable ceph-osd@2
[root@node1 ~]# ceph -s
Next, set up an MDS (metadata server) daemon on each node.
#node1
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node1
#node2
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node2
#node3
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-node3
#node1
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node1/keyring --gen-key -n mds.node1
#node2
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node2/keyring --gen-key -n mds.node2
#node3
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node3/keyring --gen-key -n mds.node3
#node1
ceph auth add mds.node1 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node1/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node1/keyring
#node2
ceph auth add mds.node2 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node2/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node2/keyring
#node3
ceph auth add mds.node3 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node3/keyring
chown ceph:ceph /var/lib/ceph/mds/ceph-node3/keyring
cat >> /etc/ceph/ceph.conf <<EOF
[mds.node1]
host = node1
[mds.node2]
host = node2
[mds.node3]
host = node3
EOF
#node1
systemctl restart ceph-mon@node1
systemctl restart ceph-mgr@node1
systemctl restart ceph-mds@node1
systemctl enable ceph-mds@node1
systemctl restart ceph-osd@0
#node2
systemctl restart ceph-mon@node2
systemctl restart ceph-mgr@node2
systemctl restart ceph-mds@node2
systemctl enable ceph-mds@node2
systemctl restart ceph-osd@1
#node3
systemctl restart ceph-mon@node3
systemctl restart ceph-mgr@node3
systemctl restart ceph-mds@node3
systemctl enable ceph-mds@node3
systemctl restart ceph-osd@2
[root@node1 ~]# ceph -s
[root@node1 ~]# ceph osd tree
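The MDS daemons stay in standby until a CephFS filesystem exists. If you want to bring them active, a minimal sketch (the pool names and PG counts here are illustrative, not part of the original deployment):
ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 32
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs status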
[root@node1 ~]# yum -y install ceph-radosgw
[root@node2 ~]# yum -y install ceph-radosgw
[root@node3 ~]# yum -y install ceph-radosgw
RGW needs a set of pools. Here we create them quickly with a script that reads pool names from /root/pool, one per line (an example pool list follows the script):
#!/bin/bash
# Create every pool listed in /root/pool (one name per line) with the given pg_num,
# then set its replica count
PG_NUM=64
PGP_NUM=64
SIZE=3
for i in `cat /root/pool`
do
ceph osd pool create $i $PG_NUM
ceph osd pool set $i size $SIZE
done
# Set pgp_num in a second pass, once the pools exist
for i in `cat /root/pool`
do
ceph osd pool set $i pgp_num $PGP_NUM
done
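The script expects /root/pool to exist first. For a default RGW zone the pool list typically looks like the following (an assumed example; adjust it to the pools your gateway actually needs):
cat > /root/pool <<EOF
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
default.rgw.buckets.index
default.rgw.buckets.data
EOF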
# Create a shared keyring for the RGW instances (on node1)
[root@node1 ~]# sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# sudo chown ceph:ceph /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.rgw.node1 --gen-key --cap osd 'allow rwx' --cap mon 'allow rwx'
[root@node1 ~]# ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.rgw.node1 -i /etc/ceph/ceph.client.radosgw.keyring
Append the following RGW instance sections to /etc/ceph/ceph.conf (the file is copied to node2 and node3 later):
[client.rgw.node1]
rgw frontends=fastcgi socket_port=9000 socket_host=0.0.0.0
host=node1
keyring=/etc/ceph/ceph.client.radosgw.keyring
log file=/var/log/radosgw/client.radosgw.gateway.log
rgw print continue=false
rgw content length compat = true
[client.rgw.node2]
rgw frontends=fastcgi socket_port=9000 socket_host=0.0.0.0
host=node2
keyring=/etc/ceph/ceph.client.radosgw.keyring
log file=/var/log/radosgw/client.radosgw.gateway.log
rgw print continue=false
rgw content length compat = true
[client.rgw.node3]
rgw frontends=fastcgi socket_port=9000 socket_host=0.0.0.0
host=node3
keyring=/etc/ceph/ceph.client.radosgw.keyring
log file=/var/log/radosgw/client.radosgw.gateway.log
rgw print continue=false
rgw content length compat = true
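Note that the fastcgi frontend relies on an external web server. On Nautilus the built-in beast frontend is the more common choice; if you prefer it, each section's frontends line could instead read (the port is arbitrary):
rgw frontends = beast port=7480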
[root@node1 ~]# mkdir /var/log/radosgw
[root@node1 ~]# chown ceph:ceph /var/log/radosgw
[root@node1 ~]# systemctl start [email protected]
[root@node1 ~]# systemctl enable [email protected]
The following commands need to be run on node1 only.
[root@node1 ~]# ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.rgw.node2 --gen-key
[root@node1 ~]# ceph-authtool -n client.rgw.node2 --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.rgw.node2 -i /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.rgw.node3 --gen-key
[root@node1 ~]# ceph-authtool -n client.rgw.node3 --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.rgw.node3 -i /etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# scp /etc/ceph/ceph.client.radosgw.keyring node2:/etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# scp /etc/ceph/ceph.client.radosgw.keyring node3:/etc/ceph/ceph.client.radosgw.keyring
[root@node1 ~]# scp /etc/ceph/ceph.conf node2:/etc/ceph/ceph.conf
[root@node1 ~]# scp /etc/ceph/ceph.conf node3:/etc/ceph/ceph.conf
[root@node2 ~]# mkdir /var/log/radosgw
[root@node2 ~]# chown ceph:ceph /var/log/radosgw
[root@node3 ~]# mkdir /var/log/radosgw
[root@node3 ~]# chown ceph:ceph /var/log/radosgw
[root@node2 ~]# systemctl restart [email protected]
[root@node3 ~]# systemctl restart [email protected]
[root@node2 ~]# systemctl enable [email protected]
[root@node3 ~]# systemctl enable [email protected]
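To verify the gateways end to end, you can create a test S3 user; the uid and display name below are arbitrary, and the generated access/secret keys appear in the JSON output:
radosgw-admin user create --uid=testuser --display-name='Test User'
ceph -s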
Reference:
https://blog.csdn.net/networken/article/details/106892818