Add both networks to /etc/hosts on every node (192.168.48.0/24 is the public network, 192.168.126.0/24 the cluster network):
192.168.126.101 ceph01
192.168.126.102 ceph02
192.168.126.103 ceph03
192.168.126.104 ceph04
192.168.126.105 ceph-admin
192.168.48.11 ceph01
192.168.48.12 ceph02
192.168.48.13 ceph03
192.168.48.14 ceph04
192.168.48.15 ceph-admin
### Ceph officially requires a kernel version of 4.10 or newer on all nodes; verify with:
uname -r
5.2.2-1.el7.elrepo.x86_64
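The 5.x kernel shown above comes from ELRepo. If a node is still running the stock 3.10 kernel, one common way to upgrade is the ELRepo kernel-ml package (a sketch using the standard ELRepo packages, not part of the original steps; adjust to your environment):
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum -y --enablerepo=elrepo-kernel install kernel-ml        # mainline kernel
grub2-set-default 0        # the newest kernel is usually the first menu entry
reboot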
Install chrony on every node:
yum -y install chrony
All nodes synchronize time against ceph-admin. On ceph-admin, allow the 192.168.48.0/24 subnet to query it:
[root@ceph-admin ~]# vim /etc/chrony.conf
....
#allow 192.168.0.0/16
allow 192.168.48.0/24
[root@ceph-admin ~]# systemctl enable chronyd
[root@ceph-admin ~]# systemctl start chronyd
On ceph01, ceph02, ceph03, and ceph04, remove the other server lines from /etc/chrony.conf so that ceph-admin is the only time server:
vim /etc/chrony.conf
...
server 192.168.48.15 iburst
systemctl enable chronyd
systemctl start chronyd
[root@ceph01 ~]# chronyc sources -v
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^* ceph-admin 3 6 17 12 +100us[ +136us] +/- 52ms
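Optionally confirm the offset and synchronization state on each node (a quick check, not required):
chronyc tracking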
Each node provides two extra 10 GB disks, sdb and sdc, which will be used for OSDs:
lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0  100G  0 disk
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0    2G  0 lvm
  └─centos-home 253:2    0   47G  0 lvm  /home
sdb               8:16   0   10G  0 disk
sdc               8:32   0   10G  0 disk
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
# quote the heredoc delimiter so that $basearch is written literally instead of being expanded by the shell
cat > /etc/yum.repos.d/epel.repo << 'EOF'
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
EOF
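Both repo files must exist on every node: ceph-deploy is invoked later with --no-adjust-repos, so it relies on the repositories already being configured on the nodes. A minimal sketch for distributing them from ceph-admin, assuming root SSH access:
for h in ceph01 ceph02 ceph03 ceph04; do
  scp /etc/yum.repos.d/ceph.repo /etc/yum.repos.d/epel.repo root@$h:/etc/yum.repos.d/
done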
Create the deployment user; this is needed on every node (a loop sketch follows below):
useradd cephadm
echo "ceph" | passwd --stdin cephadm
vim /etc/sudoers.d/cephadm
cephadm ALL=(root) NOPASSWD: ALL
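A sketch that repeats the user creation on the remaining nodes over SSH (assumes root SSH access to the nodes):
for h in ceph01 ceph02 ceph03 ceph04; do
  ssh root@$h 'useradd cephadm; echo "ceph" | passwd --stdin cephadm; \
    echo "cephadm ALL=(root) NOPASSWD: ALL" > /etc/sudoers.d/cephadm'
done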
On ceph-admin, switch to the cephadm user, generate an SSH key, and copy it to every node:
su - cephadm
ssh-keygen
ssh-copy-id cephadm@ceph-admin
ssh-copy-id cephadm@ceph01
ssh-copy-id cephadm@ceph02
ssh-copy-id cephadm@ceph03
ssh-copy-id cephadm@ceph04
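A quick check that passwordless SSH works from ceph-admin to every node:
for h in ceph-admin ceph01 ceph02 ceph03 ceph04; do ssh cephadm@$h hostname; done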
[root@ceph-admin ~]# yum install ceph-deploy python-setuptools python2-subprocess32 ceph-common
On each Ceph node (ceph01 through ceph04), install the Ceph packages in advance:
yum -y install ceph ceph-radosgw
Run the deployment commands on ceph-admin as the cephadm user:
[cephadm@ceph-admin ~]$ mkdir ceph-cluster
[cephadm@ceph-admin ~]$ cd ceph-cluster/
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy install ceph01 ceph02 ceph03 ceph04 --no-adjust-repos
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy new --cluster-network 192.168.126.0/24 --public-network 192.168.48.0/24 ceph01 ceph02 ceph03
[cephadm@ceph-admin ceph-cluster]$ ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
[cephadm@ceph-admin ceph-cluster]$ cat ceph.conf
[global]
fsid = a384da5c-a9ae-464a-8a92-e23042e5d267
public_network = 192.168.48.0/24
cluster_network = 192.168.126.0/24
mon_initial_members = ceph01, ceph02, ceph03
mon_host = 192.168.48.11,192.168.48.12,192.168.48.13
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy mon create-initial
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy mon add <hostname>    # add a further monitor later if needed
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy admin ceph01 ceph02 ceph03 ceph04 ceph-admin
setfacl -m u:cephadm:rw /etc/ceph/ceph.client.admin.keyring
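The ACL lets the cephadm user read the admin keyring; it is needed on every node that received the keyring. One way to apply it from ceph-admin (a sketch relying on the passwordless sudo configured earlier):
for h in ceph01 ceph02 ceph03 ceph04 ceph-admin; do
  ssh cephadm@$h 'sudo setfacl -m u:cephadm:rw /etc/ceph/ceph.client.admin.keyring'
done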
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy mgr create ceph04
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy mgr create ceph03
[cephadm@ceph-admin ceph-cluster]$ ceph -s
cluster:
id: 8a83b874-efa4-4655-b070-704e63553839
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 29m)
mgr: ceph04(active, since 30s), standbys: ceph03
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph01 /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph02 /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph03 /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph04 /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph01 /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph02 /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph03 /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy disk zap ceph04 /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph01 --data /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph02 --data /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph03 --data /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph04 --data /dev/sdb
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph01 --data /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph02 --data /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph03 --data /dev/sdc
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd create ceph04 --data /dev/sdc
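The zap and create steps above can also be written as a loop over the same hosts and devices (purely a convenience sketch, equivalent to the commands already shown):
for h in ceph01 ceph02 ceph03 ceph04; do
  for d in /dev/sdb /dev/sdc; do
    ceph-deploy disk zap $h $d
    ceph-deploy osd create $h --data $d
  done
done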
You can then list the OSDs on a given node with the "ceph-deploy osd list" command:
[cephadm@ceph-admin ceph-cluster]$ ceph-deploy osd list ceph01
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadm/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /bin/ceph-deploy osd list ceph01
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : list
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf :
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : ['ceph01']
[ceph_deploy.cli][INFO ] func :
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph01][DEBUG ] connection detected need for sudo
[ceph01][DEBUG ] connected to host: ceph01
[ceph01][DEBUG ] detect platform information from remote host
[ceph01][DEBUG ] detect machine type
[ceph01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Listing disks on ceph01...
[ceph01][DEBUG ] find the location of an executable
[ceph01][INFO ] Running command: sudo /usr/sbin/ceph-volume lvm list
[ceph01][DEBUG ]
[ceph01][DEBUG ]
[ceph01][DEBUG ] ====== osd.0 =======
[ceph01][DEBUG ]
[ceph01][DEBUG ] [block] /dev/ceph-25b4e0c5-0297-41c4-8c84-3166cf46e5a6/osd-block-d9349281-5ae9-49d7-8c8c-ca3774320fbd
[ceph01][DEBUG ]
[ceph01][DEBUG ] block device /dev/ceph-25b4e0c5-0297-41c4-8c84-3166cf46e5a6/osd-block-d9349281-5ae9-49d7-8c8c-ca3774320fbd
[ceph01][DEBUG ] block uuid sKPwP3-o1L3-xbBu-az3d-N0MB-XOq9-0psakY
[ceph01][DEBUG ] cephx lockbox secret
[ceph01][DEBUG ] cluster fsid 8a83b874-efa4-4655-b070-704e63553839
[ceph01][DEBUG ] cluster name ceph
[ceph01][DEBUG ] crush device class None
[ceph01][DEBUG ] encrypted 0
[ceph01][DEBUG ] osd fsid d9349281-5ae9-49d7-8c8c-ca3774320fbd
[ceph01][DEBUG ] osd id 0
[ceph01][DEBUG ] type block
[ceph01][DEBUG ] vdo 0
[ceph01][DEBUG ] devices /dev/sdb
[ceph01][DEBUG ]
[ceph01][DEBUG ] ====== osd.4 =======
[ceph01][DEBUG ]
[ceph01][DEBUG ] [block] /dev/ceph-f8d33be2-c8c2-4e7f-97ed-892cbe14487c/osd-block-e3102a32-dfb3-42c7-8d6f-617c030808f7
[ceph01][DEBUG ]
[ceph01][DEBUG ] block device /dev/ceph-f8d33be2-c8c2-4e7f-97ed-892cbe14487c/osd-block-e3102a32-dfb3-42c7-8d6f-617c030808f7
[ceph01][DEBUG ] block uuid 1vdMB5-bjal-IKY2-PBzw-S0c1-48kV-4Hfszq
[ceph01][DEBUG ] cephx lockbox secret
[ceph01][DEBUG ] cluster fsid 8a83b874-efa4-4655-b070-704e63553839
[ceph01][DEBUG ] cluster name ceph
[ceph01][DEBUG ] crush device class None
[ceph01][DEBUG ] encrypted 0
[ceph01][DEBUG ] osd fsid e3102a32-dfb3-42c7-8d6f-617c030808f7
[ceph01][DEBUG ] osd id 4
[ceph01][DEBUG ] type block
[ceph01][DEBUG ] vdo 0
[ceph01][DEBUG ] devices /dev/sdc
In fact, the administrator can also inspect the OSDs with the ceph command directly:
[cephadm@ceph-admin ceph-cluster]$ ceph osd stat
8 osds: 8 up (since 58s), 8 in (since 58s); epoch: e33
Or use the following command for related information:
[cephadm@ceph-admin ceph-cluster]$ ceph osd ls
0
1
2
3
4
5
6
7
[cephadm@ceph-admin ceph-cluster]$ ceph -s
cluster:
id: 8a83b874-efa4-4655-b070-704e63553839
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 37m)
mgr: ceph04(active, since 7m), standbys: ceph03
osd: 8 osds: 8 up (since 115s), 8 in (since 115s)
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 8.0 GiB used, 64 GiB / 72 GiB avail
pgs:
[cephadm@ceph-admin ceph-cluster]$ ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.07031 root default
-3       0.01758     host ceph01
 0   hdd 0.00879         osd.0       up  1.00000 1.00000
 4   hdd 0.00879         osd.4       up  1.00000 1.00000
-5       0.01758     host ceph02
 1   hdd 0.00879         osd.1       up  1.00000 1.00000
 5   hdd 0.00879         osd.5       up  1.00000 1.00000
-7       0.01758     host ceph03
 2   hdd 0.00879         osd.2       up  1.00000 1.00000
 6   hdd 0.00879         osd.6       up  1.00000 1.00000
-9       0.01758     host ceph04
 3   hdd 0.00879         osd.3       up  1.00000 1.00000
 7   hdd 0.00879         osd.7       up  1.00000 1.00000