I. Environment Preparation
1. This deployment uses CentOS 7.2.
2. The machines used are:
ceph-node1 192.168.13.72
ceph-node2 192.168.13.84
II. Pre-installation Preparation for Ceph
1. Prepare the ceph-deploy repository
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
yum update
yum -y install ceph-deploy
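A quick sanity check that the repo resolves and the tool installed (not in the original write-up):
ceph-deploy --version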
2. Edit the /etc/hosts file so the nodes can resolve each other by hostname (ceph-deploy was already installed in step 1)
[root@ceph-node1 my-cluster]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.13.72 ceph-node1
192.168.13.84 ceph-node2
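To verify that the nodes can now resolve each other by name, a simple check (assumed, not shown in the original):
ping -c 1 ceph-node2 ## run on ceph-node1
ping -c 1 ceph-node1 ## run on ceph-node2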
3. Synchronize time
1) Set the time zone: cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
2) On ceph-node1, edit /etc/chrony.conf so it can serve time to the other nodes (the edit itself is sketched after this list), then restart and enable the service:
systemctl restart chronyd
systemctl enable chronyd
3) On the other nodes, edit /etc/chrony.conf
## delete the other server lines and keep a single one pointing at ceph-node1:
server 192.168.13.72 iburst
4) systemctl restart chronyd
5) yum install ntp ntpdate ## ntpdate is only needed for the one-time sync below
6) ntpdate 192.168.13.72 ## force an immediate sync against ceph-node1
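The write-up does not show the actual edit made to /etc/chrony.conf on ceph-node1 in step 2); a minimal sketch, assuming ceph-node1 is to serve time to the cluster subnet (both directives are standard chrony options):
allow 192.168.13.0/24 ## let the other nodes sync from this host
local stratum 10 ## keep serving time even if upstream NTP servers are unreachable
Afterwards, running chronyc sources on ceph-node2 should list 192.168.13.72 as its time source.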
4. Create a ceph user and grant it sudo privileges
[root@ceph-node1 ~]# useradd -d /home/ceph -m ceph
[root@ceph-node1 ~]# passwd ceph
Changing password for user ceph.
New password:
BAD PASSWORD: The password is a palindrome
Retype new password:
passwd: all authentication tokens updated successfully.
[root@ceph-node1 ~]# echo "ceph ALL=(root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
ceph ALL=(root) NOPASSWD:ALL
[root@ceph-node1 ~]# chmod 0440 /etc/sudoers.d/ceph
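The same user and sudoers entry have to exist on ceph-node2 as well, since ceph-deploy will log in there as this user. A quick verification that passwordless sudo works (an assumed check, not from the original):
su - ceph -c 'sudo whoami' ## should print: root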
5. Set up passwordless SSH login
[root@ceph-node1 ~]# ssh-keygen
[root@ceph-node1 ~]# ssh-copy-id [email protected]
[root@ceph-node1 ~]# vim /root/.ssh/config
Host node1
Hostname ceph-node1
User ceph
Host node2
Hostname ceph-node2
User ceph
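Note that the key above was copied to [email protected], while the SSH config logs in as the ceph user; for ceph-deploy to connect as ceph without a password, the key presumably has to be copied to that account as well:
ssh-copy-id ceph@ceph-node2
ssh node2 hostname ## should log in without a password and print ceph-node2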
6. Disable SELinux and open the firewall for the Ceph ports: 6789 (monitor) and 6800-7300 (OSD/MGR daemons bind to a port anywhere in this range, so the whole range must be opened, not just the two endpoints)
[root@ceph-node1 systemd]# setenforce 0 ## temporary; set SELINUX=permissive in /etc/selinux/config to survive reboots
[root@ceph-node1 systemd]# iptables -I INPUT 1 -p tcp --dport 6789 -j ACCEPT
[root@ceph-node1 systemd]# iptables -I INPUT 1 -p tcp --dport 6800:7300 -j ACCEPT
[root@ceph-node1 systemd]# iptables -I OUTPUT 1 -p tcp --dport 6800:7300 -j ACCEPT
[root@ceph-node1 systemd]# iptables -I OUTPUT 1 -p tcp --dport 6789 -j ACCEPT
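These iptables rules are held only in memory and disappear on reboot; one way to persist them on CentOS 7 (assuming the iptables-services package is acceptable) is:
yum -y install iptables-services
service iptables save ## writes the live rules to /etc/sysconfig/iptables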
7. Create a new cluster
[root@ceph-node1 my-cluster]# ceph-deploy new ceph-node1 ## ceph-node1 is the local hostname
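The my-cluster working directory shown in the prompts is assumed to have been created beforehand (mkdir ~/my-cluster && cd ~/my-cluster); ceph-deploy new writes ceph.conf and ceph.mon.keyring into it. With a single cluster subnet it is common, though optional, to pin the public network in the generated ceph.conf:
echo "public network = 192.168.13.0/24" >> ceph.conf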
8. Install Ceph on both nodes
[root@ceph-node1 my-cluster]# ceph-deploy install ceph-node1 ceph-node2
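ceph-deploy install may rewrite /etc/yum.repos.d/ceph.repo with its own default release; since the repo prepared above points at mimic, pinning the release explicitly is a safer variant (an assumption, not shown in the original):
[root@ceph-node1 my-cluster]# ceph-deploy install --release mimic ceph-node1 ceph-node2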
9. Deploy the initial monitor and gather the keys
[root@ceph-node1 my-cluster]# ceph-deploy mon create-initial
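On success this step leaves the gathered keyrings in the working directory; listing them is a quick confirmation (names per standard ceph-deploy output):
ls -1 *.keyring ## expect ceph.client.admin.keyring plus ceph.bootstrap-{mds,mgr,osd,rgw}.keyring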
10. Copy the configuration file and admin keyring to the admin node and your Ceph nodes
[root@ceph-node1 my-cluster]# ceph-deploy admin ceph-node1 ceph-node2
11. Deploy a manager daemon
[root@ceph-node1 my-cluster]# ceph-deploy mgr create ceph-node1
12. Add OSDs
[root@ceph-node1 my-cluster]# ceph-deploy osd create --data /dev/vdc ceph-node1
[root@ceph-node1 my-cluster]# ceph-deploy osd create --data /dev/vdc ceph-node2
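ceph-deploy osd create expects an empty, unpartitioned device; if /dev/vdc carries old data, it can be inspected and wiped first with the standard ceph-deploy subcommands:
[root@ceph-node1 my-cluster]# ceph-deploy disk list ceph-node1 ## show the disks ceph-deploy sees
[root@ceph-node1 my-cluster]# ceph-deploy disk zap ceph-node1 /dev/vdc ## destroys all data on /dev/vdc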
13. Check the cluster status
[root@ceph-node1 my-cluster]# ceph -s
  cluster:
    id:     5c202bf8-aab1-4317-bf76-28a7ed57652a
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum ceph-node1
    mgr: ceph-node1(active)
    osd: 2 osds: 2 up, 2 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   2.0 GiB used, 98 GiB / 100 GiB avail
    pgs:
14. Add a metadata server (only needed if you plan to use the filesystem; otherwise skip this step)
[root@ceph-node1 my-cluster]# ceph-deploy mds create ceph-node1
15. Create CephFS: first create the pools
## CephFS requires at least two RADOS pools: one for data and one for metadata.
(For pool operations, see: http://docs.ceph.com/docs/jewel/rados/operations/pools/)
[root@ceph-node1 lbj]# ceph osd pool create cephfs_data 128 ## create the data pool with 128 PGs
[root@ceph-node1 lbj]# ceph osd pool create cephfs_metadata 64 ## create the metadata pool with 64 PGs
[root@ceph-node1 lbj]# ceph fs new lbjtest cephfs_metadata cephfs_data ## create the filesystem (metadata pool first, then data pool)
[root@ceph-node1 lbj]# ceph mds stat ## check the MDS status
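To double-check the new filesystem and its pools (standard status commands; the WARN note is an assumption about freshly created PGs):
[root@ceph-node1 lbj]# ceph fs ls ## should list lbjtest with cephfs_metadata and cephfs_data
[root@ceph-node1 lbj]# ceph -s ## health may show HEALTH_WARN briefly while the new PGs peer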
16. Mount
mkdir /mnt/mycephfs ## create the mount point
yum -y install ceph-fuse ## install the FUSE client
ceph-fuse -m 192.168.13.72:6789 /mnt/mycephfs/ ## mount via ceph-fuse; 192.168.13.72 is the monitor address
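To confirm the mount succeeded:
df -h /mnt/mycephfs ## should show a ceph-fuse filesystem of roughly 100 GiB
As a sketch of the kernel-client alternative (assuming the admin keyring distributed in step 10 is in place at /etc/ceph/ceph.client.admin.keyring):
mount -t ceph 192.168.13.72:6789:/ /mnt/mycephfs -o name=admin,secret=$(awk '/key/ {print $3}' /etc/ceph/ceph.client.admin.keyring)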