Linux 版本:CentOS Linux release 7.2 (Final)
CEPH版本:ceph version 11.2.0
内网无法连接到官网地址,因此可以通过两种方式进行安装
A)部署内部YUM源站点,通过ceph-deploy工具指定URL
B)下载RPM包并上传到服务器,手工安装
这里采用第二种安装方式
1、通过官方网站下载最新版RPM包
2、解决依赖关系
安装之前,先检查下列包是否存在,不同的Linux内核版本会有一些差异
yum install -y junit boost gdisk fcgi xmlstarlet java-devel snappy cryptsetup
yum install python-flask-0.10.1-4.el7.noarch
yum install gperftools-libs-2.2.1-1.2.x86_64.rpm
yum install libbabeltrace-1.2.4-3.el7.x86_64.rpm
yum install lttng-ust-2.4.1-1.el7.1.x86_64.rpm
yum install userspace-rcu-0.7.16-1.el7.x86_64.rpm
yum install python-requests
yum install python-setuptools
yum install glibc.i686
3、Jewel版本之后默认用户为ceph,安装之前先创建ceph用户并授权
adduser -d /home/ceph -m ceph
passwd ceph
# 设置用户权限
echo "ceph ALL=(root) NOPASSWD:ALL" | tee /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph
4、requiretty准备
visudo
# 注释掉Defaults requiretty
# Defaults requiretty修改为 #Defaults requiretty, 表示不需要控制终端。
# 否则会出现sudo: sorry, you must have a tty to run sudo
5、防火墙设置
根据公司安全策略决定是否关闭防火墙,如果不关闭,多台机器间的通讯可能会出现端口不通的情况
systemctl stop firewalld.service
systemctl disable firewalld.service
setenforce 0
# 注意:setenforce 0 仅临时关闭SELinux,重启后失效;如需永久关闭,请修改/etc/selinux/config中的SELINUX=disabled
6、修改别名
ceph.conf中可以通过配置别名进行多个监控服务之间的通讯,修改hosts中的别名,或者通过hostnamectl设置
vi /etc/hosts
# xxx.xxx.xxx.xxx ceph-node1
rpm -ivh libbabeltrace-1.2.1-1.fc20.x86_64.rpm
rpm -ivh leveldb-1.12.0-5.el7.1.x86_64.rpm
rpm -ivh leveldb-devel-1.12.0-5.el7.1.x86_64.rpm
rpm -ivh selinux-policy-3.13.1-60.el7_2.9.noarch.rpm
rpm -ivh selinux-policy-targeted-3.13.1-60.el7_2.9.noarch.rpm
# 下载并解压CEPH安装包
rpm -ivh *
# 1:librados2-1:11.1.0-6896.g2aab144.################################# [ 3%]
# 2:librbd1-1:11.1.0-6896.g2aab144.el################################# [ 6%]
# 3:libcephfs2-1:11.1.0-6896.g2aab144################################# [ 9%]
# 4:python-rados-1:11.1.0-6896.g2aab1################################# [ 13%]
# 5:librados-devel-1:11.1.0-6896.g2aa################################# [ 16%]
# 6:librgw2-1:11.1.0-6896.g2aab144.el################################# [ 19%]
# 7:libradosstriper1-1:11.1.0-6896.g2################################# [ 22%]
# 8:python-rgw-1:11.1.0-6896.g2aab144################################# [ 25%]
# 9:python-cephfs-1:11.1.0-6896.g2aab################################# [ 28%]
# 10:python-rbd-1:11.1.0-6896.g2aab144################################# [ 31%]
# 11:ceph-common-1:11.1.0-6896.g2aab14################################# [ 34%]
# 12:ceph-selinux-1:11.1.0-6896.g2aab1################################# [ 38%]
# 13:ceph-base-1:11.1.0-6896.g2aab144.################################# [ 41%]
# 14:libcephfs_jni1-1:11.1.0-6896.g2aa################################# [ 44%]
# 15:ceph-mds-1:11.1.0-6896.g2aab144.e################################# [ 47%]
# 16:ceph-mgr-1:11.1.0-6896.g2aab144.e################################# [ 50%]
# 17:ceph-mon-1:11.1.0-6896.g2aab144.e################################# [ 53%]
# 18:ceph-osd-1:11.1.0-6896.g2aab144.e################################# [ 56%]
# 19:ceph-1:11.1.0-6896.g2aab144.el7 ################################# [ 59%]
# 20:cephfs-java-1:11.1.0-6896.g2aab14################################# [ 63%]
# 21:libcephfs_jni-devel-1:11.1.0-6896################################# [ 66%]
# 22:ceph-radosgw-1:11.1.0-6896.g2aab1################################# [ 69%]
# 23:ceph-test-1:11.1.0-6896.g2aab144.################################# [ 72%]
# 24:rbd-mirror-1:11.1.0-6896.g2aab144################################# [ 75%]
# 25:python-ceph-compat-1:11.1.0-6896.################################# [ 78%]
# 26:libradosstriper-devel-1:11.1.0-68################################# [ 81%]
# 27:librgw-devel-1:11.1.0-6896.g2aab1################################# [ 84%]
# 28:libcephfs-devel-1:11.1.0-6896.g2a################################# [ 88%]
# 29:librbd-devel-1:11.1.0-6896.g2aab1################################# [ 91%]
# 30:rbd-fuse-1:11.1.0-6896.g2aab144.e################################# [ 94%]
# 31:rbd-nbd-1:11.1.0-6896.g2aab144.el################################# [ 97%]
# 32:ceph-fuse-1:11.1.0-6896.g2aab144.################################# [100%]
ceph --version
# ceph version 11.1.0-6896-g2aab144 (2aab1443fa28e47e81add9a5aa999c5f7dc39e70)
# 将服务端的配置拷贝到客户端
cd /etc/ceph
vi ceph.client.admin.keyring
[client.admin]
key = AQDeBDFZCNYwDxAAwyhHxRK6Cd7OMDm==
auid = 0
caps mds = "allow"
caps mon = "allow *"
caps osd = "allow *"
vi ceph.conf
#添加以下内容
fsid = deeade81-2e18-4d1e-a37a-xxxxxxxxxxxx  # 替换为完整的UUID(可用uuidgen生成)
mon_initial_members = ser-node1,ser-node2,ser-node3
mon_host = xxx.xxx.xxx.xxx,xxx.xxx.xxx.xxx,xxx.xxx.xxx.xxx
auth_cluster_required = none
auth_service_required = none
auth_client_required = none
mon_allow_pool_delete = true
mon clock drift allowed = 2
mon clock drift warn backoff = 30
#enable_experimental_unrecoverable_data_corrupting_features = ms-type-async
ms_type = async
ms_async_op_threads = 4
osd_find_best_info_ignore_history_les = true
mon_pg_warn_max_per_osd = 1000
max_open_files = 1000000
throttler_perf_counter = false
debug_lockdep = 0/0
debug_context = 0/0
debug_crush = 0/0
debug_mds = 0/0
debug_mds_balancer = 0/0
debug_mds_locker = 0/0
debug_mds_log = 0/0
debug_mds_log_expire = 0/0
debug_mds_migrator = 0/0
debug_buffer = 0/0
debug_timer = 0/0
debug_filer = 0/0
debug_striper = 0/0
debug_objecter = 0/0
debug_rados = 0/0
debug_rbd = 0/0
debug_rbd_mirror = 0/0
debug_rbd_replay = 0/0
debug_journaler = 0/0
debug_objectcacher = 0/0
debug_client = 0/0
debug_osd = 0/0
debug_optracker = 0/0
debug_objclass = 0/0
debug_filestore = 0/0
debug_journal = 0/0
debug_ms = 0/0
debug_mon = 0/0
debug_monc = 0/0
debug_paxos = 0/0
debug_tp = 0/0
debug_auth = 0/0
debug_crypto = 0/0
debug_finisher = 0/0
debug_heartbeatmap = 0/0
debug_perfcounter = 0/0
debug_rgw = 0/0
debug_civetweb = 0/0
debug_javaclient = 0/0
debug_asok = 0/0
debug_throttle = 0/0
debug_refs = 0/0
debug_xio = 0/0
debug_compressor = 0/0
debug_bluestore = 0/0
debug_bluefs = 0/0
debug_bdev = 0/0
debug_kstore = 0/0
debug_rocksdb = 0/0
debug_leveldb = 0/0
debug_memdb = 0/0
debug_kinetic = 0/0
debug_fuse = 0/0
debug_mgr = 0/0
debug_mgrc = 0/0
debug_dpdk = 0/0
debug_eventtrace = 0/0
osd_pool_default_size = 1
#ms_crc_data = false
#ms_crc_header = false
#ms_async_op_threads = 5
osd_op_num_threads_per_shard = 3
osd_op_num_shards = 6
osd_enable_op_tracker = false
filestore_wbthrottle_enable = false
filestore_queue_max_ops = 10240
filestore_queue_max_bytes = 1024000000
filestore_op_threads = 6
filestore_fd_cache_size = 1024
filestore_fd_cache_shards = 32
filestore_ondisk_finisher_threads = 4
filestore_apply_finisher_threads = 4
rbd cache = false
rbd_enable_alloc_hint = false
rbd_op_threads = 4
rbd_cache_writethrough_until_flush = false
分配700G,3副本的空间
计算最佳pg数量的公式
# pg总数 = OSD数 × 100 / 副本数(本例副本数为3),结果取最接近的2的幂
# 400个pg
ceph osd pool create poolname 400 400
# 3副本
ceph osd pool set poolname size 3
# 700G空间,数据共享模式
rbd create poolname/poolname-data --image-shared -s 700000 --stripe-count 24 --stripe-unit 64K
# 挂载
rbd-nbd map poolname/poolname-data
lsblk
# NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
# nbd0 43:0 0 683.6G 0 disk