Manually adding OSDs on Ceph Hammer (0.94.3)
System environment: CentOS Linux release 7.2.1511 (Core)
Install packages:
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install https://mirrors.aliyun.com/ceph/rpm-hammer/el7/x86_64/ceph-0.94.3-0.el7.centos.x86_64.rpm
Tuning:
systemctl disable NetworkManager firewalld
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config
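The sed only edits the on-disk config; SELinux stays active until the next boot. To switch to permissive mode immediately (optional, since the host reboots later anyway):
setenforce 0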
Raise the system-wide default limits for open files (NOFILE) and processes (NPROC):
sed -i "s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=32768/g" /etc/systemd/system.conf
sed -i "s/#DefaultLimitNPROC=/DefaultLimitNPROC=32768/g" /etc/systemd/system.conf
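These defaults take effect only after systemd re-executes or the host reboots (done below). To re-execute immediately and confirm the new values:
systemctl daemon-reexec
systemctl show -p DefaultLimitNOFILE -p DefaultLimitNPROC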
Check the disks to be added with fdisk:
fdisk -l | grep "/dev/sd"
Set read_ahead for the OSD disks:
cat >> /etc/rc.local << EOF
echo "8192" > /sys/block/sda/queue/read_ahead_kb
echo "8192" > /sys/block/sdb/queue/read_ahead_kb
echo "8192" > /sys/block/sdc/queue/read_ahead_kb
echo "8192" > /sys/block/sdd/queue/read_ahead_kb
echo "8192" > /sys/block/sde/queue/read_ahead_kb
echo "8192" > /sys/block/sdf/queue/read_ahead_kb
echo "8192" > /sys/block/sdg/queue/read_ahead_kb
echo "8192" > /sys/block/sdh/queue/read_ahead_kb
EOF
For non-SSD (spinning) disks, set the deadline I/O scheduler:
cat >> /etc/rc.local << EOF
echo "deadline" > /sys/block/sda/queue/scheduler
echo "deadline" > /sys/block/sdb/queue/scheduler
echo "deadline" > /sys/block/sdc/queue/scheduler
echo "deadline" > /sys/block/sdd/queue/scheduler
echo "deadline" > /sys/block/sde/queue/scheduler
echo "deadline" > /sys/block/sdf/queue/scheduler
echo "deadline" > /sys/block/sdg/queue/scheduler
echo "deadline" > /sys/block/sdh/queue/scheduler
EOF
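Note: on CentOS 7, /etc/rc.d/rc.local is not executable by default, so the commands appended above will not run at boot until it is marked executable:
chmod +x /etc/rc.d/rc.local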
Format the disks:
for n in {a..h};do mkfs.xfs -f /dev/sd${n}1 ;done
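mkfs.xfs above targets partition 1 on each disk, so partitions must already exist. If the disks are blank, a minimal sketch to create one full-disk partition per drive (destructive; assumes a GPT label is acceptable):
for n in {a..h}; do parted -s /dev/sd${n} mklabel gpt mkpart primary xfs 0% 100%; done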
Allocate OSD ids:
ceph osd create
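ceph osd create allocates one id per invocation and prints it, so run it once per disk. A sketch assuming the cluster's next free ids are 104-111, matching the directories below:
for i in {1..8}; do ceph osd create; done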
Create a mount directory for each OSD id and symlink it under /var/lib/ceph/osd:
mkdir -p /var/local/osd{104..111}
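A sketch for the symlinks, matching the /var/local/osdNNN mount points in fstab below and the /var/lib/ceph/osd/ceph-NNN layout used by the keyring step later:
for n in {104..111}; do ln -s /var/local/osd${n} /var/lib/ceph/osd/ceph-${n}; done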
Add the mount entries to /etc/fstab:
cat >> /etc/fstab << EOF
/dev/sda1 /var/local/osd104 xfs defaults 0 0
/dev/sdb1 /var/local/osd105 xfs defaults 0 0
/dev/sdc1 /var/local/osd106 xfs defaults 0 0
/dev/sdd1 /var/local/osd107 xfs defaults 0 0
/dev/sde1 /var/local/osd108 xfs defaults 0 0
/dev/sdf1 /var/local/osd109 xfs defaults 0 0
/dev/sdg1 /var/local/osd110 xfs defaults 0 0
/dev/sdh1 /var/local/osd111 xfs defaults 0 0
EOF
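You can mount everything and verify the entries before rebooting:
mount -a
df -h | grep osd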
Reboot the server so the tuning takes effect, then check that the disks mounted correctly:
reboot
Repeat the following for each OSD ----
Initialize the OSD data directory:
ceph-osd -i 104 --mkfs --mkkey
Register the OSD authentication key:
ceph auth add osd.104 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-104/keyring
Add the OSD to the CRUSH map:
ceph osd crush add osd.104 0.0 root=default host=pc012ceph13
Each OSD directory under /var/lib/ceph/osd needs an empty sysvinit file so the init script will manage it (the /var/local path below is the same directory via the symlink):
touch /var/local/osd104/sysvinit
---- end of per-OSD loop
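The per-OSD block above can be condensed into one loop; a minimal sketch assuming ids 104-111 all live on host pc012ceph13:
for i in {104..111}; do
  ceph-osd -i ${i} --mkfs --mkkey
  ceph auth add osd.${i} osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-${i}/keyring
  ceph osd crush add osd.${i} 0.0 root=default host=pc012ceph13
  touch /var/local/osd${i}/sysvinit
done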
Start the ceph service:
systemctl start ceph
Start the OSD service:
/etc/init.d/ceph start osd.104
...
...
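To start the remaining OSDs in one pass and confirm they register as up:
for i in {104..111}; do /etc/init.d/ceph start osd.${i}; done
ceph osd tree
ceph -s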
Rebalance data (during idle time or a maintenance window):
ceph osd crush reweight osd.104 0.54999
...
...
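A sketch for reweighting all eight OSDs, assuming the same target weight for each disk; watch recovery progress while the cluster rebalances:
for i in {104..111}; do ceph osd crush reweight osd.${i} 0.54999; done
ceph -w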