Ceph Luminous Installation and Configuration


#Environment: CentOS 7, Ceph v12 (Luminous)

Integrating OpenStack Pike with Ceph: http://www.cnblogs.com/elvi/p/7897191.html

#Environment preparation
###########################
#Upgrade the kernel (optional)
#Official OS recommendations: http://docs.ceph.com/docs/master/start/os-recommendations/#linux-kernel
#Upgrade the CentOS 7 kernel to 4.4 LTS

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
#Install the latest long-term (kernel-lt) kernel packages
yum --enablerepo=elrepo-kernel install kernel-lt-devel kernel-lt -y
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg #list the boot entries
grub2-set-default 0 #set the first entry as the default
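#Optionally, confirm before rebooting that grub2-set-default recorded the choice in grubenv:
grub2-editenv list #should show saved_entry=0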
rpm -e elrepo-release #remove the elrepo repo package
reboot
#uname -r #check the kernel version after the reboot
#4.4.98-1.el7.elrepo.x86_64
###########################


###########################

#Use the Aliyun mirrors
rm -f /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo #remove the Aliyun internal-only mirror URLs
#Create the Ceph repo
echo '#Aliyun Ceph repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
[ceph-source]
name=ceph-source
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/
gpgcheck=0
#'>/etc/yum.repos.d/ceph.repo
yum clean all && yum makecache #rebuild the yum metadata cache

#Disable SELinux and the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
#leave SELINUXTYPE at its default (targeted)
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0


#Time synchronization
[[ -f /usr/sbin/ntpdate ]] || { echo "install ntp";yum install ntp -y &> /dev/null; } #install ntp if it is missing
/usr/sbin/ntpdate ntp6.aliyun.com 
echo "*/3 * * * * /usr/sbin/ntpdate ntp6.aliyun.com  &> /dev/null" > /tmp/crontab
crontab /tmp/crontab
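#Alternative sketch: CentOS 7 ships chronyd by default, so the ntpdate cron job above could be
#replaced with chrony pointed at the same server (these steps are an assumption, not part of the
#original setup).
yum install -y chrony
sed -i '/^server /d' /etc/chrony.conf              #drop the default pool servers
echo 'server ntp6.aliyun.com iburst' >> /etc/chrony.conf
systemctl enable chronyd && systemctl restart chronyd
chronyc sources                                    #verify that the source is reachable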

###########################
#Network planning

# #Public network plan
# 10.2.1.71 ceph01

#Cluster (internal) network plan; append to /etc/hosts
echo '#ceph
192.168.58.71 ceph01
192.168.58.72 ceph02
192.168.58.73 ceph03
192.168.58.74 ceph04
'>>/etc/hosts
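#The echo above only updates the local hosts file; a minimal sketch for copying it to the other
#nodes once the passwordless SSH configured below is in place:
for h in ceph02 ceph03 ceph04; do scp /etc/hosts $h:/etc/hosts; done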

#Configure the hostname and a static IP
Nu=1
hostnamectl set-hostname ceph0$Nu
NetName=ens37 #NIC name
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 192.168.58.7$Nu/24 \
 ipv4.dns "172.16.11.14 119.29.29.29" #ipv4.gateway "192.168.58.1"
nmcli connection reload #re-read the connection profiles
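#Note: "nmcli connection reload" only re-reads the profile files; to apply the new profile,
#bring the connection up and check the address (suggested addition, not in the original steps):
nmcli con up "$NetName"
ip addr show "$NetName" #confirm the static address is active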


#Disks (two extra disks added to each OSD node)
# [root@ceph01 ~]# lsblk
# NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

# sdb      8:16   0  100G  0 disk 
# sdc      8:32   0  100G  0 disk

###########################

 

 

#Ceph installation and configuration
###########################
#All of the following steps are run on ceph01

# #Ceph node roles
# Mon (Monitor): monitoring node
# OSD (Object Storage Device): storage node
# MDS (Metadata Server): only needed when Ceph is used as a file system; not configured in this example

#ceph01 is the deploy node
#ceph01, ceph02 and ceph03 are the Mon nodes
#ceph02, ceph03 and ceph04 are the OSD nodes

#Passwordless SSH between the nodes
#download the helper script #
curl http://elven.vip/ks/sh/sshkey.me.sh >sshkey.me.sh
#user and password used by the script #
echo "
USER=root
PASS=123321
">my.sh.conf
sh ./sshkey.me.sh ceph01 ceph02 ceph03 ceph04
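#If the helper script is unreachable, a manual sketch that achieves the same passwordless SSH
#(assumes the root password 123321 from my.sh.conf and that sshpass from EPEL is acceptable):
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
yum install -y sshpass
for h in ceph01 ceph02 ceph03 ceph04; do
  sshpass -p 123321 ssh-copy-id -o StrictHostKeyChecking=no root@$h
done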

#Install the ceph-deploy tool
yum install -y ceph-deploy

#Create the configuration directory
mkdir -p /etc/ceph
cd /etc/ceph/

#Generate the initial Mon configuration
ceph-deploy new ceph{01,02,03}
#ceph-deploy new --cluster-network 192.168.58.0/24 --public-network 10.2.1.0/24 ceph{01,02,03}

# #Set the default replica count to 2 and tune the journal size and PG defaults
# #Configure the public/cluster networks; skip these two lines on a single-NIC setup
echo '
public network = 10.2.1.0/24
cluster network = 192.168.58.0/24

mon_clock_drift_allowed = 2    
osd_journal_size = 4086
osd_pool_default_pg_num = 128
osd_pool_default_pgp_num = 128
osd pool default size = 2
osd pool default min size = 1
rbd_default_features = 1
client_quota = true
'>>./ceph.conf

#Install Ceph on all nodes
ceph-deploy install ceph01 ceph02 ceph03 ceph04
#yum install -y ceph ceph-radosgw #this is what ceph-deploy actually installs on each node

#Initialize the monitors and gather the keys
cd /etc/ceph/
ceph-deploy --overwrite-conf mon create-initial

#Create the OSD nodes (using the sdb and sdc disks)
#lsblk #list the disks
#Zap (wipe) the disks
ceph-deploy disk zap ceph02:sdb ceph02:sdc
ceph-deploy disk zap ceph03:sdb ceph03:sdc
ceph-deploy disk zap ceph04:sdb ceph04:sdc
#Create the OSDs
ceph-deploy --overwrite-conf osd create ceph02:sdb ceph02:sdc
ceph-deploy --overwrite-conf osd create ceph03:sdb ceph03:sdc
ceph-deploy --overwrite-conf osd create ceph04:sdb ceph04:sdc
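#Note: the host:disk syntax above matches ceph-deploy 1.5.x, which these repos shipped at the
#time; if a ceph-deploy 2.x ends up installed instead, the rough per-disk equivalent would be:
# ceph-deploy disk zap ceph02 /dev/sdb
# ceph-deploy --overwrite-conf osd create --data /dev/sdb ceph02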

#Copy the config and admin keyring to all nodes
ceph-deploy admin ceph01 ceph02 ceph03 ceph04
chmod 644 /etc/ceph/ceph.client.admin.keyring

#Create the monitors
ceph-deploy --overwrite-conf mon create ceph01
ceph-deploy --overwrite-conf admin ceph01
ceph-deploy --overwrite-conf mon create ceph02
ceph-deploy --overwrite-conf admin ceph02
ceph-deploy --overwrite-conf mon create ceph03
ceph-deploy --overwrite-conf admin ceph03

#Add mgr daemons
#since Ceph 12 (Luminous), mgr daemons must be deployed alongside the monitors
ceph-deploy mgr create ceph{01,02,03}:mon_mgr
#ceph-deploy mgr create ceph01:mon_mgr ceph02:mon_mgr ceph03:mon_mgr

#Enable the dashboard (on a mon node)
ceph mgr module enable dashboard
#access the dashboard at http://ip:7000
netstat -antp|grep 7000

# Set the dashboard bind address and port
# ceph config-key put mgr/dashboard/server_addr 192.168.58.71
# ceph config-key put mgr/dashboard/server_port 7000
# systemctl restart ceph-mgr@ceph01
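#To see which node is actually serving the dashboard (only the active mgr does), list the mgr
#service URLs; the output should contain something like {"dashboard": "http://ceph01:7000/"}:
ceph mgr services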

###########################
#Common status and inspection commands
ceph health
ceph -s
ceph osd tree

ceph df
ceph mon stat
ceph osd stat
ceph pg stat

ceph osd lspools
ceph auth list

###########################
#References
https://segmentfault.com/a/1190000011581513
http://blog.csdn.net/wylfengyujiancheng/article/details/78461801

https://segmentfault.com/a/1190000011589519
https://www.cnblogs.com/netmouser/p/6876846.html

#Chinese documentation
http://docs.ceph.org.cn/man/8/ceph-deploy/

#How Ceph works and how to install it
http://www.jianshu.com/p/25163032f57f

###########################

 

###########################
#Basic Ceph performance tests

#Create a test pool named mytest
ceph osd pool create mytest 128
rados lspools
# ceph osd pool set mytest size 2 #set the replica count to 2
# ceph osd pool delete mytest mytest --yes-i-really-really-mean-it #delete the pool
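#Note on deletion (Luminous refuses it by default): the monitors must allow pool deletion first,
#e.g. temporarily via injectargs, after which the full delete command above works:
# ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'
# ceph osd pool delete mytest mytest --yes-i-really-really-mean-it
# ceph tell mon.\* injectargs '--mon-allow-pool-delete=false'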


#RADOS benchmarks (watch the bandwidth and latency figures)
rados bench -p mytest 10 write --no-cleanup #10-second write test
rados bench -p mytest 10 seq  #sequential read
rados bench -p mytest 10 rand #random read
rados -p mytest cleanup #clean up the benchmark objects

#RBD block device test
rbd create --size 2G mytest/test1 #create a block device image named test1
rbd ls mytest
rbd info mytest/test1

rbd map mytest/test1   #map the block device
#/dev/rbd0
#rbd showmapped         #list the mapped block devices
#format and mount
mkfs.xfs /dev/rbd0
mkdir -p /mnt/ceph
mount /dev/rbd0 /mnt/ceph/
df -h /mnt/ceph

#Write benchmark
rbd bench-write mytest/test1
#defaults: 4K IO size, 16 threads, 1024 MB total, sequential writes
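#The same run with the default parameters spelled out; treat this as a sketch based on the
#bench-write options (--io-size/--io-threads/--io-total/--io-pattern), not the exact original call:
rbd bench-write mytest/test1 --io-size 4096 --io-threads 16 --io-total 1024M --io-pattern seq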


rbd unmap mytest/test1 #unmap the block device
rbd rm mytest/test1    #remove the block device image

#Reference
https://www.cnblogs.com/sammyliu/p/5557666.html
###########################

 

###########################
#Unresolved issues:

##1.
#After the dashboard is enabled, port 7000 is open on all the Mon nodes,
#but only the node running the active mgr actually serves the web UI.


##2.
#rbd map fails to map the block device; not resolved yet.
#Fails on CentOS 7 with the upgraded kernel 4.4.98-1.el7.elrepo.x86_64;
#works with the stock CentOS 7 kernel 3.10.0-693.2.2.el7.x86_64.
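#Untested troubleshooting sketch for the rbd map failure (an assumption, not verified here):
#the kernel RBD client rejects images with features it does not support, and dmesg usually
#names them; disabling the extra features on the image may let the map succeed.
# dmesg | tail #look for "image uses unsupported features"
# rbd info mytest/test1 | grep features
# rbd feature disable mytest/test1 object-map fast-diff deep-flatten exclusive-lock
# rbd map mytest/test1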

###########################

 
