Ceph installation and configuration

1. Virtual machine configuration

Installed with yum on CentOS 7.9.

[root@beian yum.repos.d]# ansible ceph -m command -a 'cat /etc/hosts '
140.249.50.15 | CHANGED | rc=0 >>
192.168.0.220 k8s-ceph-0003
192.168.0.49 k8s-ceph-0002
192.168.0.71 k8s-ceph-0001
140.249.21.94 | CHANGED | rc=0 >>
192.168.0.220 k8s-ceph-0003
192.168.0.49 k8s-ceph-0002
192.168.0.71 k8s-ceph-0001
140.249.50.100 | CHANGED | rc=0 >>
192.168.0.220 k8s-ceph-0003
192.168.0.49 k8s-ceph-0002
192.168.0.71 k8s-ceph-0001
[root@beian yum.repos.d]# ansible ceph -m command -a 'lsblk '
140.249.50.15 | CHANGED | rc=0 >>
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 40G 0 disk
├─vda1 253:1 0 3.9G 0 part [SWAP]
└─vda2 253:2 0 36.1G 0 part /
vdb 253:16 0 100G 0 disk
vdc 253:32 0 100G 0 disk
vdd 253:48 0 100G 0 disk
140.249.50.100 | CHANGED | rc=0 >>
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 40G 0 disk
├─vda1 253:1 0 3.9G 0 part [SWAP]
└─vda2 253:2 0 36.1G 0 part /
vdb 253:16 0 100G 0 disk
vdc 253:32 0 100G 0 disk
vdd 253:48 0 100G 0 disk
140.249.21.94 | CHANGED | rc=0 >>
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 40G 0 disk
├─vda1 253:1 0 3.9G 0 part [SWAP]
└─vda2 253:2 0 36.1G 0 part /
vdb 253:16 0 100G 0 disk
vdc 253:32 0 100G 0 disk
vdd 253:48 0 100G 0 disk

[root@beian yum.repos.d]# ansible ceph -m command -a 'cat /etc/centos-release '
140.249.50.15 | CHANGED | rc=0 >>
CentOS Linux release 7.9.2009 (Core)
140.249.50.100 | CHANGED | rc=0 >>
CentOS Linux release 7.9.2009 (Core)
140.249.21.94 | CHANGED | rc=0 >>
CentOS Linux release 7.9.2009 (Core)
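
The ansible commands above assume a host group named ceph on the control node. A minimal sketch of that inventory, using the public IPs shown in the output (the actual inventory path and layout may differ):

[ceph]
140.249.50.15
140.249.21.94
140.249.50.100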

2. Configure the yum repositories

[root@beian yum.repos.d]# ansible ceph -m command -a 'curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo'

[root@beian yum.repos.d]# cat ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
gpgcheck=0
priority=1
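
The repo file above lives on the ansible control node; one way to distribute it to all ceph nodes before refreshing the cache is ansible's copy module (a sketch; the destination path simply mirrors the source):

ansible ceph -m copy -a 'src=/etc/yum.repos.d/ceph.repo dest=/etc/yum.repos.d/ceph.repo'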

[root@beian yum.repos.d]# ansible ceph -m command -a 'yum clean all '
[root@beian yum.repos.d]# ansible ceph -m command -a 'yum makecache '
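
As an optional check, confirm the ceph repos are now visible on every node:

ansible ceph -m command -a 'yum repolist'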

3. Install Ceph

ansible ceph -m command -a 'yum install epel-release -y '
ansible ceph -m command -a 'yum install lttng-ust -y'
ansible ceph -m command -a 'yum install ceph -y '
ansible ceph -m command -a 'yum install ceph-radosgw.x86_64 -y '

yum install ceph-deploy.noarch -y
mkdir ceph-cluster
cd ceph-cluster/

ceph-deploy new k8s-ceph-0001 k8s-ceph-0002 k8s-ceph-0003
ceph-deploy mon create-initial
ceph-deploy admin k8s-ceph-0001 k8s-ceph-0002 k8s-ceph-0003
ceph-deploy mgr create k8s-ceph-0001 k8s-ceph-0002 k8s-ceph-0003
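
ceph-deploy runs from a single node (here k8s-ceph-0001) and connects to the other nodes over SSH, so passwordless SSH between the nodes is assumed. Once the mons and mgrs are up, the cluster state can be verified (output will vary):

ceph -s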

4. Initialize the Ceph OSD disks

ceph-deploy osd create --data /dev/vdb k8s-ceph-0001
ceph-deploy osd create --data /dev/vdc k8s-ceph-0001
ceph-deploy osd create --data /dev/vdd k8s-ceph-0001

ceph-deploy osd create --data /dev/vdb k8s-ceph-0002
ceph-deploy osd create --data /dev/vdc k8s-ceph-0002
ceph-deploy osd create --data /dev/vdd k8s-ceph-0002

ceph-deploy osd create --data /dev/vdb k8s-ceph-0003
ceph-deploy osd create --data /dev/vdc k8s-ceph-0003
ceph-deploy osd create --data /dev/vdd k8s-ceph-0003
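
The nine commands above can equivalently be written as a loop on the deploy node (a minimal sketch):

for host in k8s-ceph-0001 k8s-ceph-0002 k8s-ceph-0003; do
    for dev in vdb vdc vdd; do
        ceph-deploy osd create --data /dev/$dev $host
    done
done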

5. Create a pool

ceph osd pool create mypool1 128
ceph osd pool application enable mypool1 rgw
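
The pg_num of 128 is in line with the usual rule of thumb of roughly 100 PGs per OSD divided by the replica count (9 OSDs x 100 / 3 replicas ≈ 300 PGs total across all pools), which leaves headroom for additional pools. To verify the pool settings:

ceph osd pool get mypool1 pg_num
ceph osd pool get mypool1 size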

Troubleshooting:

[root@k8s-ceph-0001 ceph-cluster]# ceph health detail
HEALTH_WARN 1/3 mons down, quorum k8s-ceph-0001,k8s-ceph-0003
MON_DOWN 1/3 mons down, quorum k8s-ceph-0001,k8s-ceph-0003
mon.k8s-ceph-0002 (rank 0) addr [v2:192.168.0.49:3300/0,v1:192.168.0.49:6789/0] is down (out of quorum)

ceph osd df showed one OSD in the down state.
Checking the mon log turned up "osd.0 reported failed by osd.1". Since these are public-cloud VMs communicating over the public network, intermittent network flapping cannot be ruled out as the cause. Logging in to the node hosting the down OSD and manually restarting the OSD service brought it back up.
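
For reference, the manual restarts look like the following, assuming osd.0 is the down OSD and mon.k8s-ceph-0002 is the mon out of quorum (run each command on the node hosting that daemon):

systemctl restart ceph-osd@0
systemctl restart ceph-mon@k8s-ceph-0002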
