Ceph Distributed Storage Configuration

1 Ceph Backend Storage Configuration in Practice

1.1 Preparation Before Configuring Ceph

1.1.1 Configure All Three Machines

[root@linux-node1 ~]# hostname linux-node1

[root@linux-node1 ~]# echo "linux-node1" >>/etc/hostname

[root@linux-node2 ~]# hostname linux-node2

[root@linux-node2 ~]# echo "linux-node2" >>/etc/hostname

[root@linux-node3 ~]# hostname linux-node3

[root@linux-node3 ~]# echo "linux-node3" >>/etc/hostname

[root@linux-node1 ~]# vi /etc/hosts

192.168.100.151 linux-node1 linux-node1.oldboyedu.com

192.168.100.152 linux-node2 linux-node2.oldboyedu.com

192.168.100.153 linux-node3 linux-node3.oldboyedu.com

[root@linux-node1 ~]# cd /etc/

[root@linux-node1 etc]# scp hosts linux-node2:/etc/

[root@linux-node1 etc]# scp hosts linux-node3:/etc/

[root@linux-node1 ~]# systemctl stop firewalld.service

[root@linux-node1 ~]# setenforce 0

[root@linux-node1 ~]# id ceph

[root@linux-node2 ~]# id ceph

[root@linux-node3 ~]# id ceph

[root@linux-node1 ~]# useradd -d /home/ceph -m ceph

[root@linux-node1 ~]# echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph

[root@linux-node1 ~]# passwd ceph

Changing password for user ceph.

New password:

BAD PASSWORD: The password is shorter than 8 characters

Retype new password:

passwd: all authentication tokens updated successfully.

[root@linux-node2 ~]# passwd ceph

Changing password for user ceph.

New password:

BAD PASSWORD: The password is shorter than 8 characters

Retype new password:

passwd: all authentication tokens updated successfully.

[root@linux-node3 ~]# passwd ceph

Changing password for user ceph.

New password:

BAD PASSWORD: The password is shorter than 8 characters

Retype new password:

passwd: all authentication tokens updated successfully.

[root@linux-node1 ~]# chmod 0440 /etc/sudoers.d/ceph

[root@linux-node1 ~]# visudo

# Modify the configuration file as follows

... ...

Defaults:ceph !requiretty
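
A quick sanity check that the ceph user really has passwordless sudo (repeat on each node); sudo whoami should print root without prompting for a password:

[root@linux-node1 ~]# su - ceph -c 'sudo whoami'

root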

[root@linux-node1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

[root@linux-node2 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

[root@linux-node3 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

[root@linux-node1 ~]# cd /etc/yum.repos.d/

[root@linux-node1 yum.repos.d]# vi ceph.repo

[ceph]

name=ceph

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/

gpgcheck=0

priority=1

[ceph-noarch]

name=cephnoarch

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/

gpgcheck=0

priority=1

[ceph-source]

name=Ceph source packages

baseurl=https://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/

enabled=0

gpgcheck=1

type=rpm-md

gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc

priority=1
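
Because ceph-deploy is run later with --no-adjust-repos, linux-node2 and linux-node3 need this repo file as well; a simple way to distribute it, assuming root SSH between the nodes as used for /etc/hosts above:

[root@linux-node1 yum.repos.d]# scp ceph.repo linux-node2:/etc/yum.repos.d/

[root@linux-node1 yum.repos.d]# scp ceph.repo linux-node3:/etc/yum.repos.d/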

[root@linux-node1 ~]# yum clean all

[root@linux-node1 ~]# yum makecache

[root@linux-node1 ~]# yum repolist

1.1.2 Configure SSH Authentication on linux-node1

[ceph@linux-node1 ~]$ ssh-keygen -t rsa

[ceph@linux-node1 ~]$ ssh-copy-id ceph@linux-node1

[ceph@linux-node1 ~]$ ssh ceph@linux-node1

[ceph@linux-node1 ~]$ ssh-copy-id ceph@linux-node2

[ceph@linux-node1 ~]$ ssh ceph@linux-node2

[ceph@linux-node1 ~]$ ssh-copy-id ceph@linux-node3

[ceph@linux-node1 ~]$ ssh ceph@linux-node3

[ceph@linux-node1 ~]$ vim ~/.ssh/config

Host linux-node1

  Hostname linux-node1

  User ceph

Host linux-node2

  Hostname linux-node2

  User ceph

Host linux-node3

  Hostname linux-node3

  User ceph

[ceph@linux-node1 ~]$ chmod 600 ~/.ssh/config
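
Before moving on, it is worth confirming that passwordless SSH from linux-node1 works for all three hosts; a minimal check that should print each hostname without asking for a password:

[ceph@linux-node1 ~]$ for node in linux-node1 linux-node2 linux-node3; do ssh $node hostname; done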

1.2 Install and Configure Ceph

[ceph@linux-node1 ~]$ sudo yum install yum-plugin-priorities

[ceph@linux-node1 ~]$ sudo yum install ceph-deploy

[ceph@linux-node1 ~]$ sudo mkdir my-cluster

[ceph@linux-node1 ~]$ sudo chown -R ceph.ceph /home/ceph/my-cluster/

[ceph@linux-node1 ~]$ cd my-cluster

[ceph@linux-node1 my-cluster]$ ceph-deploy new linux-node1 linux-node2 linux-node3 

[ceph@linux-node1 my-cluster]$ ceph-deploy install --no-adjust-repos linux-node1 linux-node2 linux-node3

[ceph@linux-node1 my-cluster]$ ceph-deploy mon create-initial
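
The next three commands wipe /dev/sdb on every node, so double-check first that /dev/sdb really is the spare disk intended for the OSDs (the device name is specific to this lab environment):

[ceph@linux-node1 my-cluster]$ for node in linux-node1 linux-node2 linux-node3; do ssh $node lsblk -d /dev/sdb; done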

[ceph@linux-node1 my-cluster]$ ceph-deploy disk zap linux-node1 /dev/sdb

[ceph@linux-node1 my-cluster]$ ceph-deploy disk zap linux-node2 /dev/sdb

[ceph@linux-node1 my-cluster]$ ceph-deploy disk zap linux-node3 /dev/sdb

[ceph@linux-node1 my-cluster]$ sudo chmod +r ceph.client.admin.keyring

[ceph@linux-node1 my-cluster]$ sudo cp -a /home/ceph/my-cluster/ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring

[ceph@linux-node1 my-cluster]$ vi ~/my-cluster/ceph.conf

[global]

fsid = 55b2d4e8-cb32-4edb-9cce-d32a64294503

mon_initial_members = linux-node1, linux-node2, linux-node3

mon_host = 192.168.100.151,192.168.100.152,192.168.100.153

auth_cluster_required = cephx

auth_service_required = cephx

auth_client_required = cephx

osd pool default size = 2

mon clock drift allowed = 2

mon clock drift warn backoff = 30

[mon]

mon allow pool delete = true

[ceph@linux-node1 my-cluster]$ ceph-deploy --overwrite-conf config push linux-node{1..3}

[ceph@linux-node1 my-cluster]$ sudo systemctl restart ceph-mon.target

[ceph@linux-node1 my-cluster]$ sudo systemctl status ceph-mon.target
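
To confirm that the pushed options actually took effect on the running monitor, the admin socket can be queried; a sketch that assumes the default socket path and daemon name on linux-node1:

[ceph@linux-node1 my-cluster]$ sudo ceph daemon mon.linux-node1 config get mon_allow_pool_delete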

[ceph@linux-node1 my-cluster]$ ceph-deploy osd create linux-node1 --data /dev/sdb

[ceph@linux-node1 my-cluster]$ ceph-deploy osd create linux-node2 --data /dev/sdb

[ceph@linux-node1 my-cluster]$ ceph-deploy osd create linux-node3 --data /dev/sdb

[ceph@linux-node1 my-cluster]$ ceph-deploy mgr create linux-node1 linux-node2 linux-node3

[ceph@linux-node1 my-cluster]$ ceph -s
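
In addition to ceph -s, ceph osd tree is a quick way to verify that all three OSDs are up and mapped to the expected hosts:

[ceph@linux-node1 my-cluster]$ ceph osd tree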


1.3 Create the File System

1.3.1 Load the rbd and ceph Kernel Modules

[ceph@linux-node1 my-cluster]$ sudo modprobe rbd

[ceph@linux-node1 my-cluster]$ sudo lsmod | grep rbd

[ceph@linux-node1 my-cluster]$ sudo modprobe ceph

[ceph@linux-node1 my-cluster]$ sudo lsmod | grep ceph

1.3.2 Create the Ceph MDS Role

[ceph@linux-node1 my-cluster]$ ceph-deploy --overwrite-conf mds create linux-node1

[ceph@linux-node1 my-cluster]$ sudo netstat -tnlp | grep mds

1.3.3 Manually Create the data and metadata Pools

[ceph@linux-node1 my-cluster]$ ceph osd pool create cephfs_data 128

[ceph@linux-node1 my-cluster]$ ceph osd pool create cephfs_metadata 128
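
The value 128 comes from the usual placement-group rule of thumb of roughly (OSDs x 100) / replica count PGs per cluster, rounded up to a power of two: (3 x 100) / 2 = 150, rounded up to 256 PGs in total, i.e. 128 for each of the two CephFS pools in this small lab setup.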

[ceph@linux-node1 my-cluster]$ ceph osd pool stats cephfs_data

[ceph@linux-node1 my-cluster]$ ceph osd pool stats cephfs_metadata

[ceph@linux-node1 my-cluster]$ ceph fs new cephfs cephfs_metadata cephfs_data

[ceph@linux-node1 my-cluster]$ ceph fs ls

[ceph@linux-node1 my-cluster]$ ceph mds stat


[ceph@linux-node1 my-cluster]$ ceph -w

  cluster:

    id:    b86c386a-141f-4330-8267-2b15584bb915

    health: HEALTH_OK

  services:

    mon: 3 daemons, quorum linux-node1,linux-node2,linux-node3

    mgr: linux-node1(active), standbys: linux-node2, linux-node3

    mds: cephfs-1/1/1 up  {0=linux-node1=up:active}

    osd: 3 osds: 3 up, 3 in

  data:

    pools:  2 pools, 256 pgs

    objects: 21 objects, 2246 bytes

    usage:  3080 MB used, 58347 MB / 61428 MB avail

    pgs:    256 active+clean

2018-08-15 18:27:10.171244 mon.linux-node1 [INF] overall HEALTH_OK

[ceph@linux-node1 my-cluster]$ ceph -s

  cluster:

    id:    b86c386a-141f-4330-8267-2b15584bb915

    health: HEALTH_OK

  services:

    mon: 3 daemons, quorum linux-node1,linux-node2,linux-node3

    mgr: linux-node1(active), standbys: linux-node2, linux-node3

    mds: cephfs-1/1/1 up  {0=linux-node1=up:active}

    osd: 3 osds: 3 up, 3 in

  data:

    pools:  2 pools, 256 pgs

    objects: 21 objects, 2246 bytes

    usage:  3080 MB used, 58347 MB / 61428 MB avail

    pgs:    256 active+clean

1.4 Mount on the Client

1.4.1 Mount the Ceph File System in User Space (ceph-fuse)

Run the following configuration on linux-node3:

[ceph@linux-node3 ceph]$ sudo yum install -y ceph-fuse.x86_64

[ceph@linux-node3 ceph]$ sudo scp root@linux-node1:/home/ceph/my-cluster/ceph.conf /etc/ceph/ceph.conf

[ceph@linux-node3 ceph]$ sudo scp root@linux-node1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring

[ceph@linux-node3 ceph]$ sudo mkdir /home/ceph/cephfs

[ceph@linux-node3 ceph]$ sudo ceph-fuse -m 192.168.100.151:6789 /home/ceph/cephfs
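
If the mount succeeded, the CephFS file system shows up with type fuse.ceph-fuse; a quick check:

[ceph@linux-node3 ceph]$ df -hT /home/ceph/cephfs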

1.4.2 Run the Same Configuration on linux-node1

[ceph@linux-node1 ceph]$ sudo yum install -y ceph-fuse.x86_64

[ceph@linux-node1 ceph]$ sudo scp root@linux-node1:/home/ceph/my-cluster/ceph.conf /etc/ceph/ceph.conf

[ceph@linux-node1 ceph]$ sudo scp root@linux-node1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring

[ceph@linux-node1 ceph]$ sudo mkdir /home/ceph/cephfs

[ceph@linux-node1 ceph]$ sudo ceph-fuse -m 192.168.100.151:6789 /home/ceph/cephfs

1.5 Mount the Ceph File System with the Kernel Driver

1.5.1 To mount the Ceph file system, you can use the mount command directly if you know a monitor's IP address, or use the mount.ceph helper to resolve the monitor address automatically

# sudo mkdir /mnt/mycephfs

# sudo mount -t ceph 192.168.100.151:6789:/ /mnt/mycephfs

1.5.2 To mount a Ceph file system with cephx authentication enabled, you must specify a user name and secret key

# sudo mount -t ceph 192.168.100.151:6789:/ /mnt/mycephfs -o name=admin,secret=AQATSKdNGBnwLhAAnNDKnH65FmVKpXZJVasUeQ==

1.5.3 The usage above leaves the secret key in your Bash history; a safer method is to read it from a file. For example:

# sudo mount -t ceph 192.168.100.151:6789:/ /mnt/mycephfs -o name=admin,secretfile=/etc/ceph/admin.secret
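
The admin.secret file referenced above contains only the base64 key from the admin keyring; a minimal sketch of creating it, assuming the admin keyring is already present on the client:

# sudo sh -c 'ceph auth get-key client.admin > /etc/ceph/admin.secret'

# sudo chmod 600 /etc/ceph/admin.secret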

1.5.4 To unmount the Ceph file system, use the umount command

# sudo umount /mnt/mycephfs
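
To make the kernel mount persistent across reboots, an /etc/fstab entry along the following lines can be used (a sketch that assumes the secret file from 1.5.3):

192.168.100.151:6789:/  /mnt/mycephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0  2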

1.6 CephFS Quota Management

1.6.1 Set a 100 MB quota on the directory

[ceph@linux-node1 ~]$ sudo mkdir -p /home/ceph/cephfs/k8s

[ceph@linux-node1 ~]$ sudo setfattr -n ceph.quota.max_bytes -v 100000000 /home/ceph/cephfs/k8s

1.6.2 View the quota

[ceph@linux-node1 ~]$ sudo getfattr -n ceph.quota.max_bytes /home/ceph/cephfs/k8s

getfattr: Removing leading '/' from absolute path names

# file: home/ceph/cephfs/k8s

ceph.quota.max_bytes="100000000"

1.6.3 Test the quota

[ceph@linux-node1 k8s]$ dd if=/dev/zero of=1.txt bs=1M count=200

dd: failed to open ‘1.txt’: Permission denied

[ceph@linux-node1 k8s]$ sudo dd if=/dev/zero of=1.txt bs=1M count=200

dd: error writing ‘1.txt’: Disk quota exceeded

105+0 records in

104+0 records out

109314048 bytes (109 MB) copied, 3.52606 s, 31.0 MB/s

1.6.4 Testing again, nothing more can be written.

[ceph@linux-node1 k8s]$ sudo dd if=/dev/zero of=2.txt bs=1M count=200

dd: error writing ‘2.txt’: Disk quota exceeded

1+0 records in

0+0 records out

0 bytes (0 B) copied, 0.00256971 s, 0.0 kB/s

1.6.5 Remove the quota

[ceph@linux-node1 k8s]$ sudo setfattr -n ceph.quota.max_bytes -v 0 /home/ceph/cephfs/k8s

[ceph@linux-node1 k8s]$ sudo getfattr -n ceph.quota.max_bytes /home/ceph/cephfs/k8s

/home/ceph/cephfs/k8s: ceph.quota.max_bytes: No such attribute

1.6.6 Delete a previously created block device
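
The volumes pool and the rbd_test image below are assumed to have been created in an earlier exercise (for example, while testing RBD as an OpenStack backend); for reference, a minimal sketch of how such an image could have been created and mapped:

[root@linux-node1 ~]# ceph osd pool create volumes 128

[root@linux-node1 ~]# rbd create volumes/rbd_test --size 1024 --image-feature layering

[root@linux-node1 ~]# rbd map volumes/rbd_test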

[root@linux-node1 ~]# rbd ls -p volumes

rbd_test

volume-5e0b9cf5-6ebe-4641-8436-c3451af51eb9

[root@linux-node1 ~]# rbd unmap volumes/rbd_test

[root@linux-node1 ~]# rbd showmapped

[root@linux-node1 ~]# rbd remove volumes/rbd_test

Removing image: 100% complete...done.

[root@linux-node1 ~]# rbd ls -p volumes

volume-5e0b9cf5-6ebe-4641-8436-c3451af51eb9
