Ceph CephFS Guide

1. Deploy the MDS to the k-master node
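The MDS (metadata server) daemon serves CephFS metadata; here we use ceph-deploy to place one on the monitor node, k-master.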

[root@k-master ceph]# pwd

/etc/ceph

[root@k-master ceph]# ls

ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log  etc    s3test.py        testrbd

ceph.bootstrap-mgr.keyring  ceph.bootstrap-rgw.keyring  ceph.conf                  ceph.mon.keyring      rbdmap  swift_openrc.sh

[root@k-master ceph]# ceph-deploy  mds create k-master

[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf

[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mds create k-master

[ceph_deploy.cli][INFO  ] ceph-deploy options:

[ceph_deploy.cli][INFO  ]  username                      : None

[ceph_deploy.cli][INFO  ]  verbose                      : False

[ceph_deploy.cli][INFO  ]  overwrite_conf                : False

[ceph_deploy.cli][INFO  ]  subcommand                    : create

[ceph_deploy.cli][INFO  ]  quiet                        : False

[ceph_deploy.cli][INFO  ]  cd_conf                      :

[ceph_deploy.cli][INFO  ]  cluster                      : ceph

[ceph_deploy.cli][INFO  ]  func                          :

[ceph_deploy.cli][INFO  ]  ceph_conf                    : None

[ceph_deploy.cli][INFO  ]  mds                          : [('k-master', 'k-master')]

[ceph_deploy.cli][INFO  ]  default_release              : False

[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts k-master:k-master

[k-master][DEBUG ] connected to host: k-master

[k-master][DEBUG ] detect platform information from remote host

[k-master][DEBUG ] detect machine type

[ceph_deploy.mds][INFO  ] Distro info: CentOS Linux 7.7.1908 Core

[ceph_deploy.mds][DEBUG ] remote host will use systemd

[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to k-master

[k-master][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf

[k-master][WARNIN] mds keyring does not exist yet, creating one

[k-master][DEBUG ] create a keyring file

[k-master][DEBUG ] create path if it doesn't exist

[k-master][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.k-master osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-k-master/keyring

[k-master][INFO  ] Running command: systemctl enable ceph-mds@k-master

[k-master][WARNIN] Created symlink from /etc/systemd/system/ceph-mds.target.wants/[email protected] to /usr/lib/systemd/system/[email protected].

[k-master][INFO  ] Running command: systemctl start ceph-mds@k-master

[k-master][INFO  ] Running command: systemctl enable ceph.target

[root@k-master ceph]#

[root@k-master ceph]# ps -ef | grep ceph

ceph        2194      1  0 Jul26 ?        00:17:56 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph

ceph        2638      1  0 Jul26 ?        00:19:42 /usr/bin/ceph-osd -f --cluster ceph --id 1 --setuser ceph --setgroup ceph

ceph        3094      1  2 Jul26 ?        00:50:43 /usr/bin/ceph-mgr -f --cluster ceph --id k-master --setuser ceph --setgroup ceph

root      12196      2  0 Jul26 ?        00:00:00 [ceph-msgr]

root      21112      2  0 Jul26 ?        00:00:00 [ceph-watch-noti]

ceph      55743      1  0 Jul27 ?        00:03:50 /usr/bin/ceph-mon -f --cluster ceph --id k-master --setuser ceph --setgroup ceph

ceph      83272      1  0 03:51 ?        00:01:22 /usr/bin/radosgw -f --cluster ceph --name client.rgw.k-master --setuser ceph --setgroup ceph

ceph      93467      1  0 08:16 ?        00:00:00 /usr/bin/ceph-mds -f --cluster ceph --id k-master --setuser ceph --setgroup ceph

root      93551  64987  0 08:17 pts/0    00:00:00 grep --color=auto ceph

[root@k-master ceph]#
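As an optional sanity check, the standard CLI can confirm that the new MDS has registered with the cluster (with no filesystem defined yet, it should show up as a standby):

ceph mds stat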

2. Create two pools (metadata and data)

[root@k-master ceph]# ceph osd pool create cephfs_metadata 16 16

pool 'cephfs_metadata' created

[root@k-master ceph]# ceph osd pool create cephfs_data 16 16

pool 'cephfs_data' created
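The two trailing numbers are pg_num and pgp_num; 16 placement groups per pool is reasonable for a small test cluster. If the pools grow, the counts can be raised later. A sketch of the standard commands, with illustrative values (note that releases before Nautilus cannot decrease pg_num once raised):

ceph osd pool set cephfs_data pg_num 32
ceph osd pool set cephfs_data pgp_num 32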

[root@k-master ceph]# ceph -s

  cluster:

    id:    4221c53f-a286-4b7f-b20d-3bd453eb841c

    health: HEALTH_OK

  services:

    mon: 1 daemons, quorum k-master

    mgr: k-master(active)

    osd: 3 osds: 3 up, 3 in

    rgw: 1 daemon active

  data:

    pools:  14 pools, 292 pgs

    objects: 275 objects, 85.5MiB

    usage:  3.19GiB used, 11.8GiB / 15.0GiB avail

    pgs:    292 active+clean

  io:

    client:  0B/s rd, 0op/s rd, 0op/s wr

[root@k-master ceph]# ceph osd lspools

1 rbd,3 ceph,4 k8s,6 ceph-demo,7 ceph-tt,8 rbd-test,9 .rgw.root,10 default.rgw.control,11 default.rgw.meta,12 default.rgw.log,13 default.rgw.buckets.index,14 default.rgw.buckets.data,15 cephfs_metadata,16 cephfs_data,

[root@k-master ceph]#

3. Create the filesystem

[root@k-master ceph]# ceph fs new cephfs-demo cephfs_metadata cephfs_data

new fs with metadata pool 15 and data pool 16

[root@k-master ceph]# ceph fs ls

name: cephfs-demo, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

[root@k-master ceph]# ceph -s

  cluster:

    id:    4221c53f-a286-4b7f-b20d-3bd453eb841c

    health: HEALTH_OK

  services:

    mon: 1 daemons, quorum k-master

    mgr: k-master(active)

    mds: cephfs-demo-1/1/1 up  {0=k-worker001=up:active}, 1 up:standby

    osd: 3 osds: 3 up, 3 in

    rgw: 1 daemon active

  data:

    pools:  14 pools, 292 pgs

    objects: 296 objects, 85.5MiB

    usage:  3.20GiB used, 11.8GiB / 15.0GiB avail

    pgs:    292 active+clean

  io:

    client:  0B/s rd, 0op/s rd, 0op/s wr

[root@k-master ceph]#
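The mounts below use client.admin for simplicity. On Luminous and newer you can instead mint a restricted key for CephFS clients (a hedged sketch; client.cephfs-user is a hypothetical name):

ceph fs authorize cephfs-demo client.cephfs-user / rw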

4. Mount tests

[1] Kernel driver mount (best performance)

See: https://ceph.readthedocs.io/en/latest/cephfs/mount-using-kernel-driver/

[root@k-master ceph]# mount -t ceph k-master:/ /mnt/cephfs/

mount error 22 = Invalid argument
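mount error 22 here is the kernel client rejecting the mount because no credentials were supplied: with cephx authentication enabled, the kernel driver needs a name= option plus secret= or secretfile=. So we extract the admin key into a plain secret file and retry: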

[root@k-master ceph]# cat ./ceph.client.admin.keyring

[client.admin]

        key = AQAwmhpf7pEnOBAAXK4KC/J2YbVPDyNgfdFrdg==

[root@k-master ceph]# vim ~/admin.keyring

[root@k-master ceph]# cat ~/admin.keyring

AQAwmhpf7pEnOBAAXK4KC/J2YbVPDyNgfdFrdg==

[root@k-master ceph]# mount -t ceph k-master:6789:/ /mnt/cephfs -o name=admin,secretfile=~/admin.keyring

unable to read secretfile: No such file or directory

error reading secret file

failed to parse ceph_options
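The retry above fails because mount(8) receives the ~ literally (the shell only tilde-expands at the start of a word or in variable assignments), so the secretfile path must be absolute: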

[root@k-master ceph]# ll /root/admin.keyring

-rw-r--r-- 1 root root 41 Jul 28 08:47 /root/admin.keyring

[root@k-master ceph]# mount -t ceph k-master:6789:/ /mnt/cephfs -o name=admin,secretfile=/root/admin.keyring

[root@k-master ceph]# mount | grep /mnt/cephfs

192.168.43.60:6789:/ on /mnt/cephfs type ceph (rw,relatime,name=admin,secret=,acl,wsize=16777216)

[root@k-master ceph]#

[root@k-master ceph]# df -h | grep /mnt/cephfs

192.168.43.60:6789:/    5.5G    0  5.5G  0% /mnt/cephfs
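Note that df on a CephFS mount reports the data pool's MAX AVAIL (roughly the cluster's free raw space divided by the pool's replication factor), not a dedicated quota for the filesystem.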

[root@k-master ceph]# cd /mnt/cephfs/

[root@k-master cephfs]# touch {1..10}.log

[root@k-master cephfs]# ls

10.log  1.log  2.log  3.log  4.log  5.log  6.log  7.log  8.log  9.log

[root@k-master cephfs]#

[root@k-master cephfs]# lsmod | grep ceph

ceph                  358802  1

libceph              306742  2 rbd,ceph

dns_resolver          13140  1 libceph

libcrc32c              12644  2 xfs,libceph

[root@k-master cephfs]#
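The mount pulled in the ceph and libceph kernel modules automatically; no manual modprobe was needed.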

[2] Mounting with ceph-fuse
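ceph-fuse runs the client in user space: it generally trails the kernel client in performance (hence the note on [1] above), but it does not depend on the kernel version and picks up new CephFS features sooner.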

[root@k-master cephfs]# yum install ceph-fuse -y

[root@k-master ~]# mkdir /mnt/ceph-fuse

[root@k-master ~]# ceph-fuse -n client.admin -m k-master:6789 /mnt/ceph-fuse/

2020-07-28 09:49:28.465970 7fc655159240 -1 init, newargv = 0x5584e03c9380 newargc=9

ceph-fuse[97512]: starting ceph client

ceph-fuse[97512]: starting fuse

[root@k-master ~]# mount | grep '/mnt/ceph-fuse'

ceph-fuse on /mnt/ceph-fuse type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)

[root@k-master ~]# df -h | grep mnt

ceph-fuse                5.5G    0  5.5G  0% /mnt/ceph-fuse

[root@k-master ~]#

[root@k-master ~]# cd /mnt/ceph-fuse

[root@k-master ceph-fuse]# ls

10.log  1.log  2.log  3.log  4.log  5.log  6.log  7.log  8.log  9.log

[root@k-master ceph-fuse]# touch {a..z}.log

[root@k-master ceph-fuse]# ls

10.log  2.log  4.log  6.log  8.log  a.log  c.log  e.log  g.log  i.log  k.log  m.log  o.log  q.log  s.log  u.log  w.log  y.log

1.log  3.log  5.log  7.log  9.log  b.log  d.log  f.log  h.log  j.log  l.log  n.log  p.log  r.log  t.log  v.log  x.log  z.log

[root@k-master ceph-fuse]#
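To tear the test mounts down, or to make them persistent across reboots, the usual umount and fstab mechanisms apply (a hedged sketch; adjust hosts, paths, and the fuse.ceph option syntax to your release):

# unmount the two test mounts
umount /mnt/cephfs
umount /mnt/ceph-fuse        # fusermount -u /mnt/ceph-fuse also works

# /etc/fstab examples for boot-time mounting
k-master:6789:/  /mnt/cephfs     ceph       name=admin,secretfile=/root/admin.keyring,noatime,_netdev  0 0
none             /mnt/ceph-fuse  fuse.ceph  ceph.id=admin,_netdev,defaults  0 0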
