Installing a Ceph Cluster


Environment Preparation

CentOS 7.6

Hostname   IP               Role
admin      192.168.182.128  admin
node1      192.168.182.129  mon/mgr/osd
node2      192.168.182.130  osd
node3      192.168.182.131  osd
client     192.168.182.132  client
  • Set the hostname on every node and make sure all nodes can resolve each other (see the hostnamectl sketch below)
  • Add an extra disk of at least 5 GB to each of the three node hosts
  • Disable the firewall and SELinux on all nodes
  • Create a regular user on all nodes and set its password
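
For the first bullet, the hostnames can be set with hostnamectl; a minimal sketch (run the matching line on each machine, names as in the table above):

hostnamectl set-hostname admin    # on 192.168.182.128
hostnamectl set-hostname node1    # on 192.168.182.129
hostnamectl set-hostname node2    # on 192.168.182.130
hostnamectl set-hostname node3    # on 192.168.182.131
hostnamectl set-hostname client   # on 192.168.182.132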

Write the following into /etc/hosts on every node:

192.168.182.128 admin
192.168.182.129 node1
192.168.182.130 node2
192.168.182.131 node3
192.168.182.132 client
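
A quick check from any node that every entry resolves and answers (the firewalls are disabled below, so ping should succeed):

for h in admin node1 node2 node3 client; do
    ping -c1 -W1 $h >/dev/null && echo "$h OK" || echo "$h FAILED"
done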

Script to disable the firewall and SELinux on all nodes (make it executable and run it):

#!/bin/bash

# Disable the firewall
echo "Stopping the firewall..."
sudo systemctl stop firewalld
sudo systemctl disable firewalld
echo "Firewall stopped and disabled."

# Check the SELinux status
sestatus=$(sestatus | grep "SELinux status" | awk '{print $3}')

if [ "$sestatus" == "enabled" ]; then
    # Disable SELinux
    echo "Disabling SELinux..."
    sudo setenforce 0
    sudo sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
    echo "SELinux disabled."
else
    echo "SELinux is already disabled; nothing to do."
fi

echo "Done."

Create the user and set its password on all nodes:

useradd cephu
passwd cephu

Make sure the newly created user has sudo privileges on every Ceph node (run on all nodes):

visudo
# add around line 100:
cephu   ALL=(root)      NOPASSWD:ALL
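
Instead of editing the main sudoers file interactively, the same rule can be dropped into /etc/sudoers.d, which is easier to script across nodes; a sketch equivalent to the visudo entry above (run as root):

echo "cephu   ALL=(root)      NOPASSWD:ALL" > /etc/sudoers.d/cephu
chmod 0440 /etc/sudoers.d/cephu
visudo -c    # sanity-check the sudoers syntax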

Set up passwordless SSH login (on the admin node):

[root@admin ~]# su - cephu
[cephu@admin ~]$ ssh-keygen
# Copy the key generated for the cephu user on the admin node to each Ceph node
[cephu@admin ~]$ ssh-copy-id cephu@node1
[cephu@admin ~]$ ssh-copy-id cephu@node2
[cephu@admin ~]$ ssh-copy-id cephu@node3

# On the admin node, add a ~/.ssh/config file for the cephu user (the user that runs ceph-deploy), so that ceph-deploy can log in to the Ceph nodes as the user you created
[cephu@admin ~]$ mkdir -p ~/.ssh
[cephu@admin ~]$ vim ~/.ssh/config
[cephu@admin ~]$ cat ~/.ssh/config
Host node1
Hostname node1
User cephu

Host node2
Hostname node2
User cephu

Host node3
Hostname node3
User cephu
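
A quick check that the copied key and the config file work; each hostname should print without any password prompt:

[cephu@admin ~]$ for n in node1 node2 node3; do ssh $n hostname; done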

Add the download repository

Install ceph-deploy (as root on the admin node)

vim /etc/yum.repos.d/ceph.repo

[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-luminous/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc

yum makecache
yum update

# Enable the yum cache
[root@admin ~]# vim /etc/yum.conf
keepcache=1

yum install ceph-deploy -y

Install the NTP service

Run this on all nodes: choose one node as the NTP time server and have the other nodes act as NTP clients that synchronize their time with it.

[root@admin ~]# yum install -y ntp

[root@admin ~]# vim /etc/ntp.conf  # comment out the four existing "server" lines and add the following two lines
server 127.127.1.0 # local clock
fudge  127.127.1.0 stratum 10

[root@admin ~]# systemctl start ntpd
[root@admin ~]# systemctl enable ntpd
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.

On the other nodes:

 yum install ntpdate -y
 ntpdate 192.168.182.128   # the IP of the NTP time server (admin)
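
A sketch for verifying the time service and keeping the clients in sync; the cron entry is optional:

# on admin: the local clock driver should show up as a peer
ntpq -p

# on node1/node2/node3/client (as root): re-sync every 30 minutes via cron
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate 192.168.182.128 >/dev/null 2>&1") | crontab -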

Deploy the Cluster

Unless otherwise noted, the following operations are performed on the admin node as the cephu user.

  • Create a working directory for cephu; all ceph-deploy commands must be run from this directory

    [root@admin ~]# su - cephu
    Last login: Tue Jul 11 10:09:09 EDT 2023 on pts/0
    [cephu@admin ~]$ mkdir my-cluster
    
    sudo yum install  wget unzip -y
    
  • Create the cluster (first install the distribute package, which provides the setuptools that ceph-deploy needs, then initialize the cluster with ceph-deploy new)

    wget https://files.pythonhosted.org/packages/5f/ad/1fde06877a8d7d5c9b60eff7de2d452f639916ae1d48f0b8f97bf97e570a/distribute-0.7.3.zip
    
    [cephu@admin ~]$ ls
    distribute-0.7.3.zip  my-cluster
    [cephu@admin ~]$ unzip distribute-0.7.3.zip
    [cephu@admin ~]$ cd distribute-0.7.3
    [cephu@admin distribute-0.7.3]$ sudo python setup.py install
    
    [cephu@admin distribute-0.7.3]$ cd ../my-cluster/
    [cephu@admin my-cluster]$ ceph-deploy new node1
    [cephu@admin my-cluster]$ ls
    ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
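
    Optionally, the public network can be pinned in the generated ceph.conf before the monitor is deployed; a sketch, with the subnet matching the addresses used in this lab (the generated file only contains a [global] section, so appending is enough):

    [cephu@admin my-cluster]$ echo "public network = 192.168.182.0/24" >> ceph.conf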
    
    
    
  • Install the ceph and ceph-radosgw packages on the three node hosts

    (run on the three node hosts)

    # Install the EPEL repository
    yum install -y epel*
    # Create the Ceph repository file with the following content (Aliyun mirror):
    vim /etc/yum.repos.d/ceph.repo
    
    [Ceph]
    name=Ceph packages for $basearch
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
    enabled=1
    gpgcheck=0
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
    
    [Ceph-noarch]
    name=Ceph noarch packages
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
    enabled=1
    gpgcheck=0
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
    
    [ceph-source]
    name=Ceph source packages
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
    enabled=1
    gpgcheck=0
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
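
    If you prefer to write the repo file once and push it out instead of editing it on every node, a sketch like this works from the admin node, assuming the Aliyun repo content above was saved locally as ~/ceph-aliyun.repo (a hypothetical file name):

    for n in node1 node2 node3; do
        scp ~/ceph-aliyun.repo $n:/tmp/ceph.repo
        ssh $n "sudo mv /tmp/ceph.repo /etc/yum.repos.d/ceph.repo && sudo yum makecache"
    done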
    
  • Run the install command on the three node hosts

    su - cephu
    sudo yum install ceph ceph-radosgw  -y 
    
     ceph --version
    ceph version 12.2.13 (584a20eb0237c657dc0567da126be145106aa47e) luminous (stable)
    
    

Initialize the Cluster

  • Initialize the mon (as cephu on admin)

    [cephu@admin my-cluster]$ ceph-deploy mon create-initial

    # Push the configuration file and admin keyring to the nodes
    [cephu@admin my-cluster]$ ceph-deploy admin node1 node2 node3

    # Create a manager daemon on node1
    [cephu@admin my-cluster]$ ceph-deploy mgr create node1
    
    
    
  • Add OSDs

    # Check the disk names on the node hosts
    
    [cephu@node3 ~]$ lsblk
    NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
    sda               8:0    0   20G  0 disk
    ├─sda1            8:1    0    1G  0 part /boot
    └─sda2            8:2    0   19G  0 part
      ├─centos-root 253:0    0   17G  0 lvm  /
      └─centos-swap 253:1    0    2G  0 lvm  [SWAP]
    sdb               8:16   0    5G  0 disk
    sr0              11:0    1  918M  0 rom
    
    
    [cephu@admin my-cluster]$ ceph-deploy osd create --data /dev/sdb node1
    [cephu@admin my-cluster]$ ceph-deploy osd create --data /dev/sdb node2
    [cephu@admin my-cluster]$ ceph-deploy osd create --data /dev/sdb node3
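
    A quick check from node1 that all three OSDs registered and came up (assumes the admin keyring was pushed by ceph-deploy admin above):

    [cephu@node1 ~]$ sudo ceph osd tree
    # expect osd.0, osd.1 and osd.2, all with status "up"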
    

    Dashboard configuration

    (on node1)

    # Create the management-domain key
    [cephu@node1 ~]$ sudo ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
    [mgr.node1]
            key = AQCUXa5kZcSGIhAAqRGiZM7wLa3PQpfhR3dJbA==
    # Start the management domain (launch the ceph-mgr daemon)
    
    [cephu@node1 ~]$ sudo ceph-mgr -i node1
    # Check the cluster status
    [cephu@node1 ~]$ sudo ceph status
      cluster:
        id:     4f902c45-53fb-4048-9697-77b959811be9
        health: HEALTH_OK
    
      services:
        mon: 1 daemons, quorum node1
        mgr: node1(active, starting)
        osd: 3 osds: 3 up, 3 in
    
      data:
        pools:   0 pools, 0 pgs
        objects: 0 objects, 0B
        usage:   3.01GiB used, 12.0GiB / 15.0GiB avail
        pgs:
    
    
    # Enable the dashboard module
    
    [cephu@node1 ~]$  sudo ceph mgr module enable dashboard
    
    # Bind the dashboard module to an address
     sudo ceph config-key set mgr/dashboard/node1/server_addr 192.168.182.129
    
    
    # The IP address is that of the mgr node, i.e. node1
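
    The Luminous dashboard listens on port 7000 by default; the port can also be set explicitly, and the mgr reports the final URL. A sketch (run on node1):

     sudo ceph config-key set mgr/dashboard/node1/server_port 7000
     # reload the module so the new address/port take effect
     sudo ceph mgr module disable dashboard
     sudo ceph mgr module enable dashboard
     # print the URL the dashboard is serving on
     sudo ceph mgr services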
    

    Access the dashboard in a browser at http://192.168.182.129:7000 (7000 is the default dashboard port in Luminous).

Configure the Client

Configure the client to use RBD. Creating a block device requires a storage pool first, and the related commands must be run on the mon node (node1):

[cephu@node1 ~]$ sudo ceph osd pool create rbd 128 128
pool 'rbd' created


# Initialize the storage pool
[cephu@node1 ~]$ sudo rbd pool init rbd
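
As a rule of thumb, pg_num ≈ (number of OSDs × 100) / replica size, rounded up to a power of two; with 3 OSDs and the default replica size of 3 that gives 128, which matches the value used above. The pool can be inspected with:

[cephu@node1 ~]$ sudo ceph osd pool ls detail
# shows pool 'rbd' with pg_num 128 and its replication size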

# Upgrade the kernel on the client (the stock 3.10 kernel's rbd support is too old)

[root@client ~]# uname -r
3.10.0-957.el7.x86_64

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org


[root@client ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm

# List the kernel packages available from elrepo

[root@client ~]# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * elrepo-kernel: mirrors.tuna.tsinghua.edu.cn
elrepo-kernel                                                                     | 3.0 kB  00:00:00
elrepo-kernel/primary_db                                                          | 3.2 MB  00:00:17
Available Packages
elrepo-release.noarch                               7.0-6.el7.elrepo                        elrepo-kernel
kernel-lt.x86_64                                    5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-devel.x86_64                              5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-doc.noarch                                5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-headers.x86_64                            5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-tools.x86_64                              5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-tools-libs.x86_64                         5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-lt-tools-libs-devel.x86_64                   5.4.249-1.el7.elrepo                    elrepo-kernel
kernel-ml.x86_64                                    6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-devel.x86_64                              6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-doc.noarch                                6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-headers.x86_64                            6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-tools.x86_64                              6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-tools-libs.x86_64                         6.4.3-1.el7.elrepo                      elrepo-kernel
kernel-ml-tools-libs-devel.x86_64                   6.4.3-1.el7.elrepo                      elrepo-kernel
perf.x86_64                                         5.4.249-1.el7.elrepo                    elrepo-kernel
python-perf.x86_64                                  5.4.249-1.el7.elrepo                    elrepo-kernel

yum --enablerepo=elrepo-kernel install  kernel-ml-devel kernel-ml -y
# List the GRUB boot menu entries (the new kernel should be entry 0)
[root@client ~]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
CentOS Linux (6.4.3-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux (3.10.0-957.el7.x86_64) 7 (Core)
CentOS Linux (0-rescue-3e4185a101a24cd88c8db432a7f43144) 7 (Core)


grub2-set-default 0   # make the first menu entry (the new kernel) the default

 reboot
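
After the reboot, confirm the new kernel is actually running before removing the old one:

uname -r    # should now report the elrepo kernel instead of the old 3.10.0-957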
 
 # Remove the old kernel
 yum remove kernel -y
 
 
 yum  -y install python-setuptools 

Create the EPEL and Ceph repositories on the client

yum install -y epel*

# Create the Ceph repository file (same Aliyun mirror as on the nodes):
vim /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

[root@client ~]# vim /etc/yum.conf  # enable the yum cache
keepcache=1

yum clean all
su - cephu
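
The Ceph packages themselves also need to be present on the client before the rbd commands below will work; a minimal sketch using the repositories just configured (mirrors the node installation above):

sudo yum install ceph ceph-radosgw -y
ceph --version    # should report 12.2.x luminous, matching the cluster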


Authorize the client from the admin node


[cephu@admin my-cluster]$ ceph-deploy admin client
# On the client, make the admin keyring readable
[cephu@client ~]$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring
[cephu@client ~]$ sudo vi /etc/ceph/ceph.conf # add the following under the [global] section:
rbd_default_features = 1
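
rbd_default_features = 1 enables only the layering feature, which the kernel RBD client on older kernels can handle; features such as exclusive-lock, object-map, fast-diff and deep-flatten are not supported there. For an image that was already created with the full default feature set, the extra features can be disabled after the fact instead; a sketch (using the image name foo from below):

sudo rbd feature disable foo deep-flatten fast-diff object-map exclusive-lock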


Create a block device image on the client node

rbd create foo --size 4096

# Map the image to the host
[cephu@client ~]$ sudo rbd map foo --name client.admin
/dev/rbd0
# Format the block device
sudo mkfs.ext4 -m 0 /dev/rbd/rbd/foo

[cephu@client ~]$ sudo mkdir /mnt/ceph-block-device
[cephu@client ~]$ sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device
[cephu@client ~]$ cd /mnt/ceph-block-device
[cephu@client ceph-block-device]$ df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
devtmpfs                devtmpfs  452M     0  452M   0% /dev
tmpfs                   tmpfs     465M     0  465M   0% /dev/shm
tmpfs                   tmpfs     465M  6.6M  458M   2% /run
tmpfs                   tmpfs     465M     0  465M   0% /sys/fs/cgroup
/dev/mapper/centos-root xfs        17G  2.0G   16G  12% /
/dev/sda1               xfs      1014M  141M  874M  14% /boot
tmpfs                   tmpfs      93M     0   93M   0% /run/user/0
/dev/rbd0               ext4      3.8G   24K  3.8G   1% /mnt/ceph-block-device
[cephu@client ceph-block-device]$ sudo touch test.txt

After the client reboots, the device has to be mapped again before it can be mounted; otherwise the mount may hang. See the rbdmap sketch below for making the mapping persistent.
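
To bring the mapping back automatically at boot, the rbdmap service shipped with ceph-common can be used; a sketch (adjust the id/keyring if you use a cephx user other than admin):

# /etc/ceph/rbdmap: one image per line, with the cephx user and keyring used to map it
echo "rbd/foo id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" | sudo tee -a /etc/ceph/rbdmap

# map the listed images at boot
sudo systemctl enable rbdmap

# optional fstab entry; noauto avoids a boot hang if the cluster is unreachable
echo "/dev/rbd/rbd/foo /mnt/ceph-block-device ext4 noauto,noatime,_netdev 0 0" | sudo tee -a /etc/fstab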
