Scaling out storage on an existing Ceph cluster by adding an OSD node
1. Update the hosts file and hostname for node5
1) Edit /etc/hosts (on each node, including node5) so it contains:
192.168.110.3 node1
192.168.110.4 node2
192.168.110.5 node3
192.168.110.6 node4
192.168.110.7 node5
2) Set the hostname (on CentOS 7, hostnamectl replaces editing /etc/sysconfig/network):
hostnamectl set-hostname node5
3) Reboot node5.
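A quick sanity check (not part of the original steps, but cheap to run): confirm the new hostname took effect and that the other nodes can resolve node5 by name.
hostnamectl status        # on node5: "Static hostname" should be node5
ping -c 2 node5           # on node1: should resolve to 192.168.110.7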
2. On node1, configure passwordless SSH login to node5
a) On node1, generate an SSH key pair; leave the passphrase empty and keep the other defaults:
# ssh-keygen
b) Copy the SSH key to node5; you will be prompted for node5's root password. After this step you should be able to log in to node5 without a password.
# ssh-copy-id node5
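An optional check to confirm passwordless login now works from node1:
ssh node5 hostname        # should print "node5" without asking for a password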
3. Configure the yum repositories on node5
a) Remove the default repositories (the overseas mirrors are slow):
rm -rf /etc/yum.repos.d/*.repo
b) Download the Aliyun base repository (if wget is not installed, install it first with yum -y install wget):
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
c) Download the Aliyun EPEL repository:
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
d) Add the Ceph repository:
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph x86_64 packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
[ceph-deploy]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
Or use the Aliyun mirrors instead:
[ceph]
name=Ceph x86_64 packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
[ceph-deploy]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
e) Install the EPEL release package:
sudo yum install epel-release -y
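After editing the repository files it is worth rebuilding the yum metadata cache and confirming the Ceph repos are visible (an optional verification, not shown in the original):
yum clean all
yum makecache
yum repolist | grep -i ceph      # the ceph and ceph-deploy repos should be listed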
4. Install and configure NTP on node5
sudo cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime   # set the timezone to Asia/Shanghai
sudo yum -y install ntp
sudo systemctl enable ntpd                                 # start ntpd at boot
sudo systemctl start ntpd
sudo ntpstat                                               # check whether the clock is synchronised
sudo systemctl stop ntpd                                   # stop ntpd only if time will be kept in sync by other means; otherwise leave it running
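If ntpd is left running, its peer status can be checked as below; keeping all cluster nodes synced to the same time source avoids clock-skew warnings from the monitors (optional check):
ntpq -p        # lists the NTP peers ntpd is currently using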
5. Configure the firewall on node5, or disable it
CentOS 7:
sudo systemctl stop firewalld.service      # stop firewalld
sudo systemctl disable firewalld.service   # keep firewalld from starting at boot
sudo firewall-cmd --state                  # verify: should report "not running"
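If you would rather keep firewalld running than disable it, the Ceph ports have to be opened instead. A minimal sketch, assuming the default ports (6789/tcp for monitors, 6800-7300/tcp for OSD daemons; an OSD-only node such as node5 strictly needs just the OSD range):
sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
sudo firewall-cmd --reload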
6. Disable SELinux on node5
# sed -i "/^SELINUX/s/enforcing/disabled/" /etc/selinux/config    # permanent; takes effect after a reboot
or
# setenforce 0                                                    # immediate; lasts until the next reboot
Also install python-pip on node5:
# yum install python-pip -y
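Two optional commands before continuing (hedged additions, not shown in the original walkthrough): verify SELinux is no longer enforcing, and make sure the Ceph packages are present on node5, which can be done from the admin node with ceph-deploy:
getenforce                                      # should print Permissive or Disabled
ceph-deploy install --release luminous node5    # run on node1, from /usr/local/src/ceph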
7. List the disks on node5 (still in the /usr/local/src/ceph directory on node1):
[root@node1 ceph]# ceph-deploy disk list node5
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk list node5
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : list
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf :
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : ['node5']
[ceph_deploy.cli][INFO ] func :
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[node5][DEBUG ] connected to host: node5
[node5][DEBUG ] detect platform information from remote host
[node5][DEBUG ] detect machine type
[node5][DEBUG ] find the location of an executable
[node5][INFO ] Running command: fdisk -l
[node5][INFO ] Disk /dev/sda: 10.7 GB, 10737418240 bytes, 20971520 sectors
[node5][INFO ] Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
[node5][INFO ] Disk /dev/mapper/centos-root: 8585 MB, 8585740288 bytes, 16769024 sectors
[node5][INFO ] Disk /dev/mapper/centos-swap: 1073 MB, 1073741824 bytes, 2097152 sectors
8. Add a new disk to node5; it must not be mounted or formatted. Verify it on node5:
[root@node5 ~]# fdisk -lu
Disk /dev/sda: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000d00c0
Device Boot Start End Blocks Id System
/dev/sda1 * 2048 2099199 1048576 83 Linux
/dev/sda2 2099200 20971519 9436160 8e Linux LVM
Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mapper/centos-root: 8585 MB, 8585740288 bytes, 16769024 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mapper/centos-swap: 1073 MB, 1073741824 bytes, 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
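Before zapping, it is worth double-checking on node5 that /dev/sdb really is the new, empty disk, since the zap step is destructive (an optional safeguard):
lsblk -f /dev/sdb        # should show no partitions and no filesystem signature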
9. Zap (wipe) the disk (still in the /usr/local/src/ceph directory on node1):
[root@node1 ceph]# ceph-deploy disk zap node5 /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap node5 /dev/sdb
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : zap
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf :
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : node5
[ceph_deploy.cli][INFO ] func :
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] disk : ['/dev/sdb']
[ceph_deploy.osd][DEBUG ] zapping /dev/sdb on node5
[node5][DEBUG ] connected to host: node5
[node5][DEBUG ] detect platform information from remote host
[node5][DEBUG ] detect machine type
[node5][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
[node5][DEBUG ] zeroing last few blocks of device
[node5][DEBUG ] find the location of an executable
[node5][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdb
[node5][DEBUG ] --> Zapping: /dev/sdb
[node5][DEBUG ] --> --destroy was not specified, but zapping a whole device will remove the partition table
[node5][DEBUG ] Running command: wipefs --all /dev/sdb
[node5][DEBUG ] Running command: dd if=/dev/zero of=/dev/sdb bs=1M count=10
[node5][DEBUG ] --> Zapping successful for:
10. Create the OSD on the new disk
Run on the admin node (node1):
# cd /usr/local/src/ceph
# ceph-deploy osd create node5 --data /dev/sdb
[root@node1 ceph]# ceph-deploy osd create node5 --data /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node5 --data /dev/sdb
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] cd_conf :
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] journal : None
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] host : node5
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] func :
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] data : /dev/sdb
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[node5][DEBUG ] connected to host: node5
[node5][DEBUG ] detect platform information from remote host
[node5][DEBUG ] detect machine type
[node5][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node5
[node5][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node5][WARNIN] osd keyring does not exist yet, creating one
[node5][DEBUG ] create a keyring file
[node5][DEBUG ] find the location of an executable
[node5][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[node5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[node5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 1b433f64-35da-43b1-a608-3e4bf39ee7b7
[node5][DEBUG ] Running command: vgcreate --force --yes ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a /dev/sdb
[node5][DEBUG ] stdout: Physical volume "/dev/sdb" successfully created.
[node5][DEBUG ] stdout: Volume group "ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a" successfully created
[node5][DEBUG ] Running command: lvcreate --yes -l 100%FREE -n osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7 ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a
[node5][DEBUG ] stdout: Logical volume "osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7" created.
[node5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[node5][DEBUG ] Running command: mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
[node5][DEBUG ] Running command: restorecon /var/lib/ceph/osd/ceph-3
[node5][DEBUG ] Running command: chown -h ceph:ceph /dev/ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a/osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7
[node5][DEBUG ] Running command: chown -R ceph:ceph /dev/dm-2
[node5][DEBUG ] Running command: ln -s /dev/ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a/osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7 /var/lib/ceph/osd/ceph-3/block
[node5][DEBUG ] Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
[node5][DEBUG ] stderr: got monmap epoch 1
[node5][DEBUG ] Running command: ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQAJgVtd00NPCRAAJCqKDp8puVQd/rSCaau+LA==
[node5][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-3/keyring
[node5][DEBUG ] added entity osd.3 auth auth(auid = 18446744073709551615 key=AQAJgVtd00NPCRAAJCqKDp8puVQd/rSCaau+LA== with 0 caps)
[node5][DEBUG ] Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
[node5][DEBUG ] Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
[node5][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 1b433f64-35da-43b1-a608-3e4bf39ee7b7 --setuser ceph --setgroup ceph
[node5][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdb
[node5][DEBUG ] Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[node5][DEBUG ] Running command: ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a/osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7 --path /var/lib/ceph/osd/ceph-3
[node5][DEBUG ] Running command: ln -snf /dev/ceph-6a25e2ec-d8ab-4b38-bfeb-c5df0084777a/osd-block-1b433f64-35da-43b1-a608-3e4bf39ee7b7 /var/lib/ceph/osd/ceph-3/block
[node5][DEBUG ] Running command: chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
[node5][DEBUG ] Running command: chown -R ceph:ceph /dev/dm-2
[node5][DEBUG ] Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[node5][DEBUG ] Running command: systemctl enable ceph-volume@lvm-3-1b433f64-35da-43b1-a608-3e4bf39ee7b7
[node5][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/[email protected] to /usr/lib/systemd/system/[email protected].
[node5][DEBUG ] Running command: systemctl enable --runtime ceph-osd@3
[node5][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/[email protected] to /usr/lib/systemd/system/[email protected].
[node5][DEBUG ] Running command: systemctl start ceph-osd@3
[node5][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 3
[node5][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdb
[node5][INFO ] checking OSD status...
[node5][DEBUG ] find the location of an executable
[node5][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node5 is now ready for osd use.
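To confirm the new OSD daemon is actually running and has joined with the expected capacity (an optional verification; the OSD ID 3 comes from the log above):
systemctl status ceph-osd@3      # on node5: the unit should be active (running)
ceph osd df                      # on node1: osd.3 should appear with roughly 20 GiB of capacity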
11. Check the cluster status
# ceph -s
With the new OSD joined, the status should now report four OSDs up and in (4 osds: 4 up, 4 in).
12. Check the OSD tree
[root@node1 ceph]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.07794 root default
-3 0.01949 host node1
0 hdd 0.01949 osd.0 up 1.00000 1.00000
-5 0.01949 host node2
1 hdd 0.01949 osd.1 up 1.00000 1.00000
-7 0.01949 host node3
2 hdd 0.01949 osd.2 up 1.00000 1.00000
-9 0.01949 host node5
3 hdd 0.01949 osd.3 up 1.00000 1.00000
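Adding an OSD triggers rebalancing, so some placement groups will backfill onto osd.3. The progress can be watched until the cluster returns to HEALTH_OK, for example with:
ceph -w             # streams cluster events, including backfill/recovery progress
ceph osd df tree    # shows per-OSD utilisation as data moves onto the new OSD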