ceph-deploy
Install: add the Ceph release key first, then the repository, then the package:
wget --no-check-certificate -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
echo deb https://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
sudo apt update
sudo apt install ceph-deploy
NTP
Install an NTP daemon on every node so the clocks stay in sync; either of the two packages below works:
sudo apt install ntpsec
apt-get install ntp
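After installing it is worth confirming that the clock is actually synchronizing on each node; a quick check with standard Ubuntu 18.04 tooling (my addition, not part of the original notes):
timedatectl status    # look for "System clock synchronized: yes"
ntpq -p               # if the classic ntp daemon is in use, list its upstream peers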
hostname
# hostname
node1
# vim /etc/hosts
192.168.1.20 node1
192.168.1.21 node2
192.168.1.22 node3
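The same three entries should be present in /etc/hosts on every node. A quick sanity check that the short names resolve (my addition, not in the original notes):
getent hosts node1 node2 node3
ping -c 1 node2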
root
Configure passwordless SSH for the root user so the admin node can log in to the other nodes without a password:
ssh-keygen
ssh-copy-id root@node2
ssh-copy-id root@node3
ssh node2
mkdir my-cluster
cd my-cluster
# ceph-deploy new node1
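ceph-deploy new writes the initial cluster files into the working directory; the directory should end up looking roughly like this (listing reconstructed from memory, file names may differ slightly between releases):
# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring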
python-minimal (ceph-deploy needs a Python interpreter on every target node)
apt install python-minimal -y
ceph-deploy install node1 node2 node3
[node3][DEBUG ] ceph is already the newest version (12.2.12-0ubuntu0.18.04.4).
[node3][DEBUG ] ceph-mon is already the newest version (12.2.12-0ubuntu0.18.04.4).
[node3][DEBUG ] ceph-osd is already the newest version (12.2.12-0ubuntu0.18.04.4).
[node3][DEBUG ] radosgw is already the newest version (12.2.12-0ubuntu0.18.04.4).
[node3][DEBUG ] ceph-mds is already the newest version (12.2.12-0ubuntu0.18.04.4).
Ubuntu 18.04
Every node needs the mirror sources added; then run apt update.
# The deb-src entries are commented out by default to speed up apt update; uncomment them if needed.
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/debian-luminous
export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
ceph-deploy install node1 node2 node3 node4
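The two exports above make ceph-deploy install pull Ceph from a domestic mirror instead of download.ceph.com. If the environment variables are not picked up, the same URLs can also be passed as command-line options (option names as I recall them for this ceph-deploy release, worth confirming with ceph-deploy install --help):
ceph-deploy install --repo-url http://mirrors.163.com/ceph/debian-luminous --gpg-url http://mirrors.163.com/ceph/keys/release.asc node1 node2 node3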
public_network
# vim ceph.conf
public_network = 192.168.1.0/24
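For context, the setting belongs in the [global] section of the ceph.conf that ceph-deploy new generated; a minimal sketch (fsid elided, mon values taken from the hosts file above, everything else left at defaults):
[global]
fsid = ...
mon_initial_members = node1
mon_host = 192.168.1.20
public_network = 192.168.1.0/24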
ceph-deploy mon create-initial
ceph-deploy admin node1 node2 node3
ceph-deploy mon create node1 node2 node3
# ceph -s
health: HEALTH_OK
mon: 3 daemons, quorum node1,node2,node3
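For a more detailed view of the monitor quorum than ceph -s gives (standard command, not from the original notes):
ceph quorum_status --format json-pretty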
mgr
ceph-deploy mgr create node1 node2 node3
ceph mgr module enable dashboard
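After enabling the module, the dashboard URL can be listed with ceph mgr services; in Luminous the built-in dashboard listens on port 7000 by default. The illustrative output below assumes node1 is the active mgr:
# ceph mgr services
{
    "dashboard": "http://node1:7000/"
}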
# blkid -o value -s TYPE /dev/sda1
ext4
# umount /dev/sda1
# mkfs.ext4 /dev/sda
ceph-deploy disk zap node1:/dev/sda
ceph-deploy osd prepare --fs-type xfs node1:/dev/sda
sda 8:0 0 7.3T 0 disk
├─sda1 8:1 0 100M 0 part /var/lib/ceph/osd/ceph-0
└─sda2 8:2 0 7.3T 0 part
ceph-deploy osd activate node1:/dev/sda1
# ceph-deploy disk list node1
[node1][DEBUG ] /dev/sda :
[node1][DEBUG ] /dev/sda1 ceph data, active, cluster ceph, osd.0, block /dev/sda2
[node1][DEBUG ] /dev/sda2 ceph block, for /dev/sda1
# ceph osd tree
# ceph osd stat
# ceph osd status
# ceph -s
# ceph osd out 0
marked out osd.0.
# ceph osd down 0
marked down osd.0.
# ceph osd crush remove osd.0
removed item id 0 name 'osd.0' from crush map
# ceph auth del osd.0
updated
systemctl status ceph-osd@0.service
systemctl stop ceph-osd@0.service
# ceph osd rm 0
removed osd.0
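Before re-using the disk it does not hurt to confirm the OSD is really gone, using the same status commands as earlier:
ceph osd tree
ceph -s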
# mkfs.xfs -f /dev/sda
# blkid -o value -s TYPE /dev/sda
xfs
ceph-deploy
# ceph-deploy --version
1.5.38
# sudo apt-get install ceph-deploy
# ceph-deploy --version
2.0.1
# ceph-deploy disk zap node1:/dev/sda
# ceph-deploy osd create node1:/dev/sda
[node1][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[node1][DEBUG ] --> Absolute path not found for executable: lvs
[node1][WARNIN] --> OSError: [Errno 2] No such file or directory
[node1][DEBUG ] --> Ensure $PATH environment variable contains common executable locations
[node1][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs
Exasperating. The failure is because lvm2 is not installed (ceph-volume cannot find the lvs executable):
apt install -y lvm2
ceph-deploy osd create --data /dev/sda node1
# mkfs.xfs -f /dev/sdb
# ceph-deploy osd create --data /dev/sdb node1
# lvdisplay
LV Path /dev/lvm_01/lv01
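The notes never show how /dev/lvm_01/lv01 was created; one way to build such a logical volume with the lvm2 tools installed above (the backing device /dev/sdb is only an example):
pvcreate /dev/sdb
vgcreate lvm_01 /dev/sdb
lvcreate -l 100%FREE -n lv01 lvm_01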
ceph-deploy osd create --data /dev/lvm_01/lv01 node2
ceph osd status
systemctl restart ceph-osd.target
References: