Lab steps:
- Add a new 20 GB disk to the controller node
- Partition the disk
- Format the partitions
- Unmount the original Swift loopback device
- Create two directories and mount the new partitions on them
- Change the ownership of the directories
- Create the ring builders
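A condensed sketch of the commands these steps map to, assuming the new disk shows up as /dev/nvme0n2 as it does in the transcript below (run as root; the full details follow):
# 1. partition and format (two 10 GB partitions, XFS)
fdisk /dev/nvme0n2                               # n, n, w (see the interactive session below)
mkfs.xfs /dev/nvme0n2p1 && mkfs.xfs /dev/nvme0n2p2
# 2. retire the packstack loopback device and mount the real partitions
umount /srv/node/swiftloopback
mkdir /srv/node/ob1 /srv/node/ob2
mount -a                                         # after adding the two fstab entries
chown swift:swift /srv/node/ob*
# 3. rebuild the rings in /etc/swift (create, add devices, rebalance)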
[root@controller ~]# fdisk -l
Disk /dev/nvme0n1: 100 GiB, 107374182400 bytes, 209715200 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0xa59ecf76
Device Boot Start End Sectors Size Id Type
/dev/nvme0n1p1 * 2048 1640447 1638400 800M 83 Linux
/dev/nvme0n1p2 1640448 18417663 16777216 8G 82 Linux swap / Solaris
/dev/nvme0n1p3 18417664 209715199 191297536 91.2G 83 Linux
Disk /dev/nvme0n2: 20 GiB, 21474836480 bytes, 41943040 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/loop0: 2 GiB, 2147483648 bytes, 4194304 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
.....
[root@controller ~]# fdisk /dev/nvme0n2
Welcome to fdisk (util-linux 2.32.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Device does not contain a recognized partition table.
Created a new DOS disklabel with disk identifier 0xb33e7d50.
Command (m for help): n
Partition type
p primary (0 primary, 0 extended, 4 free)
e extended (container for logical partitions)
Select (default p):
Using default response p.
Partition number (1-4, default 1):
First sector (2048-41943039, default 2048):
Last sector, +sectors or +size{K,M,G,T,P} (2048-41943039, default 41943039): +10G
Created a new partition 1 of type 'Linux' and of size 10 GiB.
Command (m for help): n
Partition type
p primary (1 primary, 0 extended, 3 free)
e extended (container for logical partitions)
Select (default p):
Using default response p.
Partition number (2-4, default 2):
First sector (20973568-41943039, default 20973568):
Last sector, +sectors or +size{K,M,G,T,P} (20973568-41943039, default 41943039):
Created a new partition 2 of type 'Linux' and of size 10 GiB.
Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Syncing disks.
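The same layout can also be created non-interactively; a minimal sketch using parted (not part of the original lab, shown only as an alternative):
# Create an msdos label and two roughly 10 GiB primary partitions on the new disk
parted -s /dev/nvme0n2 mklabel msdos \
    mkpart primary xfs 1MiB 50% \
    mkpart primary xfs 50% 100%
partprobe /dev/nvme0n2      # ask the kernel to re-read the partition table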
[root@controller ~]# fdisk -l
Disk /dev/nvme0n1: 100 GiB, 107374182400 bytes, 209715200 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0xa59ecf76
Device Boot Start End Sectors Size Id Type
/dev/nvme0n1p1 * 2048 1640447 1638400 800M 83 Linux
/dev/nvme0n1p2 1640448 18417663 16777216 8G 82 Linux swap / Solaris
/dev/nvme0n1p3 18417664 209715199 191297536 91.2G 83 Linux
Disk /dev/nvme0n2: 20 GiB, 21474836480 bytes, 41943040 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0xb33e7d50
Device Boot Start End Sectors Size Id Type
/dev/nvme0n2p1 2048 20973567 20971520 10G 83 Linux
/dev/nvme0n2p2 20973568 41943039 20969472 10G 83 Linux
[root@controller ~]# mkfs.xfs /dev/nvme0n2p1
meta-data=/dev/nvme0n2p1 isize=512 agcount=4, agsize=655360 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@controller ~]# mkfs.xfs /dev/nvme0n2p2
meta-data=/dev/nvme0n2p2 isize=512 agcount=4, agsize=655296 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=2621184, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
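Optionally, blkid can confirm the new XFS filesystems and report their UUIDs; the fstab entries added later use the device paths, but the UUIDs could be used instead (a sketch, not part of the original transcript):
blkid /dev/nvme0n2p1 /dev/nvme0n2p2      # prints TYPE="xfs" and the UUID of each partition
# hypothetical UUID-based fstab form: UUID=<uuid-of-p1> /srv/node/ob1 xfs defaults 0 0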
[root@controller ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 1.9G 0 1.9G 0% /dev
tmpfs 1.9G 4.0K 1.9G 1% /dev/shm
tmpfs 1.9G 18M 1.9G 1% /run
tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/nvme0n1p3 92G 22G 70G 24% /
/dev/nvme0n1p1 794M 193M 602M 25% /boot
/dev/loop0 1.9G 21M 1.7G 2% /srv/node/swiftloopback
tmpfs 390M 0 390M 0% /run/user/0
[root@controller ~]# umount /srv/node/swiftloopback
[root@controller ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 1.9G 0 1.9G 0% /dev
tmpfs tmpfs 1.9G 4.0K 1.9G 1% /dev/shm
tmpfs tmpfs 1.9G 18M 1.9G 1% /run
tmpfs tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/nvme0n1p3 xfs 92G 22G 70G 24% /
/dev/nvme0n1p1 xfs 794M 193M 602M 25% /boot
tmpfs tmpfs 390M 0 390M 0% /run/user/0
# Comment out the loopback entry so it is no longer mounted at boot
[root@controller ~]# vim /etc/fstab
[root@controller ~]# cat /etc/fstab
# HEADER: This file was autogenerated at 2023-08-06 15:30:22 +0800
# HEADER: by puppet. While it can still be managed manually, it
# HEADER: is definitely not recommended.
#
# /etc/fstab
# Created by anaconda on Sun Aug 6 03:09:26 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
# /srv/loopback-device/swiftloopback /srv/node/swiftloopback ext4 noatime,nodiratime,nofail,loop,user_xattr 0 0
UUID=7975f97d-2540-4f53-9368-38eed00f501f / xfs defaults 0 0
UUID=d67c0b35-d028-499b-a978-3f34afce2ff6 /boot xfs defaults 0 0
UUID=120941d3-781e-46cb-9484-13d256d0be23 none swap defaults 0 0
[root@controller ~]# cd /srv/node/
[root@controller node]# ls
swiftloopback
[root@controller node]# rm -rf swiftloopback/
[root@controller node]# ls
[root@controller node]# mkdir ob1 ob2
[root@controller node]# ls
ob1 ob2
[root@controller node]# vim /etc/fstab
[root@controller node]# cat /etc/fstab
# HEADER: This file was autogenerated at 2023-08-06 15:30:22 +0800
# HEADER: by puppet. While it can still be managed manually, it
# HEADER: is definitely not recommended.
#
# /etc/fstab
# Created by anaconda on Sun Aug 6 03:09:26 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
UUID=7975f97d-2540-4f53-9368-38eed00f501f / xfs defaults 0 0
UUID=d67c0b35-d028-499b-a978-3f34afce2ff6 /boot xfs defaults 0 0
UUID=120941d3-781e-46cb-9484-13d256d0be23 none swap defaults 0 0
/dev/nvme0n2p1 /srv/node/ob1 xfs defaults 0 0
/dev/nvme0n2p2 /srv/node/ob2 xfs defaults 0 0
[root@controller node]# mount -a
mount: (hint) your fstab has been modified, but systemd still uses
the old version; use 'systemctl daemon-reload' to reload.
[root@controller node]# systemctl daemon-reload
[root@controller node]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 1.9G 0 1.9G 0% /dev
tmpfs tmpfs 1.9G 4.0K 1.9G 1% /dev/shm
tmpfs tmpfs 1.9G 18M 1.9G 1% /run
tmpfs tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/nvme0n1p3 xfs 92G 22G 70G 24% /
/dev/nvme0n1p1 xfs 794M 193M 602M 25% /boot
tmpfs tmpfs 390M 0 390M 0% /run/user/0
/dev/nvme0n2p1 xfs 10G 104M 9.9G 2% /srv/node/ob1
/dev/nvme0n2p2 xfs 10G 104M 9.9G 2% /srv/node/ob2
[root@controller node]# pwd
/srv/node
[root@controller node]# ll
total 0
drwxr-xr-x 2 root root 6 Aug 21 15:23 ob1
drwxr-xr-x 2 root root 6 Aug 21 15:23 ob2
[root@controller node]# chown swift:swift ob*
[root@controller node]# ll
total 0
drwxr-xr-x 2 swift swift 6 Aug 21 15:23 ob1
drwxr-xr-x 2 swift swift 6 Aug 21 15:23 ob2
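On an SELinux-enforcing CentOS/RHEL host it is also worth restoring the SELinux context on the new mount points so the Swift processes can write to them; an extra step not shown in the original transcript:
restorecon -Rv /srv/node               # reset SELinux labels under /srv/node
ls -Zd /srv/node/ob1 /srv/node/ob2     # verify the context (expect swift_data_t on RDO installs)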
[root@controller swift]# man swift-ring-builder
swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>
    Creates <builder_file> with 2^<part_power> partitions and <replicas>.
    <min_part_hours> is number of hours to restrict moving a partition more than once.
# The three arguments are: partition power, replica count, minimum change time (hours).
# 12: the partition power; the ring will be split into 2^12 partitions.
# 2:  the replica count; how many copies of each object will be stored.
# 1:  the hour count; once the ring is built, a partition cannot be moved more than once within 1 hour.
[root@controller ~]# cd /etc/swift/
[root@controller swift]# pwd
/etc/swift
[root@controller swift]# ls
account.builder container.ring.gz object-server
account.ring.gz container-server object-server.conf
account-server container-server.conf proxy-server
account-server.conf internal-client.conf proxy-server.conf
backups object.builder swift.conf
container.builder object-expirer.conf
container-reconciler.conf object.ring.gz
[root@controller swift]# rm -rf *.ring.gz
[root@controller swift]# rm -rf *.builder
# Create the account/container/object ring builders
[root@controller swift]# swift-ring-builder account.builder create 12 2 1
[root@controller swift]# swift-ring-builder container.builder create 12 2 1
[root@controller swift]# swift-ring-builder object.builder create 12 2 1
[root@controller swift]# ls
account.builder container-server object-server.conf
account-server container-server.conf proxy-server
account-server.conf internal-client.conf proxy-server.conf
backups object.builder swift.conf
container.builder object-expirer.conf
container-reconciler.conf object-server
# man swift-ring-builder: syntax for adding a device to a ring:
#   <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <weight>
# Check the bind ports in the server configuration files
# (account-server: 6002, container-server: 6001, object-server: 6000; these are used in the add commands below)
[root@controller swift]# vim account-server.conf
[root@controller swift]# vim container-server.conf
[root@controller swift]# vim object-server.conf
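Instead of opening each file in vim, the bind ports can be pulled out with grep; a quick sketch using the bind_port option name from the server configs:
cd /etc/swift
grep -H '^bind_port' account-server.conf container-server.conf object-server.conf
# expected values in this deployment: 6002, 6001 and 6000 respectively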
[root@controller swift]# swift-ring-builder account.builder add z1-192.168.129.185:6002/ob1 100
WARNING: No region specified for z1-192.168.129.185:6002/ob1. Defaulting to region 1.
Device d0r1z1-192.168.129.185:6002R192.168.129.185:6002/ob1_"" with 100.0 weight got id 0
[root@controller swift]# swift-ring-builder account.builder add z2-192.168.129.185:6002/ob2 100
WARNING: No region specified for z2-192.168.129.185:6002/ob2. Defaulting to region 1.
Device d1r1z2-192.168.129.185:6002R192.168.129.185:6002/ob2_"" with 100.0 weight got id 1
[root@controller swift]# swift-ring-builder account.builder
[root@controller swift]# swift-ring-builder container.builder add z1-192.168.129.185:6001/ob1 100
WARNING: No region specified for z1-192.168.129.185:6001/ob1. Defaulting to region 1.
Device d0r1z1-192.168.129.185:6001R192.168.129.185:6001/ob1_"" with 100.0 weight got id 0
[root@controller swift]# swift-ring-builder container.builder add z2-192.168.129.185:6001/ob2 100
WARNING: No region specified for z2-192.168.129.185:6001/ob2. Defaulting to region 1.
Device d1r1z2-192.168.129.185:6001R192.168.129.185:6001/ob2_"" with 100.0 weight got id 1
[root@controller swift]# swift-ring-builder container.builder
[root@controller swift]# swift-ring-builder object.builder add z1-192.168.129.185:6000/ob1 100
WARNING: No region specified for z1-192.168.129.185:6000/ob1. Defaulting to region 1.
Device d0r1z1-192.168.129.185:6000R192.168.129.185:6000/ob1_"" with 100.0 weight got id 0
[root@controller swift]# swift-ring-builder object.builder add z2-192.168.129.185:6000/ob2 100
WARNING: No region specified for z2-192.168.129.185:6000/ob2. Defaulting to region 1.
Device d1r1z2-192.168.129.185:6000R192.168.129.185:6000/ob2_"" with 100.0 weight got id 1
[root@controller swift]# swift-ring-builder object.builder
[root@controller swift]# swift-ring-builder account.builder rebalance
Reassigned 8192 (200.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
[root@controller swift]# swift-ring-builder container.builder rebalance
Reassigned 8192 (200.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
[root@controller swift]# swift-ring-builder object.builder rebalance
Reassigned 8192 (200.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
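Each rebalance reports 8192 reassigned partitions, which is simply 2^12 = 4096 partitions times 2 replicas. Rebalancing also writes fresh account.ring.gz / container.ring.gz / object.ring.gz files into /etc/swift, so the Swift services should be restarted to pick them up (service names as packaged by RDO/packstack; adjust if yours differ):
systemctl restart openstack-swift-proxy \
    openstack-swift-account openstack-swift-container openstack-swift-object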
After uploading a file, look at the underlying storage on the VM: because of the two-replica policy, each object is stored as two copies.
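The upload itself is not captured in the transcript; with the packstack credentials sourced it could look like the following (file and container names here are hypothetical):
source ~/keystonerc_admin              # packstack credential file (name assumed)
swift upload test-container test.txt   # upload a local file into a container
swift list test-container              # confirm the object exists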
[root@controller swift]# cd /srv/node/
[root@controller node]# ls
ob1 ob2
[root@controller node]# find -name *.data
[root@controller node]# find -name *.data
./ob1/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob2/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
[root@controller node]# find -name *.data
./ob1/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob1/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
./ob2/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob2/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
The final effect we want is shown at the end of this section: Glance images stored in Swift with two replicas each. To get there, configure Glance to use Swift as its image backend:
[root@controller ~]# cd /etc/glance/
[root@controller glance]# cp glance-api.conf glance-api.conf.bak
[root@controller glance]# ls
glance-api.conf glance-image-import.conf metadefs schema-image.json
glance-api.conf.bak glance-scrubber.conf rootwrap.conf
glance-cache.conf glance-swift.conf rootwrap.d
[root@controller glance]# vim glance-api.conf
# All of the changes are after line 3000 of the file
3057 stores=file,http,swift  # backends Glance can use
3111 default_store=swift  # default backend Glance uses
3982 swift_store_region = RegionOne  # Swift region to store into
4032 swift_store_endpoint_type = publicURL  # endpoint type to connect through (public)
4090 swift_store_container = glance  # the name is customizable; after an image upload, a container whose name starts with "glance" is created automatically
4118 swift_store_large_object_size = 5120  # maximum size of a single object, 5120 MB (5 GB); larger images are split into chunks
4142 swift_store_large_object_chunk_size = 200  # chunk size (MB) used when splitting large objects, similar to striping
4160 swift_store_create_container_on_put = true  # create the container automatically on upload
4182 swift_store_multi_tenant = true  # whether multi-tenant storage is enabled
4230 swift_store_admin_tenants = services  # tenant/project that the swift service belongs to
4382 swift_store_auth_version = 2  # identity authentication version
4391 swift_store_auth_address = http://192.168.129.185:5000/v3  # identity auth address (same as the Keystone environment variable: export OS_AUTH_URL=http://192.168.129.185:5000/v3)
4399 swift_store_user = swift  # user Glance authenticates to Swift as
4408 swift_store_key = ed6cd2e5f717491f  # password of the swift user (taken from the packstack answer file: 1113 CONFIG_SWIFT_KS_PW=ed6cd2e5f717491f)
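For reference, the same settings grouped together as they would appear inside the [glance_store] section of glance-api.conf (line numbers dropped; values exactly as above):
[glance_store]
stores = file,http,swift
default_store = swift
swift_store_region = RegionOne
swift_store_endpoint_type = publicURL
swift_store_container = glance
swift_store_large_object_size = 5120
swift_store_large_object_chunk_size = 200
swift_store_create_container_on_put = true
swift_store_multi_tenant = true
swift_store_admin_tenants = services
swift_store_auth_version = 2
swift_store_auth_address = http://192.168.129.185:5000/v3
swift_store_user = swift
swift_store_key = ed6cd2e5f717491f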
## After the changes are done, restart the Glance services.
[root@controller glance]# systemctl restart openstack-glance-*
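A quick way to verify the new backend is to create a test image and check that it lands in Swift rather than on local disk; a hedged sketch (the image file name is hypothetical and the credentials are assumed to be sourced):
openstack image create cirros-swift --disk-format qcow2 \
    --container-format bare --file cirros-0.5.2-x86_64-disk.img
openstack image list    # note the new image ID
swift list              # a container whose name starts with "glance" should appear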
Two-replica behaviour: compare the stored objects before and after creating an image.
# Before creating the image
[root@controller node]# find -name *.data
./ob1/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob1/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
./ob2/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob2/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
# After creating the image
[root@controller node]# find -name *.data
./ob1/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob1/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
./ob1/objects/3183/9d4/c6f91e6678f60a3af7a23a62d34da9d4/1692608361.25116.data
./ob1/objects/1073/76a/43155ff427c56360f6e9a187b1e6d76a/1692608361.39604.data
./ob2/objects/984/bd3/3d88a72f317884cc5f7428de2da89bd3/1692606880.67249.data
./ob2/objects/19/7d5/013ad12a8ee1c31ac63d2632c97147d5/1692606945.93427.data
./ob2/objects/3183/9d4/c6f91e6678f60a3af7a23a62d34da9d4/1692608361.25116.data
./ob2/objects/1073/76a/43155ff427c56360f6e9a187b1e6d76a/1692608361.39604.data
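To tie the new .data files back to the image, the container that Glance created can be listed; per the swift_store_container setting its name starts with "glance" (the exact name may differ in multi-tenant mode, so check swift list first):
swift list | grep glance        # find the container(s) Glance created
swift list <container-name>     # object names include the image ID from 'openstack image list'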