Using Ceph Block Storage (RBD)
- Create an RBD image
[cephadm@ceph-admin ceph-cluster]$ rbd create mypool/rbd-demo1.img --size 10G
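# If the image will be mapped with the Linux kernel client, the feature-disable
# step later in this section can be avoided by enabling only the layering
# feature at creation time (an illustrative variant, not run in this walkthrough)
[cephadm@ceph-admin ceph-cluster]$ rbd create mypool/rbd-demo2.img --size 10G --image-feature layering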
# List the images in the pool
[cephadm@ceph-admin ceph-cluster]$ rbd -p mypool ls
rbd-demo.img
rbd-demo1.img
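# rbd ls also accepts -l for a long listing that includes image sizes (illustrative)
[cephadm@ceph-admin ceph-cluster]$ rbd -p mypool ls -l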
# Show detailed information about an image
[cephadm@ceph-admin ceph-cluster]$ rbd info mypool/rbd-demo.img
rbd image 'rbd-demo.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        id: 37bd6b8b4567
        block_name_prefix: rbd_data.37bd6b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Sat Dec 4 18:51:43 2021
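# The object count follows from the image size and the object size: order 22
# means 2^22-byte (4 MiB) objects, and 10 GiB / 4 MiB = 2560 objects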
# Disable image features the kernel RBD client does not support; object-map,
# fast-diff, and deep-flatten depend on exclusive-lock, so they are disabled together
[cephadm@ceph-admin ceph-cluster]$ rbd feature disable mypool/rbd-demo.img object-map fast-diff deep-flatten exclusive-lock
# Delete a block image
[cephadm@ceph-admin ceph-cluster]$ rbd rm mypool/rbd-demo1.img
Removing image: 100% complete...done.
- Map and mount the device on a client
The client needs the Ceph packages installed:
[root@master ~]# yum -y install ceph
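# If the machine only needs to act as an RBD client, installing ceph-common
# (which provides the rbd command) is usually sufficient instead:
[root@master ~]# yum -y install ceph-common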
# Push the cluster config file and admin keyring to the client
[root@node-1 ceph-deploy]# ceph-deploy admin 10.46.8.18
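# Without ceph-deploy, the same files can be copied by hand from the deploy
# directory (a sketch, assuming the default /etc/ceph destination on the client)
[root@node-1 ceph-deploy]# scp ceph.conf ceph.client.admin.keyring 10.46.8.18:/etc/ceph/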
# Map the image to a local block device
[cephadm@ceph-admin ceph-cluster]$ sudo rbd map mypool/rbd-demo.img
/dev/rbd0
# List mapped devices
[cephadm@ceph-admin ceph-cluster]$ rbd device list
id pool   image        snap device
0  mypool rbd-demo.img -    /dev/rbd0
[cephadm@ceph-admin ceph-cluster]$ lsblk
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
rbd0   252:0    0  10G  0 disk
vda    253:0    0  40G  0 disk
└─vda1 253:1    0  40G  0 part /
# Format the device, then mount it and use it
[cephadm@ceph-admin ceph-cluster]$ sudo mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=16, agsize=163840 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[cephadm@ceph-admin ceph-cluster]$ sudo mkdir /mnt/rbd-demo
[cephadm@ceph-admin ceph-cluster]$ sudo mount /dev/rbd0 /mnt/rbd-demo
[cephadm@ceph-admin ceph-cluster]$ df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 947M 0 947M 0% /dev
tmpfs tmpfs 959M 0 959M 0% /dev/shm
tmpfs tmpfs 959M 17M 943M 2% /run
tmpfs tmpfs 959M 0 959M 0% /sys/fs/cgroup
/dev/vda1 xfs 40G 2.1G 38G 6% /
tmpfs tmpfs 192M 0 192M 0% /run/user/0
/dev/rbd0 xfs 10G 33M 10G 1% /mnt/rbd-demo
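# To make the mapping and mount survive a reboot, the rbdmap service shipped
# with ceph-common can be used together with a noauto fstab entry (a sketch,
# assuming the admin keyring and the udev-created /dev/rbd/<pool>/<image> symlink):
# /etc/ceph/rbdmap:
#     mypool/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
# /etc/fstab:
#     /dev/rbd/mypool/rbd-demo.img /mnt/rbd-demo xfs noauto,_netdev 0 0
[cephadm@ceph-admin ceph-cluster]$ sudo systemctl enable rbdmap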
# Test reads and writes
[root@ceph-admin rbd-demo]# echo "huhuhahei" > test
[root@ceph-admin rbd-demo]# cat test
huhuhahei
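# When the device is no longer needed it can be detached by unmounting and
# unmapping (shown for reference; the sections below continue with it mounted)
[root@ceph-admin ~]# umount /mnt/rbd-demo
[root@ceph-admin ~]# rbd unmap /dev/rbd0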
- Expand the block device
[root@ceph-admin rbd-demo]# rbd resize mypool/rbd-demo.img --size 20G
Resizing image: 100% complete...done.
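# rbd resize only grows an image by default; shrinking requires an explicit
# flag and destroys data beyond the new size, so shrink the filesystem first
# (illustrative only, use with care)
[root@ceph-admin rbd-demo]# rbd resize mypool/rbd-demo.img --size 5G --allow-shrink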
# Verify that the resize has taken effect
[root@ceph-admin rbd-demo]# rbd info mypool/rbd-demo.img
rbd image 'rbd-demo.img':
        size 20 GiB in 5120 objects
        order 22 (4 MiB objects)
        id: 37bd6b8b4567
        block_name_prefix: rbd_data.37bd6b8b4567
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Sat Dec 4 18:51:43 2021
[root@ceph-admin rbd-demo]# lsblk
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
rbd0   252:0    0  20G  0 disk /mnt/rbd-demo
vda    253:0    0  40G  0 disk
└─vda1 253:1    0  40G  0 part /
# But the filesystem has not grown; it is still 10G
[root@ceph-admin rbd-demo]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 947M 0 947M 0% /dev
tmpfs 959M 0 959M 0% /dev/shm
tmpfs 959M 17M 943M 2% /run
tmpfs 959M 0 959M 0% /sys/fs/cgroup
/dev/vda1 40G 2.1G 38G 6% /
tmpfs 192M 0 192M 0% /run/user/0
/dev/rbd0 10G 33M 10G 1% /mnt/rbd-demo
# Grow the XFS filesystem with xfs_growfs
[root@ceph-admin rbd-demo]# xfs_growfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=16, agsize=163840 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 2621440 to 5242880
[root@ceph-admin rbd-demo]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 947M 0 947M 0% /dev
tmpfs tmpfs 959M 0 959M 0% /dev/shm
tmpfs tmpfs 959M 17M 943M 2% /run
tmpfs tmpfs 959M 0 959M 0% /sys/fs/cgroup
/dev/vda1 xfs 40G 2.1G 38G 6% /
tmpfs tmpfs 192M 0 192M 0% /run/user/0
/dev/rbd0 xfs 20G 34M 20G 1% /mnt/rbd-demo
# The filesystem now shows 20G; the expansion is complete
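# xfs_growfs works on a mounted filesystem and is normally given the mount
# point; had the image been formatted ext4, resize2fs would be the equivalent
# (both lines illustrative)
xfs_growfs /mnt/rbd-demo
resize2fs /dev/rbd0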
- Fixing HEALTH_WARN
[root@ceph-admin rbd-demo]# ceph -s
cluster:
id: b5fa13e6-6b7f-4eeb-bac3-a7b6bc535109
health: HEALTH_WARN
application not enabled on 1 pool(s)
# This warning appears because the pool was created without an application type being set
[root@ceph-admin rbd-demo]# ceph osd pool application enable mypool rbd
enabled application 'rbd' on pool 'mypool'
[root@ceph-admin rbd-demo]# ceph osd pool application get mypool
{
"rbd": {}
}
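# For new pools this warning can be avoided up front: rbd pool init prepares a
# pool for RBD and sets the application tag automatically (illustrative)
[root@ceph-admin rbd-demo]# rbd pool init mypool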
# The following command shows the details
[root@ceph-admin rbd-demo]# ceph health detail