ceph pool 管理


 

創建池(末尾的 128 為該池的 PG 數量 pg_num)
[root@node1 ~]# ceph osd pool create monitor 128
pool 'monitor' created
查看池
[root@node1 ~]# ceph osd pool ls
rbd
zstack
bak-t-2ea1196a7ada4fa0adc0f19177a1c101
pri-c-601e39f80c6a4659a223ddcfb1f4c929
pri-v-r-601e39f80c6a4659a223ddcfb1f4c929
pri-v-d-601e39f80c6a4659a223ddcfb1f4c929
monitor
上傳鏡像
[root@node1 ~]# qemu-img convert -p -f qcow2 -O raw openstack-centos7.qcow2 rbd:zstack/test1.img
    (100.00/100%)

查看池中的鏡像
[root@node1 ~]# rbd ls zstack
mnvm.img
test.img
test1.img
查看鏡像信息
[root@node1 ~]# rbd info zstack/test.img
rbd image 'test.img':
    size 10240 MB in 2560 objects
    order 22 (4096 kB objects)
    block_name_prefix: rbd_data.3bf902ae8944a
    format: 2
    features: layering, striping
    flags: 
    stripe unit: 4096 kB
    stripe count: 1
刪除鏡像信息
[root@node1 ~]# rbd rm zstack/test.img
Removing image: 100% complete...done.
[root@node1 ~]# 
刪除 pool(注意:需將池名寫兩次並附加 --yes-i-really-really-mean-it 才能真正刪除)
[root@node1 ~]# ceph osd pool delete monitor --yes-i-really-really-mean-it
Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool monitor.  If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.

[root@node1 ~]# ceph osd pool delete monitor monitor --yes-i-really-really-mean-it
pool 'monitor' removed
確認
[root@node1 ~]# ceph osd pool ls
rbd
zstack
bak-t-2ea1196a7ada4fa0adc0f19177a1c101
pri-c-601e39f80c6a4659a223ddcfb1f4c929
pri-v-r-601e39f80c6a4659a223ddcfb1f4c929
pri-v-d-601e39f80c6a4659a223ddcfb1f4c929
[root@node1 ~]# 

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM