RBD – MANAGE RADOS BLOCK DEVICE (RBD) IMAGES
rbd is a utility for manipulating rados block device (RBD) images, used by the Linux rbd driver and the rbd storage driver for Qemu/KVM. RBD images are simple block devices that are striped over objects and stored in a RADOS object store. The size of the objects the image is striped over must be a power of two.
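The per-object size is set by the image's "order": object size = 2^order bytes, so the default order 22 gives 4 MiB objects. As a hedged sketch (placeholders follow the {pool-name} syntax used below, and --order is assumed to be supported by this rbd version), an image striped over 8 MiB objects could be created with:
rbd create {pool-name}/{image-name} --size 4096 --order 23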
1. /etc/hosts 
115.XXX.XXX.7	compute1
115.XXX.XXX.4	compute2
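A quick sanity check that the new entries resolve (not part of the original procedure, just a suggested verification):
ceph@mgmt:~$ getent hosts compute1
ceph@mgmt:~$ ping -c 1 compute2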
2. CEPH setup
ceph@mgmt:~$ ssh-copy-id ceph@compute1
ceph@mgmt:~$ ssh ceph@compute1
ceph@mgmt:~$ ceph-deploy install compute1
ceph@mgmt:~$ cd cephcluster/
ceph@mgmt:~/cephcluster$ ceph-deploy admin compute1
ceph@mgmt:~/cephcluster$ rados lspools
rbd
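If ceph or rados commands on compute1 later fail with keyring permission errors after ceph-deploy admin, a common fix (assuming the default keyring path) is:
ceph@compute1:~$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring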
※ osd pool create <poolname> <int[0-]>     create pool
     {<int[0-]>} {replicated|erasure}        
     {<erasure_code_profile>} {<ruleset>}    
     {<int>}                                 
   osd pool delete <poolname> {<poolname>}  delete pool
     {--yes-i-really-really-mean-it}         
ceph osd pool create {pool-name} {pg-num} [{pgp-num}] [replicated] \
     [crush-ruleset-name]
ceph osd pool create {pool-name} {pg-num}  {pgp-num}   erasure \
     [erasure-code-profile] [crush-ruleset-name]
ceph@mgmt:~/cephcluster$ ceph osd pool create client07 256 256
pool 'client07' created
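The pg-num of 256 follows the usual rule of thumb of roughly 100 placement groups per OSD divided by the replica count, rounded to a power of two; for example, assuming a cluster of 8 OSDs with 3 replicas (the actual OSD count is not shown here), 8 x 100 / 3 ≈ 267, which rounds to 256.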
ceph@mgmt:~/cephcluster$ rados lspools
rbd
client07
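Should the test pool need to be removed later, the delete syntax shown above takes the pool name twice plus the safety flag:
ceph@mgmt:~/cephcluster$ ceph osd pool delete client07 client07 --yes-i-really-really-mean-it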
ceph@mgmt:~/cephcluster$ rbd create client07/vm_disk01 --size 4096
ceph@mgmt:~/cephcluster$ rbd list client07
vm_disk01
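The rbd info output further down shows this image was created as format 1. If layering/clone support is wanted, and assuming this rbd release already accepts --image-format, a format 2 image (vm_disk02 is a hypothetical name) could be created instead:
ceph@mgmt:~/cephcluster$ rbd create client07/vm_disk02 --size 4096 --image-format 2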
OpenStack Compute Node
1. Add /etc/hosts entries
2. Create the ceph user
# useradd -d /home/ceph -m ceph
# passwd ceph
# echo "ceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/ceph
# chmod 0440 /etc/sudoers.d/ceph
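An optional check that the passwordless sudo rule works (should print "root" without prompting):
# su - ceph -c 'sudo whoami'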
3. RBD setup
root@compute1:~# rbd info client07/vm_disk01
rbd image 'vm_disk01':
	size 4096 MB in 1024 objects
	order 22 (4096 kB objects)
	block_name_prefix: rb.0.1156.238e1f29
	format: 1
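The figures are consistent: order 22 means 2^22-byte (4 MiB) objects, and 4096 MB / 4 MB per object = 1024 objects.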
root@compute1:~# vim /etc/ceph/rbdmap
client07/vm_disk01        id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
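/etc/ceph/rbdmap is read by the rbdmap init script shipped with ceph-common, which maps the listed images at boot; assuming that script is installed, it can also be run by hand with something like "service rbdmap start". The walkthrough below simply maps the image manually instead.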
root@compute1:~# rbd map client07/vm_disk01
/dev/rbd1
root@compute1:~# rbd showmapped
id pool     image     snap device    
1  client07 vm_disk01 -    /dev/rbd1 
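For reference (not run here), a mapped device can later be detached, after unmounting any filesystem on it, with:
root@compute1:~# rbd unmap /dev/rbd1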
root@compute1:~# mkfs.xfs /dev/rbd1
log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/rbd1              isize=256    agcount=9, agsize=130048 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=1048576, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
root@compute1:~# mkdir /CEPH
Register in fstab
root@compute1:~# vim /etc/fstab
/dev/rbd1	/CEPH	xfs	defaults,noatime,_netdev		0	0
root@compute1:~# mount /dev/rbd1  /CEPH/
root@compute1:~# df -h |grep CEPH
/dev/rbd1       4.0G   33M  4.0G   1% /CEPH
Disk write check
root@compute1:~# cd /CEPH
root@compute1:/CEPH# for ((i=1; i<36; i++)); do dd if=/dev/zero of=file_$i bs=1MB count=100; done
root@compute1:/CEPH# df -h |grep CEPH
/dev/rbd1       4.0G  3.3G  716M  83% /CEPH
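The usage matches the writes: 35 files x 100 MB (dd's MB is 10^6 bytes) = 3.5 GB ≈ 3.26 GiB, which df rounds to 3.3G.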
CEPH storage status check
ceph@mgmt:~/cephcluster$ ceph status
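Per-pool usage can also be inspected with, for example:
ceph@mgmt:~/cephcluster$ ceph df
ceph@mgmt:~/cephcluster$ rados df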
RBD disk expansion (resize)
ceph@mgmt:~/cephcluster$ rbd resize client07/vm_disk01 --size 8192
Resizing image: 100% complete...done.
root@compute1:/CEPH# df -h |grep CEPH
/dev/rbd1       4.0G  3.3G  716M  83% /CEPH
root@compute1:/CEPH# rbd info client07/vm_disk01
rbd image 'vm_disk01':
	size 8192 MB in 2048 objects
	order 22 (4096 kB objects)
	block_name_prefix: rb.0.1156.238e1f29
	format: 1
root@compute1:/CEPH# xfs_growfs /dev/rbd1
meta-data=/dev/rbd1              isize=256    agcount=9, agsize=130048 blks
         =                       sectsz=512   attr=2
data     =                       bsize=4096   blocks=1048576, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 1048576 to 2097152
root@compute1:/CEPH# df -h |grep CEPH
/dev/rbd1       8.0G  3.3G  4.7G  42% /CEPH
root@compute1:/CEPH# for ((i=36; i<46; i++)); do dd if=/dev/zero of=file_$i bs=1MB count=100; done
root@compute1:/CEPH# df -h |grep CEPH
/dev/rbd1       8.0G  4.3G  3.8G  53% /CEPH
Resize again
root@compute1:/CEPH# rbd resize client07/vm_disk01 --size 16384
Resizing image: 100% complete...done.
root@compute1:/CEPH# rbd info client07/vm_disk01
rbd image 'vm_disk01':
	size 16384 MB in 4096 objects
	order 22 (4096 kB objects)
	block_name_prefix: rb.0.1156.238e1f29
	format: 1
root@compute1:/CEPH# df |grep CEPH
/dev/rbd1        8378368  4428048   3950320  53% /CEPH
root@compute1:/CEPH# xfs_growfs /dev/rbd1
meta-data=/dev/rbd1              isize=256    agcount=17, agsize=130048 blks
         =                       sectsz=512   attr=2
data     =                       bsize=4096   blocks=2097152, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 2097152 to 4194304
root@compute1:/CEPH# df |grep CEPH
/dev/rbd1       16766976  4428560  12338416  27% /CEPH
※ mon daemon restart
root@mgmt:/home/ceph/cephcluster# initctl restart ceph-mon-all
ceph-mon-all start/running
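initctl is the Upstart interface used on this Ubuntu release; on a systemd-based node the rough equivalent (assuming the packaged ceph-mon units) would be:
# systemctl restart ceph-mon.target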
[Err message] shrinking the current disk size from 10000000 to 2000000
# rbd resize backup_img/backup --size 2000000
rbd: shrinking an image is only allowed with the --allow-shrink flag
※ --allow-shrink                     allow shrinking of an image when resizing
# rbd resize backup_img/backup --size 2000000 --allow-shrink
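Shrinking is gated behind --allow-shrink because it is destructive: data past the new size is discarded, and an XFS filesystem on the image cannot be shrunk at all, so the image must never be reduced below what the filesystem occupies. A commonly used sketch for estimating how much of an image is actually allocated (assuming rbd diff prints offset, length and type columns) is:
# rbd diff backup_img/backup | awk '{ sum += $2 } END { print sum/1024/1024 " MB" }'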