@dyj2017
2017-11-02T02:00:20.000000Z
字数 3098
阅读 1381
ceph ceph实验 rbd
# cat /etc/redhat-releaseCentOS Linux release 7.3.1611 (Core)# ceph -vceph version 12.2.1 (3e7492b9ada8bdc9a5cd0feafd42fbca27f9c38e) luminous (stable)
比如需要恢复所有前缀为 rbd_data.1041643c9869 的块设备,该块设备名为foo,文件系统类型为xfs,大小1G,挂载到了/root/foo/目录下,并有三个文件:
[root@node3 ~]# ls foo/file1.txt file2.txt file3.txt[root@node3 ~]# cat foo/file2.txtCeph2222222222222222222222222222222222222
恢复该foo块设备需要进行下面的操作
[root@node3 ~]# rados -p rbd ls|sortrbd_data.1041643c9869.0000000000000000rbd_data.1041643c9869.0000000000000001rbd_data.1041643c9869.000000000000001frbd_data.1041643c9869.000000000000003erbd_data.1041643c9869.000000000000005drbd_data.1041643c9869.000000000000007crbd_data.1041643c9869.000000000000007drbd_data.1041643c9869.000000000000007erbd_data.1041643c9869.000000000000009brbd_data.1041643c9869.00000000000000barbd_data.1041643c9869.00000000000000d9rbd_data.1041643c9869.00000000000000f8rbd_data.1041643c9869.00000000000000ffrbd_directoryrbd_header.1041643c9869rbd_id.foorbd_info
如:
[root@node3 ~]# rados -p rbd get rbd_data.1041643c9869.0000000000000000 rbd_data.1041643c9869.0000000000000000
[root@node3 ~]# touch mkrbd.sh[root@node3 ~]# chmod +x mkrbd.sh
[root@node3 ~]# vi mkrbd.sh
输入以下内容:
#!/usr/bin/env bash
#
# Rebuild an RBD image from its rados data objects exported to the
# current directory with `rados get`.  Objects are named
# <prefix>.<hex object number>; each holds one 4 MiB chunk whose byte
# offset inside the image is (object number * object size).  Objects
# that were never written are absent and stay as sparse holes.
#
# Usage: rebuild_rbd [image-name] [object-prefix] [image-size-bytes]
# With no arguments it reproduces the article's example
# (foo1 / rbd_data.1041643c9869 / 1 GiB).
rebuild_rbd() {
  # Rados object size: the 4 MiB chunk size shown earlier (order 22).
  local obj_size=4194304
  # dd block size used when writing each chunk into the image.
  local block_size=512

  local rbd=${1:-foo1}                    # output image file name
  local base=${2:-rbd_data.1041643c9869}  # object-name prefix
  local rbd_size=${3:-1073741824}         # full image size in bytes (1 GiB)

  # Collect the exported objects by globbing — never parse `ls` output.
  # Without a match the literal pattern remains, which the -e test catches.
  local files=( "${base}".* )
  if [ ! -e "${files[0]}" ]; then
    echo "COULD NOT FIND FILES FOR ${base} IN $(pwd)" >&2
    return 1   # original script exited 0 here, hiding the failure
  fi

  # Create the full-size sparse image: seeking past EOF with count=0
  # writes nothing but sets the file length. (truncate would also work.)
  dd if=/dev/zero of="${rbd}" bs=1 count=0 seek="${rbd_size}" 2>/dev/null || return 1

  local file_name suffix seek_loc
  for file_name in "${files[@]}"; do
    # The part after the last dot is the object number in hexadecimal.
    suffix=${file_name##*.}
    # Byte offset = object number * object size, converted to dd blocks.
    # 16#... is bash hex arithmetic — no GNU-awk strtonum needed.
    seek_loc=$(( obj_size * 16#${suffix} / block_size ))
    dd conv=notrunc if="${file_name}" of="${rbd}" \
       seek="${seek_loc}" bs="${block_size}" 2>/dev/null || return 1
  done
}

rebuild_rbd "$@"
执行该脚本后在本地生成了一个foo1的文件
[root@node3 ~]# file foo1foo1: SGI XFS filesystem data (blksz 4096, inosz 512, v2 dirs)[root@node3 ~]# du -sh foo111M foo1[root@node3 ~]# ll -h foo1-rw-r--r-- 1 root root 1.0G 10月 17 16:04 foo1
可以看出foo1是xfs文件,使用了11M,大小为1G
[root@node3 ~]# mount foo1 /mntmount: 文件系统类型错误、选项错误、/dev/loop0 上有坏超级块、缺少代码页或助手程序,或其他错误有些情况下在 syslog 中可以找到一些有用信息- 请尝试dmesg | tail 这样的命令看看。[root@node3 ~]# dmesg|tail[88818.307314] XFS (rbd0): Mounting V5 Filesystem[88818.865978] XFS (rbd0): Ending clean mount[91099.845834] bash (6208): drop_caches: 1[91492.345582] bash (6208): drop_caches: 1[93485.275727] libceph: osd2 down[93485.275739] libceph: osd5 down[93495.518099] libceph: osd2 up[93495.518165] libceph: osd5 up[95288.897917] loop: module loaded[98449.535689] XFS (loop0): Filesystem has duplicate UUID 313b2d89-f4bc-4ee6-a1d8-a996190222fd - can't mount
挂载foo1时出现了上面的错误提示,这是因为原来的foo块是/dev/rbd0的克隆,所以foo的UUID是和/dev/rbd0的是一样的,这时候我们先 umount foo/ 卸载原来的挂载点即可:
[root@node3 ~]# umount foo/[root@node3 ~]# mount foo1 /mnt[root@node3 ~]# ls /mnt/file1.txt file2.txt file3.txt[root@node3 ~]# cat /mnt/file2.txtCeph2222222222222222222222222222222222222
可以看到已经完全恢复出了原来的foo块设备的内容,当然也可以把恢复出的rbd文件命名为foo