@zwei
2016-04-22T10:46:13.000000Z
字数 23276
阅读 5987
ceph
#相关的配置选项
# RADOS images will be chunked into objects of this size (in
# megabytes). For best performance, this should be a power of two.
# (integer value)
#rbd_store_chunk_size = 8
rbd_store_chunk_size = 8
#指定rbd image 的 块大小为8M
#对应着 order 23 (8192 kB objects) 8M = 2 ^ 23
# RADOS pool in which images are stored. (string value)
#rbd_store_pool = images
rbd_store_pool = rbd
# 指定 pool 的名称
# RADOS user to authenticate as (only applicable if using Cephx. If
# <None>, a default will be chosen based on the client. section in
# rbd_store_ceph_conf) (string value)
#rbd_store_user = <None>
rbd_store_user = admin
# 指定 链接ceph 的用户
# Ceph configuration file path. If <None>, librados will locate the
# default config. If using cephx authentication, this file should
# include a reference to the right keyring in a client.<USER> section
# (string value)
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# 指定ceph的 配置文件信息
# ceph.conf 中必须指定 keyring = /opt/ceph1/ceph.client.admin.keyring
# 就是admin 用户的密码文件
# Timeout value (in seconds) used when connecting to ceph cluster. If
# value <= 0, no timeout is set and default librados value is used.
# (integer value)
#rados_connect_timeout = 0
# 设置 链接 rados 的超时时间
##代码分析
def add(self, image_id, image_file, image_size, context=None):
"""
Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval tuple of URL in backing store, bytes written, checksum
and a dictionary with storage system specific information
:raises `glance_store.exceptions.Duplicate` if the image already
existed
"""
# Rolling MD5 over every chunk written; reported back as the checksum.
checksum = hashlib.md5()
image_name = str(image_id)
# Connect to the cluster using the configured conf file and cephx user.
with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
fsid = None
# get_fsid may be absent on very old librados python bindings.
if hasattr(conn, 'get_fsid'):
fsid = conn.get_fsid()
with conn.open_ioctx(self.pool) as ioctx:
# RBD "order" = log2(object size in bytes), e.g. 8 MiB -> 23.
order = int(math.log(self.WRITE_CHUNKSIZE, 2))
LOG.debug('creating image %s with order %d and size %d',
image_name, order, image_size)
if image_size == 0:
LOG.warning(_("since image size is zero we will be doing "
"resize-before-write for each chunk which "
"will be considerably slower than normal"))
try:
# Creates the RBD image and returns its store location
# (pool/image[@snapshot]).
loc = self._create_image(fsid, conn, ioctx, image_name,
image_size, order)
except rbd.ImageExists:
msg = _('RBD image %s already exists') % image_id
raise exceptions.Duplicate(message=msg)
try:
with rbd.Image(ioctx, image_name) as image:
bytes_written = 0
offset = 0
chunks = utils.chunkreadable(image_file,
self.WRITE_CHUNKSIZE)
for chunk in chunks:
# If the image size provided is zero we need to do
# a resize for the amount we are writing. This will
# be slower so setting a higher chunk size may
# speed things up a bit.
if image_size == 0:
chunk_length = len(chunk)
length = offset + chunk_length
bytes_written += chunk_length
LOG.debug(_("resizing image to %s KiB") %
(length / units.Ki))
image.resize(length)
LOG.debug(_("writing chunk at offset %s") %
(offset))
offset += image.write(chunk, offset)
checksum.update(chunk)
# Snapshot (and protect) the image so volumes can later
# be COW-cloned from it; the returned location points at
# the snapshot, not the bare image.
if loc.snapshot:
image.create_snap(loc.snapshot)
image.protect_snap(loc.snapshot)
except Exception as exc:
log_msg = (_LE("Failed to store image %(img_name)s "
"Store Exception %(store_exc)s") %
{'img_name': image_name,
'store_exc': exc})
LOG.error(log_msg)
# Delete image if one was created
try:
target_pool = loc.pool or self.pool
self._delete_image(target_pool, loc.image,
loc.snapshot)
except exceptions.NotFound:
pass
# Re-raise the original failure after best-effort cleanup.
raise exc
# Make sure we send back the image size whether provided or inferred.
if image_size == 0:
image_size = bytes_written
return (loc.get_uri(), image_size, checksum.hexdigest(), {})
功能总结:
使用 glance v2 create image 的时候在rbd 中创建的流程
1. loc = self._create_image() 先创建一个rbd 的image 指定order 和 size ,image_name
2. offset += image.write(chunk, offset) 在把数据写入rbd image 中
3. image.create_snap(loc.snapshot) 然后在 给这个image 创建一个 snap名称的快照
4. 最后返回的location 是 一个image 快照的 rbd 地址
## 代码分析
@capabilities.check
def delete(self, location, context=None):
    """Remove the image (and its snapshot, if any) that *location* names.

    :param location: `glance_store.location.Location` object, supplied
                     from glance_store.location.get_location_from_uri()
    :param context: request context (unused here)
    :raises NotFound: if the image does not exist
    :raises InUseByStore: if the image is in use or snapshot unprotect
                          failed
    """
    store_loc = location.store_location
    # Fall back to this store's configured pool when the URI has none.
    self._delete_image(store_loc.pool or self.pool,
                       store_loc.image,
                       store_loc.snapshot)
def _delete_image(self, target_pool, image_name,
snapshot_name=None, context=None):
"""
Delete RBD image and snapshot.
:param target_pool: pool holding the image (and its snapshot)
:param image_name Image's name
:param snapshot_name Image snapshot's name; when given, it is
unprotected and removed before the image itself
:param context: request context (unused)
:raises NotFound if image does not exist;
InUseByStore if image is in use or snapshot unprotect failed
"""
with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
with conn.open_ioctx(target_pool) as ioctx:
try:
# First remove snapshot.
if snapshot_name is not None:
with rbd.Image(ioctx, image_name) as image:
try:
# A protected snap cannot be removed; unprotect
# raises ImageBusy while clones still depend on
# it (e.g. VMs/volumes created from this image).
image.unprotect_snap(snapshot_name)
except rbd.ImageBusy:
log_msg = _("snapshot %(image)s@%(snap)s "
"could not be unprotected because "
"it is in use")
LOG.debug(log_msg %
{'image': image_name,
'snap': snapshot_name})
raise exceptions.InUseByStore()
image.remove_snap(snapshot_name)
# Then delete image.
rbd.RBD().remove(ioctx, image_name)
except rbd.ImageNotFound:
msg = _("RBD image %s does not exist") % image_name
raise exceptions.NotFound(message=msg)
except rbd.ImageBusy:
log_msg = _("image %s could not be removed "
"because it is in use")
LOG.debug(log_msg % image_name)
raise exceptions.InUseByStore()
总结:
image-delete 命令具体如下步骤:
1, 先对snap 解除保护
2, 在对snap 进行删除
3, 在对image 进行删除
4, 如果这个snap在使用中则删除不掉eg: 基于这个image 创建vm,volume
#### ceph 配置选项
# The name of ceph cluster (string value)
#rbd_cluster_name = ceph
rbd_cluster_name = ceph
# The RADOS pool where rbd volumes are stored (string value)
#rbd_pool = rbd
rbd_pool = cinder
# The RADOS client name for accessing rbd volumes - only set when using cephx
# authentication (string value)
#rbd_user = <None>
rbd_user = admin
# Path to the ceph configuration file (string value)
#rbd_ceph_conf =
rbd_ceph_conf = /etc/ceph/ceph.conf
# Flatten volumes created from snapshots to remove dependency from volume to
# snapshot (boolean value)
#rbd_flatten_volume_from_snapshot = false
rbd_flatten_volume_from_snapshot = true
# 在创建基于 snapshot 的卷时候 是否flatten(扶平)新的 volume 独立出来
# 就是让 新的volume 和 snapshot 没有关系。
# The libvirt uuid of the secret for the rbd_user volumes (string value)
#rbd_secret_uuid = <None>
#设置 libvirt 连接 ceph 的认证密钥id
#在计算节点上使用 virsh secret-list 查看
#(.venv)[root@cinder cinder-7.0.0]# virsh secret-list
#UUID Usage
#-----------------------------------------------------------
#7535b2cb-87e7-4d0a-b458-f0ab6adda149 ceph client.admin ceph2 secret
#ec43b2f7-44c3-466b-a9b1-d32fbc3b04d5 ceph client.admin secret
# Directory where temporary image files are stored when the volume driver does
# not write them directly to the volume. Warning: this option is now
# deprecated, please use image_conversion_dir instead. (string value)
#volume_tmp_dir = <None>
#设置下载 glance 镜像的临时目录
# Maximum number of nested volume clones that are taken before a flatten
# occurs. Set to 0 to disable cloning. (integer value)
#rbd_max_clone_depth = 5
rbd_max_clone_depth = 5
# 设置 基于source volume 创建一个卷的时候 检测 源卷的 clone 深度是否为5
# 如果 source volume 的 depth 为5 则 对 source volume 进行 flatten (扶平)
# 也就是 让 source volume 独立出来, 和 source volume 的parent volume 没有关系
# 注意:这里是对 所创建卷的 父卷也就是 source volume 进行 flatten 而不是 对所
# 要创建的卷
# 注意 值 > 0 才会有效果, < 0 则会一直做full copy , 效率低下但安全。
# Volumes will be chunked into objects of this size (in megabytes). (integer
# value)
#rbd_store_chunk_size = 4
rbd_store_chunk_size = 4
# 设置 rbd image 的块的大小
# Timeout value (in seconds) used when connecting to ceph cluster. If value <
# 0, no timeout is set and default librados value is used. (integer value)
#rados_connect_timeout = -1
#rados_connect_timeout = -1
# 设置链接 ceph 的超时时间 大于 等于 0 有效
# Number of retries if connection to ceph cluster failed. (integer value)
#rados_connection_retries = 3
rados_connection_retries = 3
# 设置重新链接的次数
# Interval value (in seconds) between connection retries to ceph cluster.
# (integer value)
#rados_connection_interval = 5
rados_connection_interval = 5
# 设置重新 链接ceph 的间隔时间
cinder help create
Creates a volume.
Positional arguments:
<size> Size of volume, in GBs. (Required unless snapshot-id
/source-volid is specified).
Optional arguments:
--consisgroup-id <consistencygroup-id>
ID of a consistency group where the new volume belongs
to. Default=None.
--snapshot-id <snapshot-id>
Creates volume from snapshot ID. Default=None.
--source-volid <source-volid>
Creates volume from volume ID. Default=None.
--source-replica <source-replica>
Creates volume from replicated volume ID.
Default=None.
--image-id <image-id>
Creates volume from image ID. Default=None.
--image <image> Creates a volume from image (ID or name).
Default=None.
--name <name> Volume name. Default=None.
--description <description>
Volume description. Default=None.
--volume-type <volume-type>
Volume type. Default=None.
--availability-zone <availability-zone>
Availability zone for volume. Default=None.
--metadata [<key=value> [<key=value> ...]]
Metadata key and value pairs. Default=None.
--hint <key=value> Scheduler hint, like in nova.
--allow-multiattach Allow volume to be attached more than once.
Default=False
### 基于 snapshot 创建一个卷
### 基于 image 创建一个卷
### 基于 source volume 创建一个卷
### 创建一个 裸卷
#### cinder driver rbd 代码分析
# Volume-create dispatch: create_type was derived earlier from the
# request (raw / snapshot / source volume / replica / glance image)
# and selects the matching _create_from_* helper.
if create_type == 'raw':
model_update = self._create_raw_volume(volume_ref=volume_ref,
**volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'source_replica':
model_update = self._create_from_source_replica(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume_ref=volume_ref,
**volume_spec)
##### 1, 创建一个 raw 的卷
def create_volume(self, volume):
    """Create a new, empty RBD image backing the given volume.

    The image size comes from volume['size'] (GiB); the RBD object
    size comes from the rbd_store_chunk_size option (MiB), expressed
    to librbd as an "order" (log2 of the object size in bytes).
    """
    vol_bytes = int(volume['size']) * units.Gi
    LOG.debug("creating volume '%s'", volume['name'])
    # e.g. 4 MiB chunks -> order 22.
    obj_bytes = self.configuration.rbd_store_chunk_size * units.Mi
    obj_order = int(math.log(obj_bytes, 2))
    with RADOSClient(self) as client:
        self.RBDProxy().create(client.ioctx,
                               utils.convert_str(volume['name']),
                               vol_bytes,
                               obj_order,
                               old_format=False,
                               features=client.features)
总结:创建一个 rbd image
#####2, 创建一个 snap 的卷
def create_volume_from_snapshot(self, volume, snapshot):
    """Clone a new volume from an existing RBD snapshot.

    When rbd_flatten_volume_from_snapshot is set, the clone is
    flattened so it no longer depends on the parent snapshot; the
    clone is then resized when a non-zero size was requested.
    """
    pool = self.configuration.rbd_pool
    self._clone(volume, pool, snapshot['volume_name'], snapshot['name'])
    if self.configuration.rbd_flatten_volume_from_snapshot:
        self._flatten(pool, volume['name'])
    if int(volume['size']):
        self._resize(volume)
def _clone(self, volume, src_pool, src_image, src_snap):
    """COW-clone src_pool/src_image@src_snap into this volume's image."""
    LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
              dict(pool=src_pool, img=src_image, snap=src_snap,
                   dst=volume['name']))
    # Two clients: the source may live in a different pool than the
    # destination (which uses the driver's configured pool).
    with RADOSClient(self, src_pool) as src_client, \
            RADOSClient(self) as dest_client:
        self.RBDProxy().clone(src_client.ioctx,
                              utils.convert_str(src_image),
                              utils.convert_str(src_snap),
                              dest_client.ioctx,
                              utils.convert_str(volume['name']),
                              features=src_client.features)
总结:
## 类似 rbd clone --order 23 252656cd-b791-4334-a0e6-547fd4a410e3@snap c5098738-8039-49df-975c-aea802fe0c7e 命令
## rbd_flatten_volume_from_snapshot 如果设置为true 则会 扶平 新创建的卷就是
## 让新卷独立出来。
#####3, 创建一个基于 source volume 的卷
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Since we are cloning from a volume and not a snapshot, we must first
create a snapshot of the source volume.
The user has the option to limit how long a volume's clone chain can be
by setting rbd_max_clone_depth. If a clone is made of another clone
and that clone has rbd_max_clone_depth clones behind it, the source
volume will be flattened.
"""
src_name = utils.convert_str(src_vref['name'])
dest_name = utils.convert_str(volume['name'])
flatten_parent = False
# Do full copy if requested
# (rbd_max_clone_depth <= 0 disables COW cloning entirely: the new
# volume is an independent byte copy with no parent relationship).
if self.configuration.rbd_max_clone_depth <= 0:
with RBDVolumeProxy(self, src_name, read_only=True) as vol:
vol.copy(vol.ioctx, dest_name)
return
# Otherwise do COW clone.
with RADOSClient(self) as client:
depth = self._get_clone_depth(client, src_name)
# If source volume is a clone and rbd_max_clone_depth reached,
# flatten the source before cloning so the chain stays bounded.
if depth == self.configuration.rbd_max_clone_depth:
LOG.debug("maximum clone depth (%d) has been reached - "
"flattening source volume",
self.configuration.rbd_max_clone_depth)
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
try:
# First flatten source volume if required.
if flatten_parent:
_pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug("flattening source volume %s", src_name)
src_volume.flatten()
# Delete parent clone snap (no longer needed once the
# source has been made independent).
parent_volume = self.rbd.Image(client.ioctx, parent)
try:
parent_volume.unprotect_snap(snap)
parent_volume.remove_snap(snap)
finally:
parent_volume.close()
# Create new snapshot of source volume, named after the
# destination: "<dest>.clone_snap".
clone_snap = "%s.clone_snap" % dest_name
LOG.debug("creating snapshot='%s'", clone_snap)
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception:
# Only close if exception since we still need it.
src_volume.close()
raise
# Now clone source volume snapshot
try:
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'",
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
client.ioctx, dest_name,
features=client.features)
except Exception:
# Clone failed: roll back the snapshot we just created.
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
raise
finally:
src_volume.close()
# Grow the clone when the requested size differs from the source.
if volume['size'] != src_vref['size']:
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
"%(dst_size)d",
{'dst_vol': volume['name'], 'src_size': src_vref['size'],
'dst_size': volume['size']})
self._resize(volume)
LOG.debug("clone created successfully")
总结:
当 configuration.rbd_max_clone_depth 的值 <= 0 的时候
创建卷会 基于 source volume copy 一个新的卷
新卷 和 源卷 数据一样的 但是没有父子关系
类似: rbd -p rbd copy test_ld_01 test_ld_01_copy 命令
当 configuration.rbd_max_clone_depth 的值 >= 0 的时候, 但 source volume 的卷的depth
值小于 configuration.rbd_max_clone_depth 的值
则会在 source volume 上创建一个为名为 新卷id + clone_snap 的快照
例如: [root@osd0 ~]# rbd -p glance-02 snap ls volume-5996e0fb-fbc9-4219-b09b-04980737dd29
SNAPID NAME SIZE
43 volume-0aa9e66d-a032-4db3-bde9-6a68a7cf4436.clone_snap 1024 MB
当 configuration.rbd_max_clone_depth 的值 >= 0 的时候, 且 source volume 的卷的depth
值大于等于 configuration.rbd_max_clone_depth 的值
eg: configuration.rbd_max_clone_depth = 5 , source volume 刚好是 某一个volume 的第5代 子孙, 就是source volume 的爷爷的爷爷
这时候会把 source volume 再独立出来 就是 flatten 出来,和以前的父辈没有关系。
然后在给这个 source volume 做 snap 然后基于这个 snap 在clone 一个新的 卷。
总结: 就是一个 volume 只能有5代子孙。
#####6, 创建一个基于 image 的卷 这个有点复杂根据image 的location有关
######具体看代码
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
"""Create a volume from a glance image.
First asks the driver to clone the image directly (fast path when
the image is rbd-backed in the same cluster); otherwise may clone
from an image-backed volume when the 'cinder' direct-URL scheme is
allowed.
NOTE(review): this excerpt appears truncated - the upstream method
continues with the download fallback and returns model_update.
"""
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
# First see if the driver can clone the image directly.
#
# NOTE (singn): two params need to be returned
# dict containing provider_location for cloned volume
# and clone status.
model_update, cloned = self.driver.clone_image(context,
volume_ref,
image_location,
image_meta,
image_service)
# Try and clone the image if we have it set as a glance location.
if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
model_update, cloned = self._clone_image_volume(context,
volume_ref,
image_location,
image_meta)
#####调用底层的rbd driver
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Try to COW-clone *volume* directly from a glance image.
Returns ({'provider_location': None}, True) when one of the image's
locations is a cloneable rbd URI in this cluster; otherwise
({}, False) so the caller falls back to downloading the image.
"""
if image_location:
# Note: image_location[0] is glance image direct_url.
# image_location[1] contains the list of all locations (including
# direct_url) or None if show_multiple_locations is False in
# glance configuration.
if image_location[1]:
url_locations = [location['url'] for
location in image_location[1]]
else:
url_locations = [image_location[0]]
# iterate all locations to look for a cloneable one.
for url_location in url_locations:
if url_location and self._is_cloneable(
url_location, image_meta):
_prefix, pool, image, snapshot = \
self._parse_location(url_location)
# Clone from the image's protected snapshot, then grow
# the clone to the requested volume size.
self._clone(volume, pool, image, snapshot)
self._resize(volume)
return {'provider_location': None}, True
return ({}, False)
def _is_cloneable(self, image_location, image_meta):
try:
fsid, pool, image, snapshot = self._parse_location(image_location)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s.', e)
return False
if self._get_fsid() != fsid:
LOG.debug('%s is in a different ceph cluster.', image_location)
return False
if image_meta['disk_format'] != 'raw':
LOG.debug("rbd image clone requires image format to be "
"'raw' but image %(image)s is '%(format)s'",
{"image", image_location,
"format", image_meta['disk_format']})
return False
# check that we can read the image
try:
with RBDVolumeProxy(self, image,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s.',
dict(loc=image_location, err=e))
return False
##总结:
1,查看location是rbd:// 格式的
2,判断location 是否是同一个ceph 集群
3,判断image 的格式是否是raw
4,判断指定的 snap 是否存在
总结 location 是rbd 的格式的:
如果location是rbd 的并且是同一个集群的则可以使用 rbd clone的方式创建一个volume
查看结果:
(.venv)[root@cinder cinder-7.0.0]# rbd2 -p glance-02 info volume-17272cee-314f-4e27-9807-f183e603a991
rbd image 'volume-17272cee-314f-4e27-9807-f183e603a991':
size 1024 MB in 128 objects
order 23 (8192 kB objects)
block_name_prefix: rbd_data.d53d43ceeb80
format: 2
features: layering, striping
parent: glance-02/8deb9c62-d194-4147-8f26-8f7082a1cbf8@snap
overlap: 9532 kB
stripe unit: 4096 kB
stripe count: 1
### 当 image 中的location 不是rbd 格式或者 不是同一个ceph 集群则使用下列代码
def _create_from_image_download(self, context, volume_ref, image_location,
image_id, image_service):
"""Fallback path: create an empty volume, then download the glance
image and copy its contents into that volume.
"""
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
# Best effort: log the failed status update and carry on with
# the copy; overall failure is handled by the flow's revert.
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(updates)s"),
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
return model_update
总结: 基于 image 创建 是先下载 image 镜像到本地临时目录然后再copy到 rbd image 中。
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch a glance image to a local temporary file, then import it
into the volume's RBD image via the 'rbd import' CLI.
"""
tmp_dir = self._image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
# Download (and convert if necessary) the image to raw format.
image_utils.fetch_to_raw(context, image_service, image_id,
tmp.name,
self.configuration.volume_dd_blocksize,
size=volume['size'])
# Drop the previously-created empty image; 'rbd import' below
# recreates it while importing the data.
self.delete_volume(volume)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['rbd', 'import',
'--pool', self.configuration.rbd_pool,
'--order', order,
tmp.name, volume['name'],
'--new-format']
args.extend(self._ceph_args())
self._try_execute(*args)
self._resize(volume)
总结: 把先前创建rbd image 的镜像删除掉,然后在通过 rbd import 命令把数据导入到指定的image 中。
注意: rbd import --pool glance-01 --order 22 /var/lib/cinder/conversion/tmpKJBgmg volume-6cccbace-00b6-4c17-b5b7-fcf773728bbe --new-format --id admin --conf /opt/ceph1/ceph.conf --cluster ceph 会自建一个rbd image 镜像。
最后删除掉本地临时的 image 镜像。
(.venv)[root@cinder ~]# cinder help delete
usage: cinder delete <volume> [<volume> ...]
Removes one or more volumes.
Positional arguments:
<volume> Name or ID of volume or volumes to delete.
### 代码分析
# Manager-side volume delete flow: emit the start notification, then
# delegate the actual removal to the configured driver.
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume_ref)
if unmanage_only:
# Unmanage: forget the volume without destroying backend data.
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume_ref)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'available')
return True
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'error_deleting')
### manager 调用的 是 driver.remove_export 和 delete_volume
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = utils.convert_str(volume['name'])
with RADOSClient(self) as client:
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
# Already gone from the backend: treat as success so the DB
# record can still be cleaned up by the caller.
LOG.info(_LI("volume %s no longer exists in backend"),
volume_name)
return
clone_snap = None
parent = None
# Ensure any backup snapshots are deleted
self._delete_backup_snaps(rbd_image)
# If the volume has non-clone snapshots this delete is expected to
# raise VolumeIsBusy so do so straight away.
try:
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug("volume has clone snapshot(s)")
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
break
# NOTE(review): in the upstream source this raise sits inside
# the loop - any snapshot NOT ending in '.clone_snap' makes the
# volume busy. Indentation was lost in this excerpt; confirm
# against the original file.
raise exception.VolumeIsBusy(volume_name=volume_name)
# Determine if this volume is itself a clone
_pool, parent, parent_snap = self._get_clone_info(rbd_image,
volume_name,
clone_snap)
finally:
rbd_image.close()
# Retry removal: a crashed client may still hold a watch on the
# image for a short while, raising ImageBusy.
@utils.retry(self.rbd.ImageBusy, retries=3)
def _try_remove_volume(client, volume_name):
self.RBDProxy().remove(client.ioctx, volume_name)
if clone_snap is None:
LOG.debug("deleting rbd volume %s", volume_name)
try:
_try_remove_volume(client, volume_name)
except self.rbd.ImageBusy:
msg = (_("ImageBusy error raised while deleting rbd "
"volume. This may have been caused by a "
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warning(msg)
# Now raise this so that volume stays available so that we
# delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
LOG.info(_LI("RBD volume %s not found, allowing delete "
"operation to proceed."), volume_name)
return
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug("volume is a clone so cleaning references")
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
# delete it. Instead we will keep it as a silent volume which
# will be deleted when it's snapshot and clones are deleted.
new_name = "%s.deleted" % (volume_name)
self.RBDProxy().rename(client.ioctx, volume_name, new_name)
总结:
当volume 在rbd 中不存在的时候, cinder delete 命令还是会正确执行,并且更新数据库
### self._delete_backup_snaps(rbd_image) 分析
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug("volume has no backup snaps")
def _get_backup_snaps(self, rbd_image):
    """Return every backup snapshot present on *rbd_image*.

    Normally at most one exists, but all matches are returned since
    each must be removed before the volume itself can be deleted.
    """
    # Imported lazily: a module-level import would create a circular
    # dependency between this driver and the ceph backup driver (which
    # itself imports the rbd volume driver).
    from cinder.backup.drivers import ceph as ceph_backup
    return ceph_backup.CephBackupDriver.get_backup_snaps(rbd_image)
###cinder/backup/drivers/ceph.py
@classmethod
def get_backup_snaps(cls, rbd_image, sort=False):
    """Return all backup snapshots present on *rbd_image*.

    Exposed publicly because these snapshots must be removed before
    the base volume can be deleted.  With sort=True the newest
    snapshot (largest timestamp) comes first.
    """
    pattern = cls.backup_snapshot_name_pattern()
    matches = (re.search(pattern, snap['name'])
               for snap in rbd_image.list_snaps())
    backup_snaps = [{'name': m.group(0),
                     'backup_id': m.group(1),
                     'timestamp': m.group(2)}
                    for m in matches if m]
    if sort:
        # reverse=True puts the newest timestamp first.
        backup_snaps.sort(key=lambda s: s['timestamp'], reverse=True)
    return backup_snaps
@staticmethod
def backup_snapshot_name_pattern():
    """Regex matching snapshot names created by the ceph backup driver.

    Snapshots made for any other purpose must never use this naming
    scheme, since matching snapshots get deleted along with backups.
    Capture groups: (1) backup id, (2) timestamp.
    """
    return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$"
总结:
获取快照中是否有 backup 开头的 snap, 如果有则删除这些快照。
接下来接着分析:
获取所有的 clone_snap 结尾的 snap
如果还有 其他的snap 则会报错 "is busy" 不能删除
如果这个image 没有 clone_snap 的快照则 使用remove | rm 删除这个image
如果这个image 有 clone_snap 的快照则 调用 rename 修改名称 加上 .delete
总结所有的delete 功能特性:
1, rbd中没有image 则更新数据库 信息
2,image 有backup 相关的snap 则删除 backup 相关快照
3,image 有clone_snap 相关的snap 则 rename 卷名称,不真正删除
4,image 没有clone_snap 相关的snap 则 remove 真正删除卷