OpenStack backup: a walkthrough of the chunkeddriver source code

This is the chunkeddriver.py file from the Cinder backup module.

Its methods cover:

the concrete backup process, how the metadata and sha256 files are produced, and how the backup metadata is generated.

The detailed flow is described in the comments below; it reflects my own understanding, so please excuse any mistakes.
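
To set the stage, this is roughly what one finished backup looks like in the backup repository. The prefix shown is hypothetical; the real prefix comes from the concrete driver's _generate_object_name_prefix(), and the suffixes come from _backup_chunk(), _metadata_filename() and _sha256_filename() below:

    <prefix>-00001, <prefix>-00002, ...   the (optionally compressed) data chunks
    <prefix>_metadata                     JSON index: offset/length/compression/md5 per chunk
    <prefix>_sha256file                   JSON list of per-block SHA-256 values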


import abc
import hashlib
import json
import os

import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six


from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume import utils as volume_utils


LOG = logging.getLogger(__name__)


chunkedbackup_service_opts = [
    cfg.StrOpt('backup_compression_algorithm',  # defaults to "zlib"
               default='zlib',
               choices=['none', 'off', 'no',
                        'zlib', 'gzip',
                        'bz2', 'bzip2'],
               help='Compression algorithm (None to disable)'),
]


CONF = cfg.CONF
CONF.register_opts(chunkedbackup_service_opts)  # register the options with oslo.config


@six.add_metaclass(abc.ABCMeta)
class ChunkedBackupDriver(driver.BackupDriver):
    """Abstract chunked backup driver.


       Implements common functionality for backup drivers that store volume
       data in multiple "chunks" in a backup repository when the size of
       the backed up cinder volume exceeds the size of a backup repository
       "chunk."


       Provides abstract methods to be implemented in concrete chunking
       drivers.
    """


    DRIVER_VERSION = '1.0.0'
    DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}


    def _get_compressor(self, algorithm):  # pick the compression module
        try:
            if algorithm.lower() in ('none', 'off', 'no'):  # no compression
                return None
            elif algorithm.lower() in ('zlib', 'gzip'):  # zlib compression
                import zlib as compressor
                return compressor
            elif algorithm.lower() in ('bz2', 'bzip2'):
                import bz2 as compressor
                return compressor
        except ImportError:
            pass


        err = _('unsupported compression algorithm: %s') % algorithm  # raise "unsupported compression algorithm"
        raise ValueError(err)


    def __init__(self, context, chunk_size_bytes, sha_block_size_bytes,  # initialization
                 backup_default_container, enable_progress_timer,
                 db=None):
        super(ChunkedBackupDriver, self).__init__(context, db)
        self.chunk_size_bytes = chunk_size_bytes  # read chunk_size_bytes bytes at a time
        self.sha_block_size_bytes = sha_block_size_bytes  # one SHA-256 per sha_block_size_bytes of data
        self.backup_default_container = backup_default_container  # default container
        self.enable_progress_timer = enable_progress_timer  # progress timer switch


        self.backup_timer_interval = CONF.backup_timer_interval  # how often progress notifications are sent
        self.data_block_num = CONF.backup_object_number_per_notification  # notify after this many chunks
        self.az = CONF.storage_availability_zone  # availability zone
        self.backup_compression_algorithm = CONF.backup_compression_algorithm  # compression algorithm option
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)  # resolve the compression module
        self.support_force_delete = True  # force delete is supported


    # To create your own "chunked" backup driver, implement the following
    # abstract methods.



    @abc.abstractmethod
    def put_container(self, container):
        """Create the container if needed. No failure if it pre-exists."""
        return


    @abc.abstractmethod
    def get_container_entries(self, container, prefix):
        """Get container entry names."""
        return


    @abc.abstractmethod
    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Returns a writer object which stores the chunk data in backup repository.


           The object returned should be a context handler that can be used
           in a "with" context.
        """
        return


    @abc.abstractmethod
    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Returns a reader object for the backed up chunk."""
        return


    @abc.abstractmethod
    def delete_object(self, container, object_name):
        """Delete object from container."""
        return


    @abc.abstractmethod
    def _generate_object_name_prefix(self, backup):
        return


    @abc.abstractmethod
    def update_container_name(self, backup, container):
        """Allow sub-classes to override container name.


        This method exists so that sub-classes can override the container name
        as it comes in to the driver in the backup object. Implementations
        should return None if no change to the container name is desired.
        """
        return


    @abc.abstractmethod
    def get_extra_metadata(self, backup, volume):
        """Return extra metadata to use in prepare_backup.


        This method allows for collection of extra metadata in prepare_backup()
        which will be passed to get_object_reader() and get_object_writer().
        Subclass extensions can use this extra information to optimize
        data transfers. Return a json serializable object.
        """
        return


    def _create_container(self, backup):  # create the container
        # Container's name will be decided by the driver (returned by method
        # update_container_name), if no change is required by the driver then
        # we'll use the one the backup object already has, but if it doesn't
        # have one backup_default_container will be used.
        new_container = self.update_container_name(backup, backup.container)
        if new_container:
            # If the driver is not really changing the name we don't want to
            # dirty the field in the object and save it to the DB with the same
            # value.
            if new_container != backup.container:  # only save it if it actually changed
                backup.container = new_container
        elif backup.container is None:  # if empty, fall back to the default
            backup.container = self.backup_default_container


        LOG.debug('_create_container started, container: %(container)s,'
                  'backup: %(backup_id)s.',
                  {'container': backup.container, 'backup_id': backup.id})


        backup.save()
        self.put_container(backup.container)
        return backup.container  # return the container name


    def _generate_object_names(self, backup):  # list the object names of this backup
        prefix = backup['service_metadata']
        object_names = self.get_container_entries(backup['container'], prefix)
        LOG.debug('generated object list: %s.', object_names)
        return object_names


    def _metadata_filename(self, backup):  # build the metadata file name
        object_name = backup['service_metadata']
        filename = '%s_metadata' % object_name
        return filename


    def _sha256_filename(self, backup):  # build the sha256 file name
        object_name = backup['service_metadata']
        filename = '%s_sha256file' % object_name
        return filename

    # Write the metadata file: it records which objects this volume's data was
    # stored in, and for each object its offset, length and compression algorithm.
    def _write_metadata(self, backup, volume_id, container, object_list,
                        volume_meta, extra_metadata=None):
        filename = self._metadata_filename(backup)
        LOG.debug('_write_metadata started, container name: %(container)s,'
                  ' metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        metadata = {}
        metadata['version'] = self.DRIVER_VERSION  # e.g. "version": "1.0.0"
        metadata['backup_id'] = backup['id']  # backup id
        metadata['volume_id'] = volume_id  # volume id
        metadata['backup_name'] = backup['display_name']
        metadata['backup_description'] = backup['display_description']  # e.g. "backup_description": null
        metadata['created_at'] = str(backup['created_at'])  # e.g. "created_at": "2017-11-23 ..."
        metadata['objects'] = object_list  # the objects written for this backup, basic info per object
        metadata['parent_id'] = backup['parent_id']  # parent backup id
        metadata['volume_meta'] = volume_meta  # the volume's own metadata
        if extra_metadata:
            metadata['extra_metadata'] = extra_metadata
        metadata_json = json.dumps(metadata, sort_keys=True, indent=2)  # serialize to JSON, keys sorted, indented 2 spaces
        if six.PY3:
            metadata_json = metadata_json.encode('utf-8')
        with self.get_object_writer(container, filename) as writer:
            writer.write(metadata_json)
        LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
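    # Illustrative only (all ids and values below are made up): after a small
    # two-chunk backup, the <prefix>_metadata object written above would look
    # roughly like this:
    #
    #   {
    #     "backup_id": "b1",
    #     "backup_name": null,
    #     "backup_description": null,
    #     "created_at": "2017-11-23 08:00:00",
    #     "objects": [
    #       {"<prefix>-00001": {"offset": 0, "length": 52428800,
    #                           "compression": "zlib", "md5": "..."}},
    #       {"<prefix>-00002": {"offset": 52428800, "length": 1048576,
    #                           "compression": "none", "md5": "..."}}
    #     ],
    #     "parent_id": null,
    #     "version": "1.0.0",
    #     "volume_id": "v1",
    #     "volume_meta": "..."
    #   }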


    # The sha256file records the SHA-256 value of every sha_block_size-sized
    # block of the original volume.
    def _write_sha256file(self, backup, volume_id, container, sha256_list):
        filename = self._sha256_filename(backup)
        LOG.debug('_write_sha256file started, container name: %(container)s,'
                  ' sha256file filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        sha256file = {}
        sha256file['version'] = self.DRIVER_VERSION
        sha256file['backup_id'] = backup['id']
        sha256file['volume_id'] = volume_id
        sha256file['backup_name'] = backup['display_name']
        sha256file['backup_description'] = backup['display_description']  # null by default
        sha256file['created_at'] = six.text_type(backup['created_at'])
        sha256file['chunk_size'] = self.sha_block_size_bytes  # note: this "chunk_size" is the SHA block size, not the data chunk size
        sha256file['sha256s'] = sha256_list
        sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
        if six.PY3:
            sha256file_json = sha256file_json.encode('utf-8')
        with self.get_object_writer(container, filename) as writer:
            writer.write(sha256file_json)
        LOG.debug('_write_sha256file finished.')
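    # Illustrative only (hypothetical values): the <prefix>_sha256file object
    # written above would look roughly like this:
    #
    #   {
    #     "backup_id": "b1", "volume_id": "v1", "version": "1.0.0",
    #     "chunk_size": 32768,
    #     "sha256s": ["9f86d08...", "e3b0c44...", "..."]
    #   }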


    def _read_metadata(self, backup):  # read the metadata file back
        container = backup['container']
        filename = self._metadata_filename(backup)
        LOG.debug('_read_metadata started, container name: %(container)s, '
                  'metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self.get_object_reader(container, filename) as reader:
            metadata_json = reader.read()
        if six.PY3:
            metadata_json = metadata_json.decode('utf-8')
        metadata = json.loads(metadata_json)
        LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
        return metadata


    def _read_sha256file(self, backup):  # read the sha256 file back
        container = backup['container']
        filename = self._sha256_filename(backup)
        LOG.debug('_read_sha256file started, container name: %(container)s, '
                  'sha256 filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self.get_object_reader(container, filename) as reader:
            sha256file_json = reader.read()
        if six.PY3:
            sha256file_json = sha256file_json.decode('utf-8')
        sha256file = json.loads(sha256file_json)
        LOG.debug('_read_sha256file finished.')
        return sha256file


    def _prepare_backup(self, backup):  # prepare the backup and return the backup metadata
        """Prepare the backup process and return the backup metadata."""
        volume = self.db.volume_get(self.context, backup.volume_id)


        if volume['size'] <= 0:  # the volume size must be positive
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)


        container = self._create_container(backup)  # create a container


        object_prefix = self._generate_object_name_prefix(backup)
        backup.service_metadata = object_prefix
        backup.save()


        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': backup.volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,  # initialize object_meta
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}  # initialize the sha256 info
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata


        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)


    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset."""
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']


        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self.get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        md5 = hashlib.md5(data).hexdigest()  # MD5 checksum of the uncompressed chunk data
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id


        LOG.debug('Calling eventlet.sleep(0)')
        eventlet.sleep(0)


    def _prepare_output_data(self, data):  # returns the compression algorithm used and the data to write
        if self.compressor is None:
            return 'none', data
        data_size_bytes = len(data)
        # Execute compression in native thread so it doesn't prevent
        # cooperative greenthread switching.
        compressed_data = eventlet.tpool.execute(self.compressor.compress,
                                                 data)
        comp_size_bytes = len(compressed_data)  # size after compression
        algorithm = CONF.backup_compression_algorithm.lower()
        if comp_size_bytes >= data_size_bytes:  # if not smaller than the original data, keep it uncompressed
            LOG.debug('Compression of this chunk was ineffective: '
                      'original length: %(data_size_bytes)d, '
                      'compressed length: %(compressed_size_bytes)d. '
                      'Using original data for this chunk.',
                      {'data_size_bytes': data_size_bytes,
                       'compressed_size_bytes': comp_size_bytes,
                       })
            return 'none', data
        LOG.debug('Compressed %(data_size_bytes)d bytes of data '
                  'to %(comp_size_bytes)d bytes using %(algorithm)s.',
                  {'data_size_bytes': data_size_bytes,
                   'comp_size_bytes': comp_size_bytes,
                   'algorithm': algorithm,
                   })
        return algorithm, compressed_data


    def _finalize_backup(self, backup, container, object_meta, object_sha256):  # finish the backup
        """Write the backup's metadata to the backup repository."""
        object_list = object_meta['list']
        object_id = object_meta['id']
        volume_meta = object_meta['volume_meta']
        sha256_list = object_sha256['sha256s']
        extra_metadata = object_meta.get('extra_metadata')
        self._write_sha256file(backup,  # write the sha256 file
                               backup.volume_id,
                               container,
                               sha256_list)
        self._write_metadata(backup,  # write the metadata file
                             backup.volume_id,
                             container,
                             object_list,
                             volume_meta,
                             extra_metadata)
        backup.object_count = object_id
        backup.save()
        LOG.debug('backup %s finished.', backup['id'])


    def _backup_metadata(self, backup, object_meta):  # back up the volume metadata
        """Backup volume metadata.


        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.
        """
        json_meta = self.get_metadata(backup['volume_id'])
        if not json_meta:
            LOG.debug("No volume metadata to backup.")
            return


        object_meta["volume_meta"] = json_meta //得到metadata


    def _send_progress_end(self, context, backup, object_meta):  # backup finished
        object_meta['backup_percent'] = 100  # set the percentage to 100
        volume_utils.notify_about_backup_usage(context,
                                               backup,
                                               "createprogress",
                                               extra_usage_info=
                                               object_meta)


    def _send_progress_notification(self, context, backup, object_meta,  # report progress as a percentage
                                    total_block_sent_num, total_volume_size):
        backup_percent = total_block_sent_num * 100 / total_volume_size  # blocks sent * 100 / total volume size
        object_meta['backup_percent'] = backup_percent
        volume_utils.notify_about_backup_usage(context,
                                               backup,
                                               "createprogress",
                                               extra_usage_info=
                                               object_meta)


    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the czr volume.


           If backup['parent_id'] is given, then an incremental backup
           is performed.
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:  # chunk_size_bytes must be a multiple of sha_block_size_bytes
            err = _('Chunk size is not multiple of '  # otherwise the hash blocks will not line up for incremental backups
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)


        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup.parent_id:  # there is a parent backup; check whether an incremental backup is possible
            parent_backup = objects.Backup.get_by_id(self.context,
                                                     backup.parent_id)
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=  # the parent used a different sha block size
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup.size > parent_backup.size:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)


        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)  # prepare the backup and fetch the related parameters


        counter = 0  # chunk counter for notifications
        total_block_sent_num = 0  # number of blocks sent so far


        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when "chunked" backup drivers are deployed.
        def _notify_progress():  # progress notification (by percentage)
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)


        sha256_list = object_sha256['sha256s']
        shaindex = 0  # index into the parent backup's sha list
        is_backup_canceled = False
        while True:
            # First of all, we check the status of this backup. If it
            # has been changed to delete or has been deleted, we cancel the
            # backup process to do forcing delete.
            backup.refresh()
            if backup.status in (fields.BackupStatus.DELETING,
                                 fields.BackupStatus.DELETED):
                is_backup_canceled = True
                # To avoid the chunk left when deletion complete, need to
                # clean up the object of chunk again.
                self.delete_backup(backup)  # clean up this backup's leftover chunks
                LOG.debug('Cancel the backup process of %s.', backup.id)
                break
            data_offset = volume_file.tell()  # current read position in the volume file
            data = volume_file.read(self.chunk_size_bytes)  # read one chunk of chunk_size_bytes (e.g. 50 MiB)
            if data == b'':  # end of the volume reached
                break


            # Calculate new shas with the datablock.
            # Loop over the chunk and compute a SHA-256 for every sha_block_size_bytes block.
            shalist = []
            off = 0
            datalen = len(data)
            while off < datalen:
                chunk_start = off
                chunk_end = chunk_start + self.sha_block_size_bytes  # advance one sha block at a time
                if chunk_end > datalen:  # clamp at the end of the data
                    chunk_end = datalen
                chunk = data[chunk_start:chunk_end]  # one sha-block-sized piece of data
                sha = hashlib.sha256(chunk).hexdigest()  # SHA-256 of this block
                shalist.append(sha)  # collect it in shalist
                off += self.sha_block_size_bytes  # move forward
            sha256_list.extend(shalist)  # append this chunk's hashes to the overall list
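            # A rough worked example (assuming the Swift driver's defaults of
            # 50 MiB chunks and 32 KiB sha blocks, i.e. 52428800 / 32768):
            # every full chunk contributes 1600 entries to sha256_list, and a
            # 1 GiB volume therefore ends up with about 32768 SHA-256 values
            # in its sha256file.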


            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,  # write offset = chunk offset + extent offset
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1


                # The last extent extends to the end of the data buffer; if an extent is still open here, flush it to the end.
                if extent_off != -1:
                    extent_end = datalen
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,  # back up the whole chunk
                                   object_meta, extra_metadata)
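            # A short hypothetical trace of the incremental path above: if the
            # chunk holds 4 sha blocks and only blocks 1 and 2 (0-based) differ
            # from parent_backup_shalist, extent_off is set to
            # 1 * sha_block_size_bytes when block 1 mismatches, the extent is
            # closed when block 3 matches again, and a single segment of
            # 2 * sha_block_size_bytes is written at
            # data_offset + 1 * sha_block_size_bytes.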


            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0


        # Stop the timer.
        timer.stop()
        # If backup has been cancelled we have nothing more to do
        # but timer.stop().
        if is_backup_canceled:
            return
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)


        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Backup volume metadata failed.")
                    self.delete_backup(backup)


        self._finalize_backup(backup, container, object_meta, object_sha256)


    def _restore_v1(self, backup, volume_id, metadata, volume_file):
        """Restore a v1 volume backup."""
        backup_id = backup['id']
        LOG.debug('v1 volume backup restore of %s started.', backup_id)
        extra_metadata = metadata.get('extra_metadata')
        container = backup['container']
        metadata_objects = metadata['objects']
        metadata_object_names = []
        for obj in metadata_objects:
            metadata_object_names.extend(obj.keys())
        LOG.debug('metadata_object_names = %s.', metadata_object_names)
        prune_list = [self._metadata_filename(backup),
                      self._sha256_filename(backup)]
        object_names = [object_name for object_name in
                        self._generate_object_names(backup)
                        if object_name not in prune_list]
        if sorted(object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual object list '
                    'does not match object list stored in metadata.')
            raise exception.InvalidBackup(reason=err)


        for metadata_object in metadata_objects:
            object_name, obj = list(metadata_object.items())[0]
            LOG.debug('restoring object. backup: %(backup_id)s, '
                      'container: %(container)s, object name: '
                      '%(object_name)s, volume: %(volume_id)s.',
                      {
                          'backup_id': backup_id,
                          'container': container,
                          'object_name': object_name,
                          'volume_id': volume_id,
                      })


            with self.get_object_reader(
                    container, object_name,
                    extra_metadata=extra_metadata) as reader:
                body = reader.read()
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            volume_file.seek(obj['offset'])  # seek to this object's offset within the volume
            if decompressor is not None:
                LOG.debug('decompressing data using %s algorithm',
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                volume_file.write(decompressed)
            else:
                volume_file.write(body)


            # force flush every write to avoid long blocking write on close
            volume_file.flush()


            # Be tolerant to IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.info("volume_file does not support fileno() so skipping "
                         "fsync()")
            else:
                os.fsync(fileno)


            # Restoring a backup to a volume can take some time. Yield so other
            # threads can run, allowing for among other things the service
            # status to be updated
            eventlet.sleep(0)
        LOG.debug('v1 volume backup restore of %s finished.',
                  backup_id)


    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from backup repository."""
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s '
                  'container: %(container)s, to volume %(volume_id)s, '
                  'backup: %(backup_id)s.',
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                  })
        metadata = self._read_metadata(backup)
        metadata_version = metadata['version']
        LOG.debug('Restoring backup version %s', metadata_version)
        try:
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)


        # Build a list of backups based on parent_id. A full backup
        # will be the last one in the list.
        backup_list = []
        backup_list.append(backup)
        current_backup = backup
        while current_backup.parent_id:
            prev_backup = objects.Backup.get_by_id(self.context,
                                                   current_backup.parent_id)
            backup_list.append(prev_backup)
            current_backup = prev_backup


        # Do a full restore first, then layer the incremental backups
        # on top of it in order.
        index = len(backup_list) - 1
        while index >= 0:
            backup1 = backup_list[index]
            index = index - 1
            metadata = self._read_metadata(backup1)
            restore_func(backup1, volume_id, metadata, volume_file)


            volume_meta = metadata.get('volume_meta', None)
            try:
                if volume_meta:
                    self.put_metadata(volume_id, volume_meta)
                else:
                    LOG.debug("No volume metadata in this backup.")
            except exception.BackupMetadataUnsupportedVersion:
                msg = _("Metadata restore failed due to incompatible version.")
                LOG.error(msg)
                raise exception.BackupOperationError(msg)


        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup_id, 'volume_id': volume_id})


    def delete_backup(self, backup):
        """Delete the given backup."""
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
                  'prefix: %(pre)s.',
                  {'id': backup['id'],
                   'cont': container,
                   'pre': object_prefix})


        if container is not None and object_prefix is not None:
            object_names = []
            try:
                object_names = self._generate_object_names(backup)
            except Exception:
                LOG.warning('Error while listing objects, continuing'
                            ' with delete.')


            for object_name in object_names:
                self.delete_object(container, object_name)
                LOG.debug('deleted object: %(object_name)s'
                          ' in container: %(container)s.',
                          {
                              'object_name': object_name,
                              'container': container
                          })
                # Deleting a backup's objects can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)


        LOG.debug('delete %s finished.', backup['id'])
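
To tie the pieces together, here is a minimal standalone sketch (my own illustration, not cinder code) of how a full backup turns a volume's bytes into the per-block sha256 list and the metadata "objects" list. The sizes, prefix and fake volume below are made-up stand-ins for chunk_size_bytes, sha_block_size_bytes and the object name prefix returned by the concrete driver.

import hashlib
import json
import zlib

CHUNK_SIZE = 8 * 1024      # stand-in for chunk_size_bytes
SHA_BLOCK_SIZE = 1024      # stand-in for sha_block_size_bytes
PREFIX = 'demo_backup'     # stand-in for _generate_object_name_prefix()

fake_volume = b'A' * 12000 + b'B' * 5000   # stand-in for the volume data

object_list, sha256_list = [], []
object_id = 1
offset = 0
while offset < len(fake_volume):
    data = fake_volume[offset:offset + CHUNK_SIZE]

    # Per-block SHA-256, like the inner loop in backup().
    for off in range(0, len(data), SHA_BLOCK_SIZE):
        sha256_list.append(
            hashlib.sha256(data[off:off + SHA_BLOCK_SIZE]).hexdigest())

    # Compress, but keep the raw data if compression does not help,
    # like _prepare_output_data().
    compressed = zlib.compress(data)
    algorithm, output = (('zlib', compressed)
                         if len(compressed) < len(data) else ('none', data))
    # "output" is what a real driver would hand to get_object_writer().

    object_name = '%s-%05d' % (PREFIX, object_id)
    object_list.append({object_name: {'offset': offset,
                                      'length': len(data),
                                      'compression': algorithm,
                                      'md5': hashlib.md5(data).hexdigest()}})
    object_id += 1
    offset += len(data)

# What _write_metadata() and _write_sha256file() would serialize (trimmed).
metadata = {'version': '1.0.0', 'objects': object_list}
sha256file = {'version': '1.0.0', 'chunk_size': SHA_BLOCK_SIZE,
              'sha256s': sha256_list}
print(json.dumps(metadata, sort_keys=True, indent=2))
print(json.dumps(sha256file, sort_keys=True, indent=2))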


Reposted from blog.csdn.net/qq_23348071/article/details/78659949