# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses Swift as the backend

**Related Flags**

:backup_swift_url: The URL of the Swift endpoint (default:
                                        http://localhost:8080/v1/).
:backup_swift_object_size: The size in bytes of the Swift objects used
                                    for volume backups (default: 52428800).
:backup_swift_retry_attempts: The number of retries to make for Swift
                                    operations (default: 10).
:backup_swift_retry_backoff: The backoff time in seconds between retrying
                                    failed Swift operations (default: 10).
:backup_compression_algorithm: Compression algorithm to use for volume
                               backups. Supported options are:
                               None (to disable), zlib and bz2 (default: zlib)

"""
import hashlib
import httplib
import json
import os
import StringIO

import eventlet

from cinder.db import base
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from swiftclient import client as swift
48
LOG = logging.getLogger(__name__)
50
swiftbackup_service_opts = [
51
cfg.StrOpt('backup_swift_url',
52
default='http://localhost:8080/v1/',
53
help='The URL of the Swift endpoint'),
54
cfg.StrOpt('backup_swift_container',
55
default='volumebackups',
56
help='The default Swift container to use'),
57
cfg.IntOpt('backup_swift_object_size',
59
help='The size in bytes of Swift backup objects'),
60
cfg.IntOpt('backup_swift_retry_attempts',
62
help='The number of retries to make for Swift operations'),
63
cfg.IntOpt('backup_swift_retry_backoff',
65
help='The backoff time in seconds between Swift retries'),
66
cfg.StrOpt('backup_compression_algorithm',
68
help='Compression algorithm (None to disable)'),
72
FLAGS.register_opts(swiftbackup_service_opts)
75
class SwiftBackupService(base.Base):
76
"""Provides backup, restore and delete of backup objects within Swift."""
78
SERVICE_VERSION = '1.0.0'
80
def _get_compressor(self, algorithm):
82
if algorithm.lower() in ('none', 'off', 'no'):
84
elif algorithm.lower() in ('zlib', 'gzip'):
85
import zlib as compressor
87
elif algorithm.lower() in ('bz2', 'bzip2'):
88
import bz2 as compressor
93
err = _('unsupported compression algorithm: %s') % algorithm
94
raise ValueError(unicode(err))
96
def __init__(self, context, db_driver=None):
97
self.context = context
98
self.swift_url = '%sAUTH_%s' % (FLAGS.backup_swift_url,
99
self.context.project_id)
100
self.az = FLAGS.storage_availability_zone
101
self.data_block_size_bytes = FLAGS.backup_swift_object_size
102
self.swift_attempts = FLAGS.backup_swift_retry_attempts
103
self.swift_backoff = FLAGS.backup_swift_retry_backoff
105
self._get_compressor(FLAGS.backup_compression_algorithm)
106
self.conn = swift.Connection(None, None, None,
107
retries=self.swift_attempts,
108
preauthurl=self.swift_url,
109
preauthtoken=self.context.auth_token,
110
starting_backoff=self.swift_backoff)
111
super(SwiftBackupService, self).__init__(db_driver)
113
def _check_container_exists(self, container):
114
LOG.debug(_('_check_container_exists: container: %s') % container)
116
self.conn.head_container(container)
117
except swift.ClientException as error:
118
if error.http_status == httplib.NOT_FOUND:
119
LOG.debug(_('container %s does not exist') % container)
124
LOG.debug(_('container %s exists') % container)
127
def _create_container(self, context, backup):
128
backup_id = backup['id']
129
container = backup['container']
130
LOG.debug(_('_create_container started, container: %(container)s,'
131
'backup: %(backup_id)s') % locals())
132
if container is None:
133
container = FLAGS.backup_swift_container
134
self.db.backup_update(context, backup_id, {'container': container})
135
if not self._check_container_exists(container):
136
self.conn.put_container(container)
139
def _generate_swift_object_name_prefix(self, backup):
140
az = 'az_%s' % self.az
141
backup_name = '%s_backup_%s' % (az, backup['id'])
142
volume = 'volume_%s' % (backup['volume_id'])
143
timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
144
prefix = volume + '/' + timestamp + '/' + backup_name
145
LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix)
148
def _generate_object_names(self, backup):
149
prefix = backup['service_metadata']
150
swift_objects = self.conn.get_container(backup['container'],
152
full_listing=True)[1]
153
swift_object_names = []
154
for swift_object in swift_objects:
155
swift_object_names.append(swift_object['name'])
156
LOG.debug(_('generated object list: %s') % swift_object_names)
157
return swift_object_names
159
def _metadata_filename(self, backup):
160
swift_object_name = backup['service_metadata']
161
filename = '%s_metadata' % swift_object_name
164
def _write_metadata(self, backup, volume_id, container, object_list):
165
filename = self._metadata_filename(backup)
166
LOG.debug(_('_write_metadata started, container name: %(container)s,'
167
' metadata filename: %(filename)s') % locals())
169
metadata['version'] = self.SERVICE_VERSION
170
metadata['backup_id'] = backup['id']
171
metadata['volume_id'] = volume_id
172
metadata['backup_name'] = backup['display_name']
173
metadata['backup_description'] = backup['display_description']
174
metadata['created_at'] = str(backup['created_at'])
175
metadata['objects'] = object_list
176
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
177
reader = StringIO.StringIO(metadata_json)
178
etag = self.conn.put_object(container, filename, reader)
179
md5 = hashlib.md5(metadata_json).hexdigest()
181
err = _('error writing metadata file to swift, MD5 of metadata'
182
' file in swift [%(etag)s] is not the same as MD5 of '
183
'metadata file sent to swift [%(md5)s]') % locals()
184
raise exception.InvalidBackup(reason=err)
185
LOG.debug(_('_write_metadata finished'))
187
def _read_metadata(self, backup):
188
container = backup['container']
189
filename = self._metadata_filename(backup)
190
LOG.debug(_('_read_metadata started, container name: %(container)s, '
191
'metadata filename: %(filename)s') % locals())
192
(resp, body) = self.conn.get_object(container, filename)
193
metadata = json.loads(body)
194
LOG.debug(_('_read_metadata finished (%s)') % metadata)
195
return metadata['objects']
197
def backup(self, backup, volume_file):
198
"""Backup the given volume to swift using the given backup metadata.
200
backup_id = backup['id']
201
volume_id = backup['volume_id']
202
volume = self.db.volume_get(self.context, volume_id)
204
if volume['size'] <= 0:
205
err = _('volume size %d is invalid.') % volume['size']
206
raise exception.InvalidVolume(reason=err)
208
container = self._create_container(self.context, backup)
210
object_prefix = self._generate_swift_object_name_prefix(backup)
211
backup['service_metadata'] = object_prefix
212
self.db.backup_update(self.context, backup_id, {'service_metadata':
214
volume_size_bytes = volume['size'] * 1024 * 1024 * 1024
215
availability_zone = self.az
216
LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
217
' volume size: %(volume_size_bytes)d, swift object names'
218
' prefix %(object_prefix)s, availability zone:'
219
' %(availability_zone)s') % locals())
223
data_block_size_bytes = self.data_block_size_bytes
224
object_name = '%s-%05d' % (object_prefix, object_id)
226
obj[object_name] = {}
227
obj[object_name]['offset'] = volume_file.tell()
228
data = volume_file.read(data_block_size_bytes)
229
obj[object_name]['length'] = len(data)
232
LOG.debug(_('reading chunk of data from volume'))
233
if self.compressor is not None:
234
algorithm = FLAGS.backup_compression_algorithm.lower()
235
obj[object_name]['compression'] = algorithm
236
data_size_bytes = len(data)
237
data = self.compressor.compress(data)
238
comp_size_bytes = len(data)
239
LOG.debug(_('compressed %(data_size_bytes)d bytes of data'
240
' to %(comp_size_bytes)d bytes using '
241
'%(algorithm)s') % locals())
243
LOG.debug(_('not compressing data'))
244
obj[object_name]['compression'] = 'none'
246
reader = StringIO.StringIO(data)
247
LOG.debug(_('About to put_object'))
248
etag = self.conn.put_object(container, object_name, reader)
249
LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % locals())
250
md5 = hashlib.md5(data).hexdigest()
251
obj[object_name]['md5'] = md5
252
LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % locals())
254
err = _('error writing object to swift, MD5 of object in '
255
'swift %(etag)s is not the same as MD5 of object sent '
256
'to swift %(md5)s') % locals()
257
raise exception.InvalidBackup(reason=err)
258
object_list.append(obj)
260
LOG.debug(_('Calling eventlet.sleep(0)'))
262
self._write_metadata(backup, volume_id, container, object_list)
263
self.db.backup_update(self.context, backup_id, {'object_count':
265
LOG.debug(_('backup %s finished.') % backup_id)
267
def restore(self, backup, volume_id, volume_file):
268
"""Restore the given volume backup from swift.
270
backup_id = backup['id']
271
container = backup['container']
272
volume = self.db.volume_get(self.context, volume_id)
273
volume_size = volume['size']
274
backup_size = backup['size']
276
object_prefix = backup['service_metadata']
277
LOG.debug(_('starting restore of backup %(object_prefix)s from swift'
278
' container: %(container)s, to volume %(volume_id)s, '
279
'backup: %(backup_id)s') % locals())
280
swift_object_names = self._generate_object_names(backup)
281
metadata_objects = self._read_metadata(backup)
282
metadata_object_names = []
283
for metadata_object in metadata_objects:
284
metadata_object_names.extend(metadata_object.keys())
285
LOG.debug(_('metadata_object_names = %s') % metadata_object_names)
286
prune_list = [self._metadata_filename(backup)]
287
swift_object_names = [swift_object_name for swift_object_name in
288
swift_object_names if swift_object_name
290
if sorted(swift_object_names) != sorted(metadata_object_names):
291
err = _('restore_backup aborted, actual swift object list in '
292
'swift does not match object list stored in metadata')
293
raise exception.InvalidBackup(reason=err)
295
for metadata_object in metadata_objects:
296
object_name = metadata_object.keys()[0]
297
LOG.debug(_('restoring object from swift. backup: %(backup_id)s, '
298
'container: %(container)s, swift object name: '
299
'%(object_name)s, volume: %(volume_id)s') % locals())
300
(resp, body) = self.conn.get_object(container, object_name)
301
compression_algorithm = metadata_object[object_name]['compression']
302
decompressor = self._get_compressor(compression_algorithm)
303
if decompressor is not None:
304
LOG.debug(_('decompressing data using %s algorithm') %
305
compression_algorithm)
306
decompressed = decompressor.decompress(body)
307
volume_file.write(decompressed)
309
volume_file.write(body)
311
# force flush every write to avoid long blocking write on close
313
os.fsync(volume_file.fileno())
314
# Restoring a backup to a volume can take some time. Yield so other
315
# threads can run, allowing for among other things the service
316
# status to be updated
318
LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') %
321
def delete(self, backup):
322
"""Delete the given backup from swift."""
323
container = backup['container']
324
LOG.debug('delete started, backup: %s, container: %s, prefix: %s',
325
backup['id'], container, backup['service_metadata'])
327
if container is not None:
328
swift_object_names = []
330
swift_object_names = self._generate_object_names(backup)
332
LOG.warn(_('swift error while listing objects, continuing'
335
for swift_object_name in swift_object_names:
337
self.conn.delete_object(container, swift_object_name)
339
LOG.warn(_('swift error while deleting object %s, '
340
'continuing with delete') % swift_object_name)
342
LOG.debug(_('deleted swift object: %(swift_object_name)s'
343
' in container: %(container)s') % locals())
344
# Deleting a backup's objects from swift can take some time.
345
# Yield so other threads can run
348
LOG.debug(_('delete %s finished') % backup['id'])
351
def get_backup_service(context):
352
return SwiftBackupService(context)