# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Volume manager manages creating, attaching, detaching, and persistent storage.

Persistent storage volumes keep their state independent of instances.  You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.

**Related Flags**

:volume_topic:  What :mod:`rpc` topic to listen to (default: `volume`).
:volume_manager:  The module name of a class derived from
                  :class:`manager.Manager` (default:
                  :class:`nova.volume.manager.Manager`).
:storage_availability_zone:  Defaults to `nova`.
:volume_driver:  Used by :class:`Manager`.  Defaults to
                 :class:`nova.volume.driver.ISCSIDriver`.
:volume_group:  Name of the group that will contain exported volumes (default:
                `nova-volumes`)
:num_shell_tries:  Number of times to attempt to run commands (default: 3)

"""
from nova import context
from nova import exception
from nova import flags
from nova.image import glance
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import utils
from nova.volume import utils as volume_utils
LOG = logging.getLogger(__name__)

# Quota engine handle used when reserving/committing usage on volume delete.
QUOTAS = quota.QUOTAS

volume_manager_opts = [
    cfg.StrOpt('storage_availability_zone',
               default='nova',
               help='availability zone of this service'),
    cfg.StrOpt('volume_driver',
               default='nova.volume.driver.ISCSIDriver',
               help='Driver to use for volume creation'),
    cfg.BoolOpt('use_local_volumes',
                default=True,
                help='if True, will not discover local volumes'),
    cfg.BoolOpt('volume_force_update_capabilities',
                default=False,
                help='if True will force update capabilities on each check'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(volume_manager_opts)
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    def __init__(self, volume_driver=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags.

        :param volume_driver: dotted path of the driver class to load;
            falls back to FLAGS.volume_driver when not supplied.
        """
        if not volume_driver:
            volume_driver = FLAGS.volume_driver
        self.driver = importutils.import_object(volume_driver)
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        # NOTE(vish): Implementation specific db handling is done
        #             by the driver.
        self.driver.db = self.db
        # Last stats reported by the driver; compared against fresh stats
        # in _report_driver_status to avoid redundant capability fanouts.
        self._last_volume_stats = []
"""Do any initialization that needs to be run if this is a
95
standalone service."""
97
ctxt = context.get_admin_context()
98
self.driver.do_setup(ctxt)
99
self.driver.check_for_setup_error()
101
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
102
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
103
for volume in volumes:
104
if volume['status'] in ['available', 'in-use']:
105
self.driver.ensure_export(ctxt, volume)
107
LOG.info(_("volume %s: skipping export"), volume['name'])
109
LOG.debug(_('Resuming any in progress delete operations'))
110
for volume in volumes:
111
if volume['status'] == 'deleting':
112
LOG.info(_("Resuming delete on volume: %s"), volume['id'])
113
self.delete_volume(ctxt, volume['id'])
115
def create_volume(self, context, volume_id, snapshot_id=None,
116
image_id=None, reservations=None):
117
"""Creates and exports the volume."""
118
context = context.elevated()
119
volume_ref = self.db.volume_get(context, volume_id)
120
self._notify_about_volume_usage(context, volume_ref, "create.start")
121
LOG.info(_("volume %s: creating"), volume_ref['name'])
123
self.db.volume_update(context,
126
# NOTE(vish): so we don't have to get volume from db again
127
# before passing it to the driver.
128
volume_ref['host'] = self.host
134
vol_name = volume_ref['name']
135
vol_size = volume_ref['size']
136
LOG.debug(_("volume %(vol_name)s: creating lv of"
137
" size %(vol_size)sG") % locals())
138
if snapshot_id is None and image_id is None:
139
model_update = self.driver.create_volume(volume_ref)
140
elif snapshot_id is not None:
141
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
142
model_update = self.driver.create_volume_from_snapshot(
146
# create the volume from an image
147
image_service, image_id = \
148
glance.get_remote_image_service(context,
150
image_location = image_service.get_location(context, image_id)
151
cloned = self.driver.clone_image(volume_ref, image_location)
153
model_update = self.driver.create_volume(volume_ref)
154
status = 'downloading'
157
self.db.volume_update(context, volume_ref['id'], model_update)
159
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
160
model_update = self.driver.create_export(context, volume_ref)
162
self.db.volume_update(context, volume_ref['id'], model_update)
164
with excutils.save_and_reraise_exception():
165
self.db.volume_update(context,
166
volume_ref['id'], {'status': 'error'})
168
now = timeutils.utcnow()
169
volume_ref = self.db.volume_update(context,
170
volume_ref['id'], {'status': status,
172
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
174
self._notify_about_volume_usage(context, volume_ref, "create.end")
176
if image_id and not cloned:
177
#copy the image onto the volume.
178
self._copy_image_to_volume(context, volume_ref, image_id)
181
def delete_volume(self, context, volume_id):
182
"""Deletes and unexports volume."""
183
context = context.elevated()
184
volume_ref = self.db.volume_get(context, volume_id)
185
if volume_ref['attach_status'] == "attached":
186
# Volume is still attached, need to detach first
187
raise exception.VolumeAttached(volume_id=volume_id)
188
if volume_ref['host'] != self.host:
189
raise exception.InvalidVolume(
190
reason=_("Volume is not local to this node"))
192
self._notify_about_volume_usage(context, volume_ref, "delete.start")
195
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
196
self.driver.remove_export(context, volume_ref)
197
LOG.debug(_("volume %s: deleting"), volume_ref['name'])
198
self.driver.delete_volume(volume_ref)
199
except exception.VolumeIsBusy:
200
LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
201
self.driver.ensure_export(context, volume_ref)
202
self.db.volume_update(context, volume_ref['id'],
203
{'status': 'available'})
206
with excutils.save_and_reraise_exception():
207
self.db.volume_update(context,
209
{'status': 'error_deleting'})
213
reservations = QUOTAS.reserve(context, volumes=-1,
214
gigabytes=-volume_ref['size'])
217
LOG.exception(_("Failed to update usages deleting volume"))
219
volume_ref = self.db.volume_destroy(context, volume_id)
220
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
221
self._notify_about_volume_usage(context, volume_ref, "delete.end")
223
# Commit the reservations
225
QUOTAS.commit(context, reservations)
229
def create_snapshot(self, context, volume_id, snapshot_id):
230
"""Creates and exports the snapshot."""
231
context = context.elevated()
232
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
233
LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])
236
snap_name = snapshot_ref['name']
237
LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
238
model_update = self.driver.create_snapshot(snapshot_ref)
240
self.db.snapshot_update(context, snapshot_ref['id'],
244
with excutils.save_and_reraise_exception():
245
self.db.snapshot_update(context,
249
self.db.snapshot_update(context,
250
snapshot_ref['id'], {'status': 'available',
252
LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
255
def delete_snapshot(self, context, snapshot_id):
256
"""Deletes and unexports snapshot."""
257
context = context.elevated()
258
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
261
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
262
self.driver.delete_snapshot(snapshot_ref)
263
except exception.SnapshotIsBusy:
264
LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name'])
265
self.db.snapshot_update(context,
267
{'status': 'available'})
270
with excutils.save_and_reraise_exception():
271
self.db.snapshot_update(context,
273
{'status': 'error_deleting'})
275
self.db.snapshot_destroy(context, snapshot_id)
276
LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
279
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
280
"""Updates db to show volume is attached"""
281
# TODO(vish): refactor this into a more general "reserve"
282
if not utils.is_uuid_like(instance_uuid):
283
raise exception.InvalidUUID(instance_uuid)
286
self.driver.attach_volume(context,
291
with excutils.save_and_reraise_exception():
292
self.db.volume_update(context,
294
{'status': 'error_attaching'})
296
self.db.volume_attached(context,
301
def detach_volume(self, context, volume_id):
302
"""Updates db to show volume is detached"""
303
# TODO(vish): refactor this into a more general "unreserve"
305
self.driver.detach_volume(context, volume_id)
307
with excutils.save_and_reraise_exception():
308
self.db.volume_update(context,
310
{'status': 'error_detaching'})
312
self.db.volume_detached(context, volume_id)
314
def _copy_image_to_volume(self, context, volume, image_id):
315
"""Downloads Glance image to the specified volume. """
316
volume_id = volume['id']
317
payload = {'volume_id': volume_id, 'image_id': image_id}
319
image_service, image_id = glance.get_remote_image_service(context,
321
self.driver.copy_image_to_volume(context, volume, image_service,
323
LOG.debug(_("Downloaded image %(image_id)s to %(volume_id)s "
324
"successfully") % locals())
325
self.db.volume_update(context, volume_id,
326
{'status': 'available'})
327
except Exception, error:
328
with excutils.save_and_reraise_exception():
329
payload['message'] = unicode(error)
330
self.db.volume_update(context, volume_id, {'status': 'error'})
332
def copy_volume_to_image(self, context, volume_id, image_id):
333
"""Uploads the specified volume to Glance."""
334
payload = {'volume_id': volume_id, 'image_id': image_id}
336
volume = self.db.volume_get(context, volume_id)
337
self.driver.ensure_export(context.elevated(), volume)
338
image_service, image_id = glance.get_remote_image_service(context,
340
self.driver.copy_volume_to_image(context, volume, image_service,
342
LOG.debug(_("Uploaded volume %(volume_id)s to "
343
"image (%(image_id)s) successfully") % locals())
344
except Exception, error:
345
with excutils.save_and_reraise_exception():
346
payload['message'] = unicode(error)
348
if volume['instance_uuid'] is None:
349
self.db.volume_update(context, volume_id,
350
{'status': 'available'})
352
self.db.volume_update(context, volume_id,
353
{'status': 'in-use'})
355
def initialize_connection(self, context, volume_id, connector):
356
"""Prepare volume for connection from host represented by connector.
358
This method calls the driver initialize_connection and returns
359
it to the caller. The connector parameter is a dictionary with
360
information about the host that will connect to the volume in the
365
'initiator': initiator,
368
ip: the ip address of the connecting machine
370
initiator: the iscsi initiator name of the connecting machine.
371
This can be None if the connecting machine does not support iscsi
374
driver is responsible for doing any necessary security setup and
375
returning a connection_info dictionary in the following format::
378
'driver_volume_type': driver_volume_type,
382
driver_volume_type: a string to identify the type of volume. This
383
can be used by the calling code to determine the
384
strategy for connecting to the volume. This could
385
be 'iscsi', 'rbd', 'sheepdog', etc.
387
data: this is the data that the calling code will use to connect
388
to the volume. Keep in mind that this will be serialized to
389
json in various places, so it should not contain any non-json
392
volume_ref = self.db.volume_get(context, volume_id)
393
return self.driver.initialize_connection(volume_ref, connector)
395
def terminate_connection(self, context, volume_id, connector):
396
"""Cleanup connection from host represented by connector.
398
The format of connector is the same as for initialize_connection.
400
volume_ref = self.db.volume_get(context, volume_id)
401
self.driver.terminate_connection(volume_ref, connector)
403
def check_for_export(self, context, instance_id):
404
"""Make sure whether volume is exported."""
405
instance_ref = self.db.instance_get(context, instance_id)
406
volumes = self.db.volume_get_all_by_instance_uuid(context,
407
instance_ref['uuid'])
409
for volume in volumes:
410
self.driver.check_for_export(context, volume['id'])
412
def _volume_stats_changed(self, stat1, stat2):
413
if FLAGS.volume_force_update_capabilities:
415
if len(stat1) != len(stat2):
417
for (k, v) in stat1.iteritems():
418
if (k, v) not in stat2.iteritems():
422
@manager.periodic_task
423
def _report_driver_status(self, context):
424
volume_stats = self.driver.get_volume_stats(refresh=True)
426
LOG.info(_("Checking volume capabilities"))
428
if self._volume_stats_changed(self._last_volume_stats,
430
LOG.info(_("New capabilities found: %s"), volume_stats)
431
self._last_volume_stats = volume_stats
433
# This will grab info about the host and queue it
434
# to be sent to the Schedulers.
435
self.update_service_capabilities(self._last_volume_stats)
437
# avoid repeating fanouts
438
self.update_service_capabilities(None)
440
def _reset_stats(self):
441
LOG.info(_("Clear capabilities"))
442
self._last_volume_stats = []
444
def notification(self, context, event):
445
LOG.info(_("Notification {%s} received"), event)
448
def _notify_about_volume_usage(self, context, volume, event_suffix,
449
extra_usage_info=None):
450
volume_utils.notify_about_volume_usage(
451
context, volume, event_suffix,
452
extra_usage_info=extra_usage_info, host=self.host)