~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed

Viewing changes to nova/compute/manager.py

  • Committer: Bazaar Package Importer
  • Author(s): Chuck Short
  • Date: 2011-01-21 11:48:06 UTC
  • mto: This revision was merged to the branch mainline in revision 9.
  • Revision ID: james.westby@ubuntu.com-20110121114806-v8fvnnl6az4m4ohv
Tags: upstream-2011.1~bzr597
Import upstream version 2011.1~bzr597

(columns: old line, new line; "+" added, "-" removed)

 35   35    """
 36   36    
 37   37    import datetime
      38  + import random
      39  + import string
 38   40    import logging
 39        - 
 40        - from twisted.internet import defer
      41  + import socket
      42  + import functools
 41   43    
 42   44    from nova import exception
 43   45    from nova import flags
      46  + from nova import log as logging
 44   47    from nova import manager
      48  + from nova import rpc
 45   49    from nova import utils
 46   50    from nova.compute import power_state
 47   51    

 50   54                        'where instances are stored on disk')
 51   55    flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
 52   56                        'Driver to use for controlling virtualization')
      57  + flags.DEFINE_string('stub_network', False,
      58  +                     'Stub network related code')
      59  + flags.DEFINE_integer('password_length', 12,
      60  +                     'Length of generated admin passwords')
      61  + flags.DEFINE_string('console_host', socket.gethostname(),
      62  +                     'Console proxy host to use to connect to instances on'
      63  +                     'this host.')
      64  + 
      65  + LOG = logging.getLogger('nova.compute.manager')
      66  + 
      67  + 
      68  + def checks_instance_lock(function):
      69  +     """
      70  +     decorator used for preventing action against locked instances
      71  +     unless, of course, you happen to be admin
      72  + 
      73  +     """
      74  + 
      75  +     @functools.wraps(function)
      76  +     def decorated_function(self, context, instance_id, *args, **kwargs):
      77  + 
      78  +         LOG.info(_("check_instance_lock: decorating: |%s|"), function,
      79  +                  context=context)
      80  +         LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
      81  +                  self, context, instance_id, context=context)
      82  +         locked = self.get_lock(context, instance_id)
      83  +         admin = context.is_admin
      84  +         LOG.info(_("check_instance_lock: locked: |%s|"), locked,
      85  +                  context=context)
      86  +         LOG.info(_("check_instance_lock: admin: |%s|"), admin,
      87  +                  context=context)
      88  + 
      89  +         # if admin or unlocked call function otherwise log error
      90  +         if admin or not locked:
      91  +             LOG.info(_("check_instance_lock: executing: |%s|"), function,
      92  +                      context=context)
      93  +             function(self, context, instance_id, *args, **kwargs)
      94  +         else:
      95  +             LOG.error(_("check_instance_lock: not executing |%s|"),
      96  +                       function, context=context)
      97  +             return False
      98  + 
      99  +     return decorated_function
 53  100    
 54  101    
 55  102    class ComputeManager(manager.Manager):
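
Note: the new checks_instance_lock decorator above gates every state-changing
compute action. A minimal standalone sketch of the same pattern (checks_lock,
FakeManager, and FakeContext are illustrative stand-ins, not nova code):

    import functools

    def checks_lock(function):
        """Skip the wrapped action when the target is locked, unless admin."""
        @functools.wraps(function)
        def decorated(self, context, instance_id, *args, **kwargs):
            if context.is_admin or not self.get_lock(context, instance_id):
                return function(self, context, instance_id, *args, **kwargs)
            return False  # locked and caller is not admin: refuse
        return decorated

    class FakeContext(object):
        is_admin = False

    class FakeManager(object):
        def get_lock(self, context, instance_id):
            return instance_id == 'i-locked'

        @checks_lock
        def reboot_instance(self, context, instance_id):
            return 'rebooted %s' % instance_id

    m = FakeManager()
    print(m.reboot_instance(FakeContext(), 'i-locked'))   # False
    print(m.reboot_instance(FakeContext(), 'i-normal'))   # rebooted i-normal

Unlike this sketch, the decorated_function above discards the wrapped
function's return value, so callers only ever see None or False.
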
 67  114            self.volume_manager = utils.import_object(FLAGS.volume_manager)
 68  115            super(ComputeManager, self).__init__(*args, **kwargs)
 69  116    
     117  +     def init_host(self):
     118  +         """Do any initialization that needs to be run if this is a
     119  +            standalone service.
     120  +         """
     121  +         self.driver.init_host()
     122  + 
 70  123        def _update_state(self, context, instance_id):
 71  124            """Update the state of an instance from the driver info."""
 72  125            # FIXME(ja): include other fields from state?

 78  131                state = power_state.NOSTATE
 79  132            self.db.instance_set_state(context, instance_id, state)
 80  133    
 81        -     @defer.inlineCallbacks
 82        -     @exception.wrap_exception
 83        -     def refresh_security_group(self, context, security_group_id, **_kwargs):
 84        -         """This call passes stright through to the virtualization driver."""
 85        -         yield self.driver.refresh_security_group(security_group_id)
 86        - 
 87        -     @defer.inlineCallbacks
     134  +     def get_console_topic(self, context, **_kwargs):
     135  +         """Retrieves the console host for a project on this host
     136  +            Currently this is just set in the flags for each compute
     137  +            host."""
     138  +         #TODO(mdragon): perhaps make this variable by console_type?
     139  +         return self.db.queue_get_for(context,
     140  +                                      FLAGS.console_topic,
     141  +                                      FLAGS.console_host)
     142  + 
     143  +     def get_network_topic(self, context, **_kwargs):
     144  +         """Retrieves the network host for a project on this host"""
     145  +         # TODO(vish): This method should be memoized. This will make
     146  +         #             the call to get_network_host cheaper, so that
     147  +         #             it can pas messages instead of checking the db
     148  +         #             locally.
     149  +         if FLAGS.stub_network:
     150  +             host = FLAGS.network_host
     151  +         else:
     152  +             host = self.network_manager.get_network_host(context)
     153  +         return self.db.queue_get_for(context,
     154  +                                      FLAGS.network_topic,
     155  +                                      host)
     156  + 
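
Note: the TODO(vish) above asks for get_network_host to be memoized. A sketch
of the conventional decorator-level cache (the real method would need a
hashable key derived from the context, which this glosses over):

    import functools

    def memoize(function):
        """Cache results by argument tuple so repeat calls skip the db."""
        cache = {}

        @functools.wraps(function)
        def wrapper(*args):
            if args not in cache:
                cache[args] = function(*args)
            return cache[args]
        return wrapper

    @memoize
    def get_network_host(project_id):
        print('expensive db lookup for %s' % project_id)  # runs once per key
        return 'network-host-1'

    get_network_host('proj-a')  # performs the lookup
    get_network_host('proj-a')  # answered from the cache
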
     157  +     def get_console_pool_info(self, context, console_type):
     158  +         return self.driver.get_console_pool_info(console_type)
     159  + 
     160  +     @exception.wrap_exception
     161  +     def refresh_security_group_rules(self, context,
     162  +                                      security_group_id, **_kwargs):
     163  +         """This call passes straight through to the virtualization driver."""
     164  +         return self.driver.refresh_security_group_rules(security_group_id)
     165  + 
     166  +     @exception.wrap_exception
     167  +     def refresh_security_group_members(self, context,
     168  +                                        security_group_id, **_kwargs):
     169  +         """This call passes straight through to the virtualization driver."""
     170  +         return self.driver.refresh_security_group_members(security_group_id)
     171  + 
 88  172        @exception.wrap_exception
 89  173        def run_instance(self, context, instance_id, **_kwargs):
 90  174            """Launch a new instance with specified options."""
 91  175            context = context.elevated()
 92  176            instance_ref = self.db.instance_get(context, instance_id)
 93  177            if instance_ref['name'] in self.driver.list_instances():
 94        -             raise exception.Error("Instance has already been created")
 95        -         logging.debug("instance %s: starting...", instance_id)
 96        -         self.network_manager.setup_compute_network(context, instance_id)
     178  +             raise exception.Error(_("Instance has already been created"))
     179  +         LOG.audit(_("instance %s: starting..."), instance_id,
     180  +                   context=context)
 97  181            self.db.instance_update(context,
 98  182                                    instance_id,
 99  183                                    {'host': self.host})
100  184    
     185  +         self.db.instance_set_state(context,
     186  +                                    instance_id,
     187  +                                    power_state.NOSTATE,
     188  +                                    'networking')
     189  + 
     190  +         is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id
     191  +         # NOTE(vish): This could be a cast because we don't do anything
     192  +         #             with the address currently, but I'm leaving it as
     193  +         #             a call to ensure that network setup completes.  We
     194  +         #             will eventually also need to save the address here.
     195  +         if not FLAGS.stub_network:
     196  +             address = rpc.call(context,
     197  +                                self.get_network_topic(context),
     198  +                                {"method": "allocate_fixed_ip",
     199  +                                 "args": {"instance_id": instance_id,
     200  +                                          "vpn": is_vpn}})
     201  + 
     202  +             self.network_manager.setup_compute_network(context,
     203  +                                                        instance_id)
     204  + 
101  205            # TODO(vish) check to make sure the availability zone matches
102  206            self.db.instance_set_state(context,
103  207                                       instance_id,

105  209                                       'spawning')
106  210    
107  211            try:
108        -             yield self.driver.spawn(instance_ref)
     212  +             self.driver.spawn(instance_ref)
109  213                now = datetime.datetime.utcnow()
110  214                self.db.instance_update(context,
111  215                                        instance_id,
112  216                                        {'launched_at': now})
113  217            except Exception:  # pylint: disable-msg=W0702
114        -             logging.exception("instance %s: Failed to spawn",
115        -                               instance_ref['name'])
     218  +             LOG.exception(_("instance %s: Failed to spawn"), instance_id,
     219  +                           context=context)
116  220                self.db.instance_set_state(context,
117  221                                           instance_id,
118  222                                           power_state.SHUTDOWN)
119  223    
120  224            self._update_state(context, instance_id)
121  225    
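
Note: run_instance now obtains its fixed IP from the network service through
rpc.call, and terminate_instance below casts disassociate_floating_ip the same
way; both use a {"method": ..., "args": {...}} envelope. A toy dispatcher
showing how such an envelope maps onto a manager method (nova's real rpc layer
also routes through AMQP queues and passes the request context along):

    def dispatch(manager, message):
        """Resolve an rpc-style envelope against a manager object."""
        return getattr(manager, message['method'])(**message['args'])

    class FakeNetworkManager(object):
        def allocate_fixed_ip(self, instance_id, vpn):
            return '10.8.0.5' if vpn else '10.0.0.5'

    envelope = {"method": "allocate_fixed_ip",
                "args": {"instance_id": 42, "vpn": False}}
    print(dispatch(FakeNetworkManager(), envelope))  # 10.0.0.5
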
122        -     @defer.inlineCallbacks
123  226        @exception.wrap_exception
     227  +     @checks_instance_lock
124  228        def terminate_instance(self, context, instance_id):
125  229            """Terminate an instance on this machine."""
126  230            context = context.elevated()
127        -         logging.debug("instance %s: terminating", instance_id)
128        - 
129  231            instance_ref = self.db.instance_get(context, instance_id)
     232  +         LOG.audit(_("Terminating instance %s"), instance_id, context=context)
     233  + 
     234  +         if not FLAGS.stub_network:
     235  +             address = self.db.instance_get_floating_address(context,
     236  +                                                             instance_ref['id'])
     237  +             if address:
     238  +                 LOG.debug(_("Disassociating address %s"), address,
     239  +                           context=context)
     240  +                 # NOTE(vish): Right now we don't really care if the ip is
     241  +                 #             disassociated.  We may need to worry about
     242  +                 #             checking this later.
     243  +                 rpc.cast(context,
     244  +                          self.get_network_topic(context),
     245  +                          {"method": "disassociate_floating_ip",
     246  +                           "args": {"floating_address": address}})
     247  + 
     248  +             address = self.db.instance_get_fixed_address(context,
     249  +                                                          instance_ref['id'])
     250  +             if address:
     251  +                 LOG.debug(_("Deallocating address %s"), address,
     252  +                           context=context)
     253  +                 # NOTE(vish): Currently, nothing needs to be done on the
     254  +                 #             network node until release. If this changes,
     255  +                 #             we will need to cast here.
     256  +                 self.network_manager.deallocate_fixed_ip(context.elevated(),
     257  +                                                          address)
     258  + 
130  259            volumes = instance_ref.get('volumes', []) or []
131  260            for volume in volumes:
132  261                self.detach_volume(context, instance_id, volume['id'])
133  262            if instance_ref['state'] == power_state.SHUTOFF:
134  263                self.db.instance_destroy(context, instance_id)
135        -             raise exception.Error('trying to destroy already destroyed'
136        -                                   ' instance: %s' % instance_id)
137        -         yield self.driver.destroy(instance_ref)
     264  +             raise exception.Error(_('trying to destroy already destroyed'
     265  +                                     ' instance: %s') % instance_id)
     266  +         self.driver.destroy(instance_ref)
138  267    
139  268            # TODO(ja): should we keep it in a terminated state for a bit?
140  269            self.db.instance_destroy(context, instance_id)
141  270    
142        -     @defer.inlineCallbacks
143  271        @exception.wrap_exception
     272  +     @checks_instance_lock
144  273        def reboot_instance(self, context, instance_id):
145  274            """Reboot an instance on this server."""
146  275            context = context.elevated()
     276  +         self._update_state(context, instance_id)
147  277            instance_ref = self.db.instance_get(context, instance_id)
148        -         self._update_state(context, instance_id)
     278  +         LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
149  279    
150  280            if instance_ref['state'] != power_state.RUNNING:
151        -             logging.warn('trying to reboot a non-running '
152        -                          'instance: %s (state: %s excepted: %s)',
153        -                          instance_ref['internal_id'],
154        -                          instance_ref['state'],
155        -                          power_state.RUNNING)
     281  +             LOG.warn(_('trying to reboot a non-running '
     282  +                      'instance: %s (state: %s excepted: %s)'),
     283  +                      instance_id,
     284  +                      instance_ref['state'],
     285  +                      power_state.RUNNING,
     286  +                      context=context)
156  287    
157        -         logging.debug('instance %s: rebooting', instance_ref['name'])
158  288            self.db.instance_set_state(context,
159  289                                       instance_id,
160  290                                       power_state.NOSTATE,
161  291                                       'rebooting')
162        -         yield self.driver.reboot(instance_ref)
163        -         self._update_state(context, instance_id)
164        - 
165        -     @defer.inlineCallbacks
166        -     @exception.wrap_exception
     292  +         self.network_manager.setup_compute_network(context, instance_id)
     293  +         self.driver.reboot(instance_ref)
     294  +         self._update_state(context, instance_id)
     295  + 
     296  +     @exception.wrap_exception
     297  +     def snapshot_instance(self, context, instance_id, image_id):
     298  +         """Snapshot an instance on this server."""
     299  +         context = context.elevated()
     300  +         instance_ref = self.db.instance_get(context, instance_id)
     301  + 
     302  +         #NOTE(sirp): update_state currently only refreshes the state field
     303  +         # if we add is_snapshotting, we will need this refreshed too,
     304  +         # potentially?
     305  +         self._update_state(context, instance_id)
     306  + 
     307  +         LOG.audit(_('instance %s: snapshotting'), instance_id,
     308  +                   context=context)
     309  +         if instance_ref['state'] != power_state.RUNNING:
     310  +             LOG.warn(_('trying to snapshot a non-running '
     311  +                        'instance: %s (state: %s excepted: %s)'),
     312  +                      instance_id, instance_ref['state'], power_state.RUNNING)
     313  + 
     314  +         self.driver.snapshot(instance_ref, image_id)
     315  + 
     316  +     @exception.wrap_exception
     317  +     @checks_instance_lock
     318  +     def set_admin_password(self, context, instance_id, new_pass=None):
     319  +         """Set the root/admin password for an instance on this server."""
     320  +         context = context.elevated()
     321  +         instance_ref = self.db.instance_get(context, instance_id)
     322  +         if instance_ref['state'] != power_state.RUNNING:
     323  +             logging.warn('trying to reset the password on a non-running '
     324  +                     'instance: %s (state: %s expected: %s)',
     325  +                     instance_ref['id'],
     326  +                     instance_ref['state'],
     327  +                     power_state.RUNNING)
     328  + 
     329  +         logging.debug('instance %s: setting admin password',
     330  +                 instance_ref['name'])
     331  +         if new_pass is None:
     332  +             # Generate a random password
     333  +             new_pass = self._generate_password(FLAGS.password_length)
     334  + 
     335  +         self.driver.set_admin_password(instance_ref, new_pass)
     336  +         self._update_state(context, instance_id)
     337  + 
     338  +     def _generate_password(self, length=20):
     339  +         """Generate a random sequence of letters and digits
     340  +         to be used as a password.
     341  +         """
     342  +         chrs = string.letters + string.digits
     343  +         return "".join([random.choice(chrs) for i in xrange(length)])
     344  + 
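
Note: _generate_password above is Python 2 only (string.letters and xrange no
longer exist in Python 3), and random is not a cryptographic generator. A
present-day equivalent, assuming Python 3.6+ for the secrets module:

    import secrets
    import string

    def generate_password(length=12):
        """Random letters and digits drawn from a CSPRNG."""
        alphabet = string.ascii_letters + string.digits
        return ''.join(secrets.choice(alphabet) for _ in range(length))

    print(generate_password())  # e.g. 'aZ3kQ9mB1xVt'
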
     345  +     @exception.wrap_exception
     346  +     @checks_instance_lock
167  347        def rescue_instance(self, context, instance_id):
168  348            """Rescue an instance on this server."""
169  349            context = context.elevated()
170  350            instance_ref = self.db.instance_get(context, instance_id)
171        - 
172        -         logging.debug('instance %s: rescuing',
173        -                       instance_ref['internal_id'])
     351  +         LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
174  352            self.db.instance_set_state(context,
175  353                                       instance_id,
176  354                                       power_state.NOSTATE,
177  355                                       'rescuing')
178        -         yield self.driver.rescue(instance_ref)
     356  +         self.network_manager.setup_compute_network(context, instance_id)
     357  +         self.driver.rescue(instance_ref)
179  358            self._update_state(context, instance_id)
180  359    
181        -     @defer.inlineCallbacks
182  360        @exception.wrap_exception
     361  +     @checks_instance_lock
183  362        def unrescue_instance(self, context, instance_id):
184  363            """Rescue an instance on this server."""
185  364            context = context.elevated()
186  365            instance_ref = self.db.instance_get(context, instance_id)
187        - 
188        -         logging.debug('instance %s: unrescuing',
189        -                       instance_ref['internal_id'])
     366  +         LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
190  367            self.db.instance_set_state(context,
191  368                                       instance_id,
192  369                                       power_state.NOSTATE,
193  370                                       'unrescuing')
194        -         yield self.driver.unrescue(instance_ref)
195        -         self._update_state(context, instance_id)
     371  +         self.driver.unrescue(instance_ref)
     372  +         self._update_state(context, instance_id)
     373  + 
     374  +     @staticmethod
     375  +     def _update_state_callback(self, context, instance_id, result):
     376  +         """Update instance state when async task completes."""
     377  +         self._update_state(context, instance_id)
     378  + 
     379  +     @exception.wrap_exception
     380  +     @checks_instance_lock
     381  +     def pause_instance(self, context, instance_id):
     382  +         """Pause an instance on this server."""
     383  +         context = context.elevated()
     384  +         instance_ref = self.db.instance_get(context, instance_id)
     385  +         LOG.audit(_('instance %s: pausing'), instance_id, context=context)
     386  +         self.db.instance_set_state(context,
     387  +                                    instance_id,
     388  +                                    power_state.NOSTATE,
     389  +                                    'pausing')
     390  +         self.driver.pause(instance_ref,
     391  +             lambda result: self._update_state_callback(self,
     392  +                                                        context,
     393  +                                                        instance_id,
     394  +                                                        result))
     395  + 
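
Note: pause_instance (and unpause, suspend, and resume below) hand the driver
a completion callback rather than blocking; the lambda passes self explicitly
because _update_state_callback is declared @staticmethod. A minimal sketch of
that contract, with a fake driver that fires the callback when its work is
done:

    class FakeDriver(object):
        def pause(self, instance, callback):
            # ...ask the hypervisor to pause, then report completion
            callback('paused %s' % instance['name'])

    def on_done(result):
        print('refreshing instance state after: %s' % result)

    FakeDriver().pause({'name': 'i-1'}, on_done)
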
     396  +     @exception.wrap_exception
     397  +     @checks_instance_lock
     398  +     def unpause_instance(self, context, instance_id):
     399  +         """Unpause a paused instance on this server."""
     400  +         context = context.elevated()
     401  +         instance_ref = self.db.instance_get(context, instance_id)
     402  +         LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
     403  +         self.db.instance_set_state(context,
     404  +                                    instance_id,
     405  +                                    power_state.NOSTATE,
     406  +                                    'unpausing')
     407  +         self.driver.unpause(instance_ref,
     408  +             lambda result: self._update_state_callback(self,
     409  +                                                        context,
     410  +                                                        instance_id,
     411  +                                                        result))
     412  + 
     413  +     @exception.wrap_exception
     414  +     def get_diagnostics(self, context, instance_id):
     415  +         """Retrieve diagnostics for an instance on this server."""
     416  +         instance_ref = self.db.instance_get(context, instance_id)
     417  + 
     418  +         if instance_ref["state"] == power_state.RUNNING:
     419  +             LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
     420  +                       context=context)
     421  +             return self.driver.get_diagnostics(instance_ref)
     422  + 
     423  +     @exception.wrap_exception
     424  +     @checks_instance_lock
     425  +     def suspend_instance(self, context, instance_id):
     426  +         """
     427  +         suspend the instance with instance_id
     428  + 
     429  +         """
     430  +         context = context.elevated()
     431  +         instance_ref = self.db.instance_get(context, instance_id)
     432  +         LOG.audit(_('instance %s: suspending'), instance_id, context=context)
     433  +         self.db.instance_set_state(context, instance_id,
     434  +                                             power_state.NOSTATE,
     435  +                                             'suspending')
     436  +         self.driver.suspend(instance_ref,
     437  +             lambda result: self._update_state_callback(self,
     438  +                                                        context,
     439  +                                                        instance_id,
     440  +                                                        result))
     441  + 
     442  +     @exception.wrap_exception
     443  +     @checks_instance_lock
     444  +     def resume_instance(self, context, instance_id):
     445  +         """
     446  +         resume the suspended instance with instance_id
     447  + 
     448  +         """
     449  +         context = context.elevated()
     450  +         instance_ref = self.db.instance_get(context, instance_id)
     451  +         LOG.audit(_('instance %s: resuming'), instance_id, context=context)
     452  +         self.db.instance_set_state(context, instance_id,
     453  +                                             power_state.NOSTATE,
     454  +                                             'resuming')
     455  +         self.driver.resume(instance_ref,
     456  +             lambda result: self._update_state_callback(self,
     457  +                                                        context,
     458  +                                                        instance_id,
     459  +                                                        result))
     460  + 
     461  +     @exception.wrap_exception
     462  +     def lock_instance(self, context, instance_id):
     463  +         """
     464  +         lock the instance with instance_id
     465  + 
     466  +         """
     467  +         context = context.elevated()
     468  +         instance_ref = self.db.instance_get(context, instance_id)
     469  + 
     470  +         LOG.debug(_('instance %s: locking'), instance_id, context=context)
     471  +         self.db.instance_update(context, instance_id, {'locked': True})
     472  + 
     473  +     @exception.wrap_exception
     474  +     def unlock_instance(self, context, instance_id):
     475  +         """
     476  +         unlock the instance with instance_id
     477  + 
     478  +         """
     479  +         context = context.elevated()
     480  +         instance_ref = self.db.instance_get(context, instance_id)
     481  + 
     482  +         LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
     483  +         self.db.instance_update(context, instance_id, {'locked': False})
     484  + 
     485  +     @exception.wrap_exception
     486  +     def get_lock(self, context, instance_id):
     487  +         """
     488  +         return the boolean state of (instance with instance_id)'s lock
     489  + 
     490  +         """
     491  +         context = context.elevated()
     492  +         LOG.debug(_('instance %s: getting locked state'), instance_id,
     493  +                   context=context)
     494  +         instance_ref = self.db.instance_get(context, instance_id)
     495  +         return instance_ref['locked']
196  496    
197  497        @exception.wrap_exception
198  498        def get_console_output(self, context, instance_id):
199  499            """Send the console output for an instance."""
200  500            context = context.elevated()
201        -         logging.debug("instance %s: getting console output", instance_id)
202  501            instance_ref = self.db.instance_get(context, instance_id)
203        - 
     502  +         LOG.audit(_("Get console output for instance %s"), instance_id,
     503  +                   context=context)
204  504            return self.driver.get_console_output(instance_ref)
205  505    
206        -     @defer.inlineCallbacks
207  506        @exception.wrap_exception
     507  +     def get_ajax_console(self, context, instance_id):
     508  +         """Return connection information for an ajax console"""
     509  +         context = context.elevated()
     510  +         logging.debug(_("instance %s: getting ajax console"), instance_id)
     511  +         instance_ref = self.db.instance_get(context, instance_id)
     512  + 
     513  +         return self.driver.get_ajax_console(instance_ref)
     514  + 
     515  +     @checks_instance_lock
208  516        def attach_volume(self, context, instance_id, volume_id, mountpoint):
209  517            """Attach a volume to an instance."""
210  518            context = context.elevated()
211        -         logging.debug("instance %s: attaching volume %s to %s", instance_id,
212        -             volume_id, mountpoint)
213  519            instance_ref = self.db.instance_get(context, instance_id)
214        -         dev_path = yield self.volume_manager.setup_compute_volume(context,
215        -                                                                   volume_id)
     520  +         LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
     521  +                   volume_id, mountpoint, context=context)
     522  +         dev_path = self.volume_manager.setup_compute_volume(context,
     523  +                                                             volume_id)
216  524            try:
217        -             yield self.driver.attach_volume(instance_ref['name'],
218        -                                             dev_path,
219        -                                             mountpoint)
     525  +             self.driver.attach_volume(instance_ref['name'],
     526  +                                       dev_path,
     527  +                                       mountpoint)
220  528                self.db.volume_attached(context,
221  529                                        volume_id,
222  530                                        instance_id,

225  533                # NOTE(vish): The inline callback eats the exception info so we
226  534                #             log the traceback here and reraise the same
227  535                #             ecxception below.
228        -             logging.exception("instance %s: attach failed %s, removing",
229        -                               instance_id, mountpoint)
230        -             yield self.volume_manager.remove_compute_volume(context,
231        -                                                             volume_id)
     536  +             LOG.exception(_("instance %s: attach failed %s, removing"),
     537  +                           instance_id, mountpoint, context=context)
     538  +             self.volume_manager.remove_compute_volume(context,
     539  +                                                       volume_id)
232  540                raise exc
233        -         defer.returnValue(True)
234        - 
235        -     @defer.inlineCallbacks
     541  + 
     542  +         return True
     543  + 
236  544        @exception.wrap_exception
     545  +     @checks_instance_lock
237  546        def detach_volume(self, context, instance_id, volume_id):
238  547            """Detach a volume from an instance."""
239  548            context = context.elevated()
240        -         logging.debug("instance %s: detaching volume %s",
241        -                       instance_id,
242        -                       volume_id)
243  549            instance_ref = self.db.instance_get(context, instance_id)
244  550            volume_ref = self.db.volume_get(context, volume_id)
     551  +         LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
     552  +                   volume_id, volume_ref['mountpoint'], instance_id,
     553  +                   context=context)
245  554            if instance_ref['name'] not in self.driver.list_instances():
246        -             logging.warn("Detaching volume from unknown instance %s",
247        -                          instance_ref['name'])
     555  +             LOG.warn(_("Detaching volume from unknown instance %s"),
     556  +                      instance_id, context=context)
248  557            else:
249        -             yield self.driver.detach_volume(instance_ref['name'],
250        -                                             volume_ref['mountpoint'])
251        -         yield self.volume_manager.remove_compute_volume(context, volume_id)
     558  +             self.driver.detach_volume(instance_ref['name'],
     559  +                                       volume_ref['mountpoint'])
     560  +         self.volume_manager.remove_compute_volume(context, volume_id)
252  561            self.db.volume_detached(context, volume_id)
253        -         defer.returnValue(True)
     562  +         return True