~ubuntu-branches/ubuntu/quantal/nova/quantal-proposed

« back to all changes in this revision

Viewing changes to nova/compute/manager.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short
  • Date: 2012-01-20 11:54:15 UTC
  • mto: This revision was merged to the branch mainline in revision 62.
  • Revision ID: package-import@ubuntu.com-20120120115415-h2ujma9o536o1ut6
Tags: upstream-2012.1~e3~20120120.12170
Import upstream version 2012.1~e3~20120120.12170

Show diffs side-by-side

added added

removed removed

Lines of Context:
59
59
from nova import rpc
60
60
from nova import utils
61
61
from nova.virt import driver
 
62
from nova import vnc
62
63
from nova import volume
63
64
 
64
65
 
301
302
            if ((bdm['snapshot_id'] is not None) and
302
303
                (bdm['volume_id'] is None)):
303
304
                # TODO(yamahata): default name and description
 
305
                snapshot = self.volume_api.get_snapshot(context,
 
306
                                                        bdm['snapshot_id'])
304
307
                vol = self.volume_api.create(context, bdm['volume_size'],
305
 
                                             bdm['snapshot_id'], '', '')
 
308
                                             '', '', snapshot)
306
309
                # TODO(yamahata): creating volume simultaneously
307
310
                #                 reduces creation time?
308
 
                self.volume_api.wait_creation(context, vol['id'])
 
311
                self.volume_api.wait_creation(context, vol)
309
312
                self.db.block_device_mapping_update(
310
313
                    context, bdm['id'], {'volume_id': vol['id']})
311
314
                bdm['volume_id'] = vol['id']
312
315
 
313
316
            if bdm['volume_id'] is not None:
314
 
                self.volume_api.check_attach(context,
315
 
                                             volume_id=bdm['volume_id'])
316
 
                cinfo = self._attach_volume_boot(context, instance,
317
 
                                                    bdm['volume_id'],
318
 
                                                    bdm['device_name'])
 
317
                volume = self.volume_api.get(context, bdm['volume_id'])
 
318
                self.volume_api.check_attach(context, volume)
 
319
                cinfo = self._attach_volume_boot(context,
 
320
                                                 instance,
 
321
                                                 volume,
 
322
                                                 bdm['device_name'])
319
323
                self.db.block_device_mapping_update(
320
324
                        context, bdm['id'],
321
325
                        {'connection_info': utils.dumps(cinfo)})
375
379
            except Exception:
376
380
                with utils.save_and_reraise_exception():
377
381
                    self._deallocate_network(context, instance)
378
 
            self._notify_about_instance_usage(instance)
 
382
            self._notify_about_instance_usage(instance, network_info)
379
383
            if self._is_instance_terminated(instance_uuid):
380
384
                raise exception.InstanceNotFound
381
385
        except exception.InstanceNotFound:
517
521
                                     task_state=None,
518
522
                                     launched_at=utils.utcnow())
519
523
 
520
 
    def _notify_about_instance_usage(self, instance):
521
 
        usage_info = utils.usage_from_instance(instance)
 
524
    def _notify_about_instance_usage(self, instance, network_info=None):
 
525
        usage_info = utils.usage_from_instance(instance, network_info)
522
526
        notifier.notify('compute.%s' % self.host,
523
527
                        'compute.instance.create',
524
528
                        notifier.INFO, usage_info)
597
601
            try:
598
602
                # NOTE(vish): actual driver detach done in driver.destroy, so
599
603
                #             just tell nova-volume that we are done with it.
 
604
                volume = self.volume_api.get(context, bdm['volume_id'])
600
605
                self.volume_api.terminate_connection(context,
601
 
                                                     bdm['volume_id'],
 
606
                                                     volume,
602
607
                                                     FLAGS.my_ip)
603
 
                self.volume_api.detach(context, bdm['volume_id'])
 
608
                self.volume_api.detach(context, volume)
604
609
            except exception.DiskNotFound as exc:
605
610
                LOG.warn(_("Ignoring DiskNotFound: %s") % exc)
606
611
 
610
615
        for bdm in bdms:
611
616
            LOG.debug(_("terminating bdm %s") % bdm)
612
617
            if bdm['volume_id'] and bdm['delete_on_termination']:
613
 
                self.volume_api.delete(context, bdm['volume_id'])
 
618
                volume = self.volume_api.get(context, bdm['volume_id'])
 
619
                self.volume_api.delete(context, volume)
614
620
            # NOTE(vish): bdms will be deleted on instance destroy
615
621
 
616
622
    def _delete_instance(self, context, instance):
739
745
                              task_state=None,
740
746
                              launched_at=utils.utcnow())
741
747
 
742
 
        usage_info = utils.usage_from_instance(instance)
 
748
        usage_info = utils.usage_from_instance(instance, network_info)
743
749
        notifier.notify('compute.%s' % self.host,
744
750
                            'compute.instance.rebuild',
745
751
                            notifier.INFO,
907
913
 
908
914
            if instance_state != expected_state:
909
915
                self._instance_update(context, instance_id, task_state=None)
910
 
                raise exception.Error(_('Instance is not running'))
 
916
                raise exception.Error(_('Failed to set admin password. '
 
917
                                      'Instance %s is not running') %
 
918
                                      instance_ref["uuid"])
911
919
            else:
912
920
                try:
913
921
                    self.driver.set_admin_password(instance_ref, new_pass)
1038
1046
        self.driver.confirm_migration(
1039
1047
                migration_ref, instance_ref, network_info)
1040
1048
 
1041
 
        usage_info = utils.usage_from_instance(instance_ref)
 
1049
        usage_info = utils.usage_from_instance(instance_ref, network_info)
1042
1050
        notifier.notify('compute.%s' % self.host,
1043
1051
                            'compute.instance.resize.confirm',
1044
1052
                            notifier.INFO,
1266
1274
        instance_id = instance_ref['id']
1267
1275
        self.network_api.add_fixed_ip_to_instance(context, instance_id,
1268
1276
                                                  self.host, network_id)
1269
 
        usage = utils.usage_from_instance(instance_ref)
 
1277
 
 
1278
        network_info = self.inject_network_info(context,
 
1279
                                                instance_ref['uuid'])
 
1280
        self.reset_network(context, instance_ref['uuid'])
 
1281
 
 
1282
        usage = utils.usage_from_instance(instance_ref, network_info)
1270
1283
        notifier.notify('compute.%s' % self.host,
1271
1284
                        'compute.instance.create_ip',
1272
1285
                        notifier.INFO, usage)
1273
1286
 
1274
 
        self.inject_network_info(context, instance_ref['uuid'])
1275
 
        self.reset_network(context, instance_ref['uuid'])
1276
 
 
1277
1287
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1278
1288
    @checks_instance_lock
1279
1289
    @wrap_instance_fault
1286
1296
        instance_id = instance_ref['id']
1287
1297
        self.network_api.remove_fixed_ip_from_instance(context, instance_id,
1288
1298
                                                       address)
1289
 
        usage = utils.usage_from_instance(instance_ref)
 
1299
 
 
1300
        network_info = self.inject_network_info(context,
 
1301
                                                instance_ref['uuid'])
 
1302
        self.reset_network(context, instance_ref['uuid'])
 
1303
 
 
1304
        usage = utils.usage_from_instance(instance_ref, network_info)
1290
1305
        notifier.notify('compute.%s' % self.host,
1291
1306
                        'compute.instance.delete_ip',
1292
1307
                        notifier.INFO, usage)
1293
1308
 
1294
 
        self.inject_network_info(context, instance_ref['uuid'])
1295
 
        self.reset_network(context, instance_ref['uuid'])
1296
 
 
1297
1309
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1298
1310
    @checks_instance_lock
1299
1311
    @wrap_instance_fault
1442
1454
        LOG.debug(_("network_info to inject: |%s|"), network_info)
1443
1455
 
1444
1456
        self.driver.inject_network_info(instance, network_info)
 
1457
        return network_info
1445
1458
 
1446
1459
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1447
1460
    @wrap_instance_fault
1480
1493
 
1481
1494
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1482
1495
    @wrap_instance_fault
1483
 
    def get_vnc_console(self, context, instance_uuid):
 
1496
    def get_vnc_console(self, context, instance_uuid, console_type):
1484
1497
        """Return connection information for a vnc console."""
1485
1498
        context = context.elevated()
1486
1499
        LOG.debug(_("instance %s: getting vnc console"), instance_uuid)
1487
1500
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1488
 
        return self.driver.get_vnc_console(instance_ref)
1489
 
 
1490
 
    def _attach_volume_boot(self, context, instance, volume_id, mountpoint):
 
1501
 
 
1502
        token = str(utils.gen_uuid())
 
1503
 
 
1504
        if console_type == 'novnc':
 
1505
            # For essex, novncproxy_base_url must include the full path
 
1506
            # including the html file (like http://myhost/vnc_auto.html)
 
1507
            access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
 
1508
        elif console_type == 'xvpvnc':
 
1509
            access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
 
1510
        else:
 
1511
            raise exception.ConsoleTypeInvalid(console_type=console_type)
 
1512
 
 
1513
        # Retrieve connect info from driver, and then decorate with our
 
1514
        # access info token
 
1515
        connect_info = self.driver.get_vnc_console(instance_ref)
 
1516
        connect_info['token'] = token
 
1517
        connect_info['access_url'] = access_url
 
1518
 
 
1519
        return connect_info
 
1520
 
 
1521
    def _attach_volume_boot(self, context, instance, volume, mountpoint):
1491
1522
        """Attach a volume to an instance at boot time. So actual attach
1492
1523
        is done by instance creation"""
1493
1524
 
1494
1525
        instance_id = instance['id']
1495
1526
        instance_uuid = instance['uuid']
 
1527
        volume_id = volume['id']
1496
1528
        context = context.elevated()
1497
 
        LOG.audit(_("instance %(instance_uuid)s: booting with "
1498
 
                    "volume %(volume_id)s at %(mountpoint)s") %
1499
 
                  locals(), context=context)
 
1529
        msg = _("instance %(instance_uuid)s: booting with "
 
1530
                "volume %(volume_id)s at %(mountpoint)s")
 
1531
        LOG.audit(msg % locals(), context=context)
1500
1532
        address = FLAGS.my_ip
1501
1533
        connection_info = self.volume_api.initialize_connection(context,
1502
 
                                                                volume_id,
 
1534
                                                                volume,
1503
1535
                                                                address)
1504
 
        self.volume_api.attach(context, volume_id, instance_id, mountpoint)
 
1536
        self.volume_api.attach(context, volume, instance_id, mountpoint)
1505
1537
        return connection_info
1506
1538
 
1507
1539
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
1509
1541
    @wrap_instance_fault
1510
1542
    def attach_volume(self, context, instance_uuid, volume_id, mountpoint):
1511
1543
        """Attach a volume to an instance."""
 
1544
        volume = self.volume_api.get(context, volume_id)
1512
1545
        context = context.elevated()
1513
1546
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
1514
1547
        instance_id = instance_ref['id']
1515
 
        LOG.audit(
1516
 
            _("instance %(instance_uuid)s: attaching volume %(volume_id)s"
1517
 
              " to %(mountpoint)s") % locals(), context=context)
 
1548
        msg = _("instance %(instance_uuid)s: attaching volume %(volume_id)s"
 
1549
                " to %(mountpoint)s")
 
1550
        LOG.audit(msg % locals(), context=context)
1518
1551
        address = FLAGS.my_ip
1519
1552
        connection_info = self.volume_api.initialize_connection(context,
1520
 
                                                                volume_id,
 
1553
                                                                volume,
1521
1554
                                                                address)
1522
1555
        try:
1523
1556
            self.driver.attach_volume(connection_info,
1525
1558
                                      mountpoint)
1526
1559
        except Exception:  # pylint: disable=W0702
1527
1560
            with utils.save_and_reraise_exception():
1528
 
                LOG.exception(_("instance %(instance_uuid)s: attach failed"
1529
 
                                " %(mountpoint)s, removing") % locals(),
1530
 
                              context=context)
1531
 
                self.volume_api.terminate_connection(context, volume_id,
 
1561
                msg = _("instance %(instance_uuid)s: attach failed"
 
1562
                        " %(mountpoint)s, removing")
 
1563
                LOG.exception(msg % locals(), context=context)
 
1564
                self.volume_api.terminate_connection(context,
 
1565
                                                     volume,
1532
1566
                                                     address)
1533
1567
 
1534
 
        self.volume_api.attach(context, volume_id, instance_id, mountpoint)
 
1568
        self.volume_api.attach(context, volume, instance_id, mountpoint)
1535
1569
        values = {
1536
1570
            'instance_id': instance_id,
1537
1571
            'connection_info': utils.dumps(connection_info),
1571
1605
        instance_id = instance_ref['id']
1572
1606
        bdm = self._get_instance_volume_bdm(context, instance_id, volume_id)
1573
1607
        self._detach_volume(context, instance_ref, bdm)
1574
 
        self.volume_api.terminate_connection(context, volume_id, FLAGS.my_ip)
1575
 
        self.volume_api.detach(context.elevated(), volume_id)
 
1608
        volume = self.volume_api.get(context, volume_id)
 
1609
        self.volume_api.terminate_connection(context, volume, FLAGS.my_ip)
 
1610
        self.volume_api.detach(context.elevated(), volume)
1576
1611
        self.db.block_device_mapping_destroy_by_instance_and_volume(
1577
1612
            context, instance_id, volume_id)
1578
1613
        return True
1588
1623
            bdm = self._get_instance_volume_bdm(context,
1589
1624
                                                instance_id,
1590
1625
                                                volume_id)
1591
 
            self._detach_volume(context, instance_ref,
1592
 
                                bdm['volume_id'], bdm['device_name'])
1593
 
            self.volume_api.terminate_connection(context,
1594
 
                                                 volume_id,
1595
 
                                                 FLAGS.my_ip)
 
1626
            self._detach_volume(context, instance_ref, bdm)
 
1627
            volume = self.volume_api.get(context, volume_id)
 
1628
            self.volume_api.terminate_connection(context, volume, FLAGS.my_ip)
1596
1629
        except exception.NotFound:
1597
1630
            pass
1598
1631
 
1824
1857
        self.driver.unfilter_instance(instance_ref, network_info)
1825
1858
 
1826
1859
        # Database updating.
 
1860
        # NOTE(jkoelker) This needs to be converted to network api calls
 
1861
        #                if nova wants to support floating_ips in
 
1862
        #                quantum/melange
1827
1863
        try:
1828
1864
            # Not return if floating_ip is not found, otherwise,
1829
1865
            # instance never be accessible..
1863
1899
 
1864
1900
        # Restore volume state
1865
1901
        for volume_ref in instance_ref['volumes']:
1866
 
            volume_id = volume_ref['id']
1867
 
            self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
 
1902
            self.volume_api.update(ctxt, volume_ref, {'status': 'in-use'})
1868
1903
 
1869
1904
        # No instance booting at source host, but instance dir
1870
1905
        # must be deleted for preparing next block migration
1922
1957
 
1923
1958
        for bdm in self._get_instance_volume_bdms(context, instance_ref['id']):
1924
1959
            volume_id = bdm['volume_id']
1925
 
            self.db.volume_update(context, volume_id, {'status': 'in-use'})
1926
 
            self.volume_api.remove_from_compute(context, instance_ref['id'],
1927
 
                                                volume_id, dest)
 
1960
            volume = self.volume_api.get(context, volume_id)
 
1961
            self.volume_api.update(context, volume, {'status': 'in-use'})
 
1962
            self.volume_api.remove_from_compute(context,
 
1963
                                                volume,
 
1964
                                                instance_ref['id'],
 
1965
                                                dest)
1928
1966
 
1929
1967
        # Block migration needs empty image at destination host
1930
1968
        # before migration starts, so if any failure occurs,
1949
1987
        block_device_info = \
1950
1988
            self._get_instance_volume_block_device_info(context, instance_id)
1951
1989
        self.driver.destroy(instance_ref, network_info,
1952
 
                            block_device_info, True)
 
1990
                            block_device_info)
1953
1991
 
1954
1992
    @manager.periodic_task
1955
1993
    def _poll_rebooting_instances(self, context):