~ubuntu-branches/ubuntu/raring/nova/raring-proposed

« back to all changes in this revision

Viewing changes to nova/virt/xenapi/driver.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, Adam Gandelman, Chuck Short
  • Date: 2012-11-23 09:04:58 UTC
  • mfrom: (1.1.66)
  • Revision ID: package-import@ubuntu.com-20121123090458-91565o7aev1i1h71
Tags: 2013.1~g1-0ubuntu1
[ Adam Gandelman ]
* debian/control: Ensure novaclient is upgraded with nova,
  require python-keystoneclient >= 1:2.9.0. (LP: #1073289)
* debian/patches/{ubuntu/*, rbd-security.patch}: Dropped, applied
  upstream.
* debian/control: Add python-testtools to Build-Depends.

[ Chuck Short ]
* New upstream version.
* Refreshed debian/patches/avoid_setuptools_git_dependency.patch.
* debian/rules: FTBFS if missing binaries.
* debian/nova-scheudler.install: Add missing rabbit-queues and
  nova-rpc-zmq-receiver.
* Remove nova-volume since it doesn't exist anymore, transition to cinder-*.
* debian/rules: install apport hook in the right place.
* debian/patches/ubuntu-show-tests.patch: Display test failures.
* debian/control: Add depends on genisoimage.
* debian/control: Suggest guestmount.
* debian/control: Suggest websockify. (LP: #1076442)
* debian/nova.conf: Disable nova-volume service.
* debian/control: Depend on xen-system-* rather than the hypervisor.
* debian/control, debian/mans/nova-conductor.8, debian/nova-conductor.init,
  debian/nova-conductor.install, debian/nova-conductor.logrotate
  debian/nova-conductor.manpages, debian/nova-conductor.postrm
  debian/nova-conductor.upstart.in: Add nova-conductor service.
* debian/control: Add python-fixtures as a build dependency.

Show diffs side-by-side

added added

removed removed

Lines of Context:
47
47
from eventlet import timeout
48
48
 
49
49
from nova import context
50
 
from nova import db
51
50
from nova import exception
52
 
from nova import flags
53
51
from nova.openstack.common import cfg
54
52
from nova.openstack.common import log as logging
55
53
from nova.virt import driver
91
89
               default=5,
92
90
               help='Max number of times to poll for VHD to coalesce. '
93
91
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
94
 
    cfg.StrOpt('xenapi_agent_path',
95
 
               default='usr/sbin/xe-update-networking',
96
 
               help='Specifies the path in which the xenapi guest agent '
97
 
                    'should be located. If the agent is present, network '
98
 
                    'configuration is not injected into the image. '
99
 
                    'Used if compute_driver=xenapi.XenAPIDriver and '
100
 
                    ' flat_injected=True'),
101
92
    cfg.StrOpt('xenapi_sr_base_path',
102
93
               default='/var/run/sr-mount',
103
94
               help='Base path to the storage repository'),
125
116
               help='Timeout in seconds for XenAPI login.'),
126
117
    ]
127
118
 
128
 
FLAGS = flags.FLAGS
129
 
FLAGS.register_opts(xenapi_opts)
 
119
CONF = cfg.CONF
 
120
CONF.register_opts(xenapi_opts)
 
121
CONF.import_opt('host', 'nova.config')
130
122
 
131
123
 
132
124
class XenAPIDriver(driver.ComputeDriver):
133
125
    """A connection to XenServer or Xen Cloud Platform"""
134
126
 
135
 
    def __init__(self, read_only=False):
136
 
        super(XenAPIDriver, self).__init__()
 
127
    def __init__(self, virtapi, read_only=False):
 
128
        super(XenAPIDriver, self).__init__(virtapi)
137
129
 
138
 
        url = FLAGS.xenapi_connection_url
139
 
        username = FLAGS.xenapi_connection_username
140
 
        password = FLAGS.xenapi_connection_password
 
130
        url = CONF.xenapi_connection_url
 
131
        username = CONF.xenapi_connection_username
 
132
        password = CONF.xenapi_connection_password
141
133
        if not url or password is None:
142
134
            raise Exception(_('Must specify xenapi_connection_url, '
143
135
                              'xenapi_connection_username (optionally), and '
144
136
                              'xenapi_connection_password to use '
145
137
                              'compute_driver=xenapi.XenAPIDriver'))
146
138
 
147
 
        self._session = XenAPISession(url, username, password)
 
139
        self._session = XenAPISession(url, username, password, self.virtapi)
148
140
        self._volumeops = volumeops.VolumeOps(self._session)
149
141
        self._host_state = None
150
 
        self._host = host.Host(self._session)
151
 
        self._vmops = vmops.VMOps(self._session)
 
142
        self._host = host.Host(self._session, self.virtapi)
 
143
        self._vmops = vmops.VMOps(self._session, self.virtapi)
152
144
        self._initiator = None
153
145
        self._hypervisor_hostname = None
154
 
        self._pool = pool.ResourcePool(self._session)
 
146
        self._pool = pool.ResourcePool(self._session, self.virtapi)
155
147
 
156
148
    @property
157
149
    def host_state(self):
160
152
        return self._host_state
161
153
 
162
154
    def init_host(self, host):
163
 
        if FLAGS.xenapi_check_host:
 
155
        if CONF.xenapi_check_host:
164
156
            vm_utils.ensure_correct_host(self._session)
165
157
 
166
158
        try:
188
180
        """Finish reverting a resize, powering back on the instance"""
189
181
        # NOTE(vish): Xen currently does not use network info.
190
182
        self._vmops.finish_revert_migration(instance)
 
183
        self._attach_mapped_block_devices(instance, block_device_info)
191
184
 
192
185
    def finish_migration(self, context, migration, instance, disk_info,
193
186
                         network_info, image_meta, resize_instance=False,
195
188
        """Completes a resize, turning on the migrated instance"""
196
189
        self._vmops.finish_migration(context, migration, instance, disk_info,
197
190
                                     network_info, image_meta, resize_instance)
 
191
        self._attach_mapped_block_devices(instance, block_device_info)
 
192
 
 
193
    def _attach_mapped_block_devices(self, instance, block_device_info):
 
194
        block_device_mapping = driver.block_device_info_get_mapping(
 
195
                block_device_info)
 
196
        for vol in block_device_mapping:
 
197
            connection_info = vol['connection_info']
 
198
            mount_device = vol['mount_device'].rpartition("/")[2]
 
199
            self.attach_volume(connection_info,
 
200
                    instance['name'], mount_device)
198
201
 
199
202
    def snapshot(self, context, instance, image_id):
200
203
        """ Create snapshot from a running VM instance """
237
240
        """Transfers the VHD of a running instance to another host, then shuts
238
241
        off the instance copies over the COW disk"""
239
242
        # NOTE(vish): Xen currently does not use network info.
240
 
        return self._vmops.migrate_disk_and_power_off(context, instance,
241
 
                                                      dest, instance_type)
 
243
        rv = self._vmops.migrate_disk_and_power_off(context, instance,
 
244
                                                    dest, instance_type)
 
245
        block_device_mapping = driver.block_device_info_get_mapping(
 
246
                block_device_info)
 
247
        name_label = self._vmops._get_orig_vm_name_label(instance)
 
248
        for vol in block_device_mapping:
 
249
            connection_info = vol['connection_info']
 
250
            mount_device = vol['mount_device'].rpartition("/")[2]
 
251
            self._volumeops.detach_volume(connection_info,
 
252
                    name_label, mount_device)
 
253
        return rv
242
254
 
243
255
    def suspend(self, instance):
244
256
        """suspend the specified instance"""
266
278
        """Power on the specified instance"""
267
279
        self._vmops.power_on(instance)
268
280
 
269
 
    def poll_rebooting_instances(self, timeout):
 
281
    def soft_delete(self, instance):
 
282
        """Soft delete the specified instance"""
 
283
        self._vmops.soft_delete(instance)
 
284
 
 
285
    def restore(self, instance):
 
286
        """Restore the specified instance"""
 
287
        self._vmops.restore(instance)
 
288
 
 
289
    def poll_rebooting_instances(self, timeout, instances):
270
290
        """Poll for rebooting instances"""
271
 
        self._vmops.poll_rebooting_instances(timeout)
 
291
        self._vmops.poll_rebooting_instances(timeout, instances)
272
292
 
273
293
    def poll_rescued_instances(self, timeout):
274
294
        """Poll for rescued instances"""
298
318
        """Return data about VM diagnostics"""
299
319
        return self._vmops.get_diagnostics(instance)
300
320
 
301
 
    def get_all_bw_usage(self, instances, start_time, stop_time=None):
302
 
        """Return bandwidth usage info for each interface on each
 
321
    def get_all_bw_counters(self, instances):
 
322
        """Return bandwidth usage counters for each interface on each
303
323
           running VM"""
304
324
 
305
325
        # we only care about VMs that correspond to a nova-managed
306
326
        # instance:
307
327
        imap = dict([(inst.name, inst.uuid) for inst in instances])
308
 
 
309
 
        bwusage = []
310
 
        start_time = time.mktime(start_time.timetuple())
311
 
        if stop_time:
312
 
            stop_time = time.mktime(stop_time.timetuple())
 
328
        bwcounters = []
313
329
 
314
330
        # get a dictionary of instance names.  values are dictionaries
315
 
        # of mac addresses with values that are the bw stats:
 
331
        # of mac addresses with values that are the bw counters:
316
332
        # e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
317
 
        iusages = self._vmops.get_all_bw_usage(start_time, stop_time)
318
 
        for instance_name in iusages:
 
333
        all_counters = self._vmops.get_all_bw_counters()
 
334
        for instance_name, counters in all_counters.iteritems():
319
335
            if instance_name in imap:
320
336
                # yes these are stats for a nova-managed vm
321
337
                # correlate the stats with the nova instance uuid:
322
 
                iusage = iusages[instance_name]
323
 
 
324
 
                for macaddr, usage in iusage.iteritems():
325
 
                    bwusage.append(dict(mac_address=macaddr,
326
 
                                        uuid=imap[instance_name],
327
 
                                        bw_in=usage['bw_in'],
328
 
                                        bw_out=usage['bw_out']))
329
 
        return bwusage
 
338
                for vif_counter in counters.values():
 
339
                    vif_counter['uuid'] = imap[instance_name]
 
340
                    bwcounters.append(vif_counter)
 
341
        return bwcounters
330
342
 
331
343
    def get_console_output(self, instance):
332
344
        """Return snapshot of console"""
355
367
 
356
368
    @staticmethod
357
369
    def get_host_ip_addr():
358
 
        xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
 
370
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
359
371
        return xs_url.netloc
360
372
 
361
373
    def attach_volume(self, connection_info, instance_name, mountpoint):
371
383
                                             mountpoint)
372
384
 
373
385
    def get_console_pool_info(self, console_type):
374
 
        xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
 
386
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
375
387
        return {'address': xs_url.netloc,
376
 
                'username': FLAGS.xenapi_connection_username,
377
 
                'password': FLAGS.xenapi_connection_password}
 
388
                'username': CONF.xenapi_connection_username,
 
389
                'password': CONF.xenapi_connection_password}
378
390
 
379
 
    def get_available_resource(self):
 
391
    def get_available_resource(self, nodename):
380
392
        """Retrieve resource info.
381
393
 
382
394
        This method is called when nova-compute launches, and
383
395
        as part of a periodic task.
384
396
 
 
397
        :param nodename: ignored in this driver
385
398
        :returns: dictionary describing resources
386
399
 
387
400
        """
415
428
        return
416
429
 
417
430
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
 
431
                src_compute_info, dst_compute_info,
418
432
                block_migration=False, disk_over_commit=False):
419
433
        """Check if it is possible to execute live migration.
420
434
 
539
553
    def refresh_provider_fw_rules(self):
540
554
        return self._vmops.refresh_provider_fw_rules()
541
555
 
542
 
    def update_host_status(self):
543
 
        """Update the status info of the host, and return those values
544
 
            to the calling program."""
545
 
        return self.host_state.update_status()
546
 
 
547
556
    def get_host_stats(self, refresh=False):
548
557
        """Return the current state of the host. If 'refresh' is
549
558
           True, run the update first."""
597
606
        # TODO(tr3buchet): remove this function once all virts return false
598
607
        return False
599
608
 
 
609
    def resume_state_on_host_boot(self, context, instance, network_info,
 
610
                                  block_device_info=None):
 
611
        """resume guest state when a host is booted"""
 
612
        self._vmops.power_on(instance)
 
613
 
600
614
 
601
615
class XenAPISession(object):
602
616
    """The session to invoke XenAPI SDK calls"""
603
617
 
604
 
    def __init__(self, url, user, pw):
 
618
    def __init__(self, url, user, pw, virtapi):
605
619
        import XenAPI
606
620
        self.XenAPI = XenAPI
607
621
        self._sessions = queue.Queue()
613
627
        self.host_uuid = self._get_host_uuid()
614
628
        self.product_version, self.product_brand = \
615
629
            self._get_product_version_and_brand()
 
630
        self._virtapi = virtapi
616
631
 
617
632
    def _create_first_session(self, url, user, pw, exception):
618
633
        try:
619
634
            session = self._create_session(url)
620
 
            with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
 
635
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
621
636
                session.login_with_password(user, pw)
622
637
        except self.XenAPI.Failure, e:
623
638
            # if user and pw of the master are different, we're doomed!
633
648
        return url
634
649
 
635
650
    def _populate_session_pool(self, url, user, pw, exception):
636
 
        for i in xrange(FLAGS.xenapi_connection_concurrent - 1):
 
651
        for i in xrange(CONF.xenapi_connection_concurrent - 1):
637
652
            session = self._create_session(url)
638
 
            with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
 
653
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
639
654
                session.login_with_password(user, pw)
640
655
            self._sessions.put(session)
641
656
 
642
657
    def _get_host_uuid(self):
643
658
        if self.is_slave:
644
 
            aggr = db.aggregate_get_by_host(context.get_admin_context(),
645
 
                    FLAGS.host, key=pool_states.POOL_FLAG)[0]
 
659
            aggr = self._virtapi.aggregate_get_by_host(
 
660
                context.get_admin_context(),
 
661
                CONF.host, key=pool_states.POOL_FLAG)[0]
646
662
            if not aggr:
647
663
                LOG.error(_('Host is member of a pool, but DB '
648
664
                                'says otherwise'))
649
665
                raise exception.AggregateHostNotFound()
650
 
            return aggr.metadetails[FLAGS.host]
 
666
            return aggr.metadetails[CONF.host]
651
667
        else:
652
668
            with self._get_session() as session:
653
669
                host_ref = session.xenapi.session.get_this_host(session.handle)