~citrix-openstack/nova/xenapi-netinject-prop

« back to all changes in this revision

Viewing changes to nova/virt/xenapi/vmops.py

  • Committer: Salvatore Orlando
  • Date: 2011-03-08 22:33:28 UTC
  • mfrom: (669.1.97 nova)
  • Revision ID: salvatore.orlando@eu.citrix.com-20110308223328-b44thi2r5psq08fc
merge trunk

now using refactored _get_vm_opaqueref

Show diffs side-by-side

added

removed

Lines of Context:
22
22
import json
23
23
import M2Crypto
24
24
import os
 
25
import pickle
25
26
import subprocess
26
27
import tempfile
27
28
import uuid
62
63
                vms.append(rec["name_label"])
63
64
        return vms
64
65
 
65
 
    def spawn(self, instance):
 
66
    def _start(self, instance, vm_ref=None):
 
67
        """Power on a VM instance"""
 
68
        if not vm_ref:
 
69
            vm_ref = VMHelper.lookup(self._session, instance.name)
 
70
        if vm_ref is None:
 
71
            raise exception(_('Attempted to power on non-existent instance'
 
72
            ' bad instance id %s') % instance.id)
 
73
        LOG.debug(_("Starting instance %s"), instance.name)
 
74
        self._session.call_xenapi('VM.start', vm_ref, False, False)
 
75
 
 
76
    def spawn(self, instance, disk):
66
77
        """Create VM instance"""
67
78
        instance_name = instance.name
68
79
        vm = VMHelper.lookup(self._session, instance_name)
82
93
        user = AuthManager().get_user(instance.user_id)
83
94
        project = AuthManager().get_project(instance.project_id)
84
95
 
85
 
        disk_image_type = VMHelper.determine_disk_image_type(instance)
86
 
 
87
 
        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
88
 
            instance.image_id, user, project, disk_image_type)
89
 
 
90
 
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
91
 
 
92
 
        pv_kernel = False
 
96
        vdi_ref = kernel = ramdisk = pv_kernel = None
 
97
 
 
98
        # Are we building from a pre-existing disk?
 
99
        if not disk:
 
100
            #if kernel is not present we must download a raw disk
 
101
 
 
102
            disk_image_type = VMHelper.determine_disk_image_type(instance)
 
103
            vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
 
104
                    instance.image_id, user, project, disk_image_type)
 
105
            vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
 
106
 
 
107
        else:
 
108
            vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk)
 
109
 
93
110
        if disk_image_type == ImageType.DISK_RAW:
94
 
            #Have a look at the VDI and see if it has a PV kernel
 
111
            # Have a look at the VDI and see if it has a PV kernel
95
112
            pv_kernel = VMHelper.lookup_image(self._session, instance.id,
96
113
                                              vdi_ref)
97
114
        elif disk_image_type == ImageType.DISK_VHD:
99
116
            # configurable as Windows will use HVM.
100
117
            pv_kernel = True
101
118
 
102
 
        kernel = None
103
119
        if instance.kernel_id:
104
120
            kernel = VMHelper.fetch_image(self._session, instance.id,
105
121
                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
106
122
 
107
 
        ramdisk = None
108
123
        if instance.ramdisk_id:
109
124
            ramdisk = VMHelper.fetch_image(self._session, instance.id,
110
125
                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
111
126
 
112
127
        vm_ref = VMHelper.create_vm(self._session,
113
128
                                          instance, kernel, ramdisk, pv_kernel)
114
 
        VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
 
129
        VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
 
130
                vdi_ref=vdi_ref, userdevice=0, bootable=True)
115
131
 
116
132
        # Alter the image before VM start for, e.g. network injection
117
133
        if FLAGS.xenapi_inject_image:
177
193
        """Refactored out the common code of many methods that receive either
178
194
        a vm name or a vm instance, and want a vm instance in return.
179
195
        """
180
 
        vm = None
181
 
        #assume instance_or_vm is an instance object
182
 
        try:
 
196
        # if instance_or_vm is a string it must be opaque ref or instance name
 
197
        if isinstance(instance_or_vm, basestring):
 
198
            obj = None
 
199
            try:
 
200
                # check for opaque ref
 
201
                obj = self._session.get_xenapi().VM.get_record(instance_or_vm)
 
202
                return instance_or_vm
 
203
            except self.XenAPI.Failure:
 
204
                # wasn't an opaque ref, can be an instance name
 
205
                instance_name = instance_or_vm
 
206
 
 
207
        # if instance_or_vm is an int/long it must be instance id
 
208
        elif isinstance(instance_or_vm, (int, long)):
 
209
            ctx = context.get_admin_context()
 
210
            instance_obj = db.instance_get(ctx, instance_or_vm)
 
211
            instance_name = instance_obj.name
 
212
        else:
183
213
            instance_name = instance_or_vm.name
184
 
        except (AttributeError, KeyError):
185
 
            if isinstance(instance_or_vm, (int, long)):
186
 
                ctx = context.get_admin_context()
187
 
                instance_obj = db.instance_get(ctx, instance_or_vm)
188
 
                instance_name = instance_obj.name
189
 
            else:
190
 
                instance_name = instance_or_vm
191
 
        #try to lookup instance
192
 
        vm = VMHelper.lookup(self._session, instance_name)
193
 
        #if vm=None might be a VM ref
194
 
        if vm == None:
195
 
            vm_rec = self._session.get_xenapi().VM.get_record(instance_or_vm)
196
 
            if vm_rec != None:
197
 
                #found - it is a vm ref
198
 
                return instance_or_vm
199
 
        else:
200
 
            return vm
201
 
        #if we end up here instance_or_vm is neither an instance id or VM ref
202
 
        raise exception.NotFound(
203
 
                        _('Instance not present %s') % instance_name)
 
214
        vm_ref = VMHelper.lookup(self._session, instance_name)
 
215
        if vm_ref is None:
 
216
            raise exception.NotFound(
 
217
                            _('Instance not present %s') % instance_name)
 
218
        return vm_ref
204
219
 
205
220
    def _acquire_bootlock(self, vm):
206
221
        """Prevent an instance from booting"""
217
232
            "start")
218
233
 
219
234
    def snapshot(self, instance, image_id):
220
 
        """ Create snapshot from a running VM instance
 
235
        """Create snapshot from a running VM instance
221
236
 
222
237
        :param instance: instance to be snapshotted
223
238
        :param image_id: id of image to upload to
238
253
            that will bundle the VHDs together and then push the bundle into
239
254
            Glance.
240
255
        """
241
 
 
 
256
        template_vm_ref = None
 
257
        try:
 
258
            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
 
259
            # call plugin to ship snapshot off to glance
 
260
            VMHelper.upload_image(
 
261
                    self._session, instance.id, template_vdi_uuids, image_id)
 
262
        finally:
 
263
            if template_vm_ref:
 
264
                self._destroy(instance, template_vm_ref,
 
265
                        shutdown=False, destroy_kernel_ramdisk=False)
 
266
 
 
267
        logging.debug(_("Finished snapshot and upload for VM %s"), instance)
 
268
 
 
269
    def _get_snapshot(self, instance):
242
270
        #TODO(sirp): Add quiesce and VSS locking support when Windows support
243
271
        # is added
244
272
 
249
277
        try:
250
278
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
251
279
                self._session, instance.id, vm_ref, label)
 
280
            return template_vm_ref, template_vdi_uuids
252
281
        except self.XenAPI.Failure, exc:
253
282
            logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
254
283
                    % locals())
255
284
            return
256
285
 
 
286
    def migrate_disk_and_power_off(self, instance, dest):
 
287
        """Copies a VHD from one host machine to another
 
288
 
 
289
        :param instance: the instance that owns the VHD in question
 
290
        :param dest: the destination host machine
 
291
        :param disk_type: values are 'primary' or 'cow'
 
292
        """
 
293
        vm_ref = VMHelper.lookup(self._session, instance.name)
 
294
 
 
295
        # The primary VDI becomes the COW after the snapshot, and we can
 
296
        # identify it via the VBD. The base copy is the parent_uuid returned
 
297
        # from the snapshot creation
 
298
 
 
299
        base_copy_uuid = cow_uuid = None
 
300
        template_vdi_uuids = template_vm_ref = None
257
301
        try:
258
 
            # call plugin to ship snapshot off to glance
259
 
            VMHelper.upload_image(
260
 
                self._session, instance.id, template_vdi_uuids, image_id)
 
302
            # transfer the base copy
 
303
            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
 
304
            base_copy_uuid = template_vdi_uuids[1]
 
305
            vdi_ref, vm_vdi_rec = \
 
306
                    VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
 
307
            cow_uuid = vm_vdi_rec['uuid']
 
308
 
 
309
            params = {'host': dest,
 
310
                      'vdi_uuid': base_copy_uuid,
 
311
                      'instance_id': instance.id,
 
312
                      'sr_path': VMHelper.get_sr_path(self._session)}
 
313
 
 
314
            task = self._session.async_call_plugin('migration', 'transfer_vhd',
 
315
                    {'params': pickle.dumps(params)})
 
316
            self._session.wait_for_task(task, instance.id)
 
317
 
 
318
            # Now power down the instance and transfer the COW VHD
 
319
            self._shutdown(instance, vm_ref, method='clean')
 
320
 
 
321
            params = {'host': dest,
 
322
                      'vdi_uuid': cow_uuid,
 
323
                      'instance_id': instance.id,
 
324
                      'sr_path': VMHelper.get_sr_path(self._session), }
 
325
 
 
326
            task = self._session.async_call_plugin('migration', 'transfer_vhd',
 
327
                    {'params': pickle.dumps(params)})
 
328
            self._session.wait_for_task(task, instance.id)
 
329
 
261
330
        finally:
262
 
            self._destroy(instance, template_vm_ref, shutdown=False,
263
 
                          destroy_kernel_ramdisk=False)
264
 
 
265
 
        logging.debug(_("Finished snapshot and upload for VM %s"), instance)
 
331
            if template_vm_ref:
 
332
                self._destroy(instance, template_vm_ref,
 
333
                        shutdown=False, destroy_kernel_ramdisk=False)
 
334
 
 
335
        # TODO(mdietz): we could also consider renaming these to something
 
336
        # sensible so we don't need to blindly pass around dictionaries
 
337
        return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
 
338
 
 
339
    def attach_disk(self, instance, disk_info):
 
340
        """Links the base copy VHD to the COW via the XAPI plugin"""
 
341
        vm_ref = VMHelper.lookup(self._session, instance.name)
 
342
        new_base_copy_uuid = str(uuid.uuid4())
 
343
        new_cow_uuid = str(uuid.uuid4())
 
344
        params = {'instance_id': instance.id,
 
345
                  'old_base_copy_uuid': disk_info['base_copy'],
 
346
                  'old_cow_uuid': disk_info['cow'],
 
347
                  'new_base_copy_uuid': new_base_copy_uuid,
 
348
                  'new_cow_uuid': new_cow_uuid,
 
349
                  'sr_path': VMHelper.get_sr_path(self._session), }
 
350
 
 
351
        task = self._session.async_call_plugin('migration',
 
352
                'move_vhds_into_sr', {'params': pickle.dumps(params)})
 
353
        self._session.wait_for_task(task, instance.id)
 
354
 
 
355
        # Now we rescan the SR so we find the VHDs
 
356
        VMHelper.scan_default_sr(self._session)
 
357
 
 
358
        return new_cow_uuid
 
359
 
 
360
    def resize(self, instance, flavor):
 
361
        """Resize a running instance by changing it's RAM and disk size """
 
362
        raise NotImplementedError()
266
363
 
267
364
    def reboot(self, instance):
268
365
        """Reboot VM instance"""
308
405
            raise RuntimeError(resp_dict['message'])
309
406
        return resp_dict['message']
310
407
 
311
 
    def _start(self, instance, vm):
312
 
        """Start an instance"""
313
 
        task = self._session.call_xenapi("Async.VM.start", vm, False, False)
314
 
        self._session.wait_for_task(task, instance.id)
315
 
 
316
408
    def inject_file(self, instance, b64_path, b64_contents):
317
409
        """Write a file to the VM instance. The path to which it is to be
318
410
        written and the contents of the file need to be supplied; both should
355
447
            if hard:
356
448
                task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
357
449
            else:
358
 
                task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)
359
 
 
 
450
                task = self._session.call_xenapi('Async.VM.clean_shutdown', vm)
360
451
            self._session.wait_for_task(task, instance.id)
361
452
        except self.XenAPI.Failure, exc:
362
453
            LOG.exception(exc)