~tr3buchet/nova/lock

« back to all changes in this revision

Viewing changes to nova/compute/api.py

  • Committer: Vishvananda Ishaya
  • Date: 2010-12-22 20:59:53 UTC
  • mto: This revision was merged to the branch mainline in revision 482.
  • Revision ID: vishvananda@gmail.com-20101222205953-j2j5t0qjwlcd0t2s
merge trunk and upgrade to cheetah templating

Show diffs side-by-side

added

removed

Lines of Context:
73
73
        is_vpn = image_id == FLAGS.vpn_image_id
74
74
        if not is_vpn:
75
75
            image = self.image_service.show(context, image_id)
 
76
 
 
77
            # If kernel_id/ramdisk_id isn't explicitly set in API call
 
78
            # we take the defaults from the image's metadata
76
79
            if kernel_id is None:
77
 
                kernel_id = image.get('kernelId', FLAGS.default_kernel)
 
80
                kernel_id = image.get('kernelId', None)
78
81
            if ramdisk_id is None:
79
 
                ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
 
82
                ramdisk_id = image.get('ramdiskId', None)
80
83
 
81
84
            # Make sure we have access to kernel and ramdisk
82
 
            self.image_service.show(context, kernel_id)
83
 
            self.image_service.show(context, ramdisk_id)
 
85
            if kernel_id:
 
86
                self.image_service.show(context, kernel_id)
 
87
            if ramdisk_id:
 
88
                self.image_service.show(context, ramdisk_id)
84
89
 
85
90
        if security_group is None:
86
91
            security_group = ['default']
103
108
        base_options = {
104
109
            'reservation_id': utils.generate_uid('r'),
105
110
            'image_id': image_id,
106
 
            'kernel_id': kernel_id,
107
 
            'ramdisk_id': ramdisk_id,
 
111
            'kernel_id': kernel_id or '',
 
112
            'ramdisk_id': ramdisk_id or '',
108
113
            'state_description': 'scheduling',
109
114
            'user_id': context.user_id,
110
115
            'project_id': context.project_id,
120
125
 
121
126
        elevated = context.elevated()
122
127
        instances = []
123
 
        logging.debug("Going to run %s instances...", num_instances)
 
128
        logging.debug(_("Going to run %s instances..."), num_instances)
124
129
        for num in range(num_instances):
125
130
            instance = dict(mac_address=utils.generate_mac(),
126
131
                            launch_index=num,
157
162
                     {"method": "setup_fixed_ip",
158
163
                      "args": {"address": address}})
159
164
 
160
 
            logging.debug("Casting to scheduler for %s/%s's instance %s",
 
165
            logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
161
166
                          context.project_id, context.user_id, instance_id)
162
167
            rpc.cast(context,
163
168
                     FLAGS.scheduler_topic,
204
209
            instance = self.db.instance_get_by_internal_id(context,
205
210
                                                           instance_id)
206
211
        except exception.NotFound as e:
207
 
            logging.warning("Instance %d was not found during terminate",
 
212
            logging.warning(_("Instance %d was not found during terminate"),
208
213
                            instance_id)
209
214
            raise e
210
215
 
211
216
        if (instance['state_description'] == 'terminating'):
212
 
            logging.warning("Instance %d is already being terminated",
 
217
            logging.warning(_("Instance %d is already being terminated"),
213
218
                            instance_id)
214
219
            return
215
220
 
223
228
        address = self.db.instance_get_floating_address(context,
224
229
                                                        instance['id'])
225
230
        if address:
226
 
            logging.debug("Disassociating address %s" % address)
 
231
            logging.debug(_("Disassociating address %s") % address)
227
232
            # NOTE(vish): Right now we don't really care if the ip is
228
233
            #             disassociated.  We may need to worry about
229
234
            #             checking this later.  Perhaps in the scheduler?
234
239
 
235
240
        address = self.db.instance_get_fixed_address(context, instance['id'])
236
241
        if address:
237
 
            logging.debug("Deallocating address %s" % address)
 
242
            logging.debug(_("Deallocating address %s") % address)
238
243
            # NOTE(vish): Currently, nothing needs to be done on the
239
244
            #             network node until release. If this changes,
240
245
            #             we will need to cast here.
275
280
                 {"method": "reboot_instance",
276
281
                  "args": {"instance_id": instance['id']}})
277
282
 
 
283
    def pause(self, context, instance_id):
 
284
        """Pause the given instance."""
 
285
        instance = self.db.instance_get_by_internal_id(context, instance_id)
 
286
        host = instance['host']
 
287
        rpc.cast(context,
 
288
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
 
289
                 {"method": "pause_instance",
 
290
                  "args": {"instance_id": instance['id']}})
 
291
 
 
292
    def unpause(self, context, instance_id):
 
293
        """Unpause the given instance."""
 
294
        instance = self.db.instance_get_by_internal_id(context, instance_id)
 
295
        host = instance['host']
 
296
        rpc.cast(context,
 
297
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
 
298
                 {"method": "unpause_instance",
 
299
                  "args": {"instance_id": instance['id']}})
 
300
 
278
301
    def rescue(self, context, instance_id):
279
302
        """Rescue the given instance."""
280
303
        instance = self.db.instance_get_by_internal_id(context, instance_id)