FLAGS = flags.FLAGS
# Rescue-mode libvirt domain XML templates, one per supported hypervisor.
flags.DEFINE_string('libvirt_rescue_xml_template',
                    utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
                    'Libvirt RESCUE XML Template for QEmu/KVM')
flags.DEFINE_string('libvirt_rescue_xen_xml_template',
                    utils.abspath('virt/libvirt.rescue.xen.xml.template'),
                    'Libvirt RESCUE XML Template for xen')
flags.DEFINE_string('libvirt_rescue_uml_xml_template',
                    utils.abspath('virt/libvirt.rescue.uml.xml.template'),
                    'Libvirt RESCUE XML Template for user-mode-linux')
# TODO(vish): These flags should probably go into a shared location
# Image/kernel/ramdisk IDs used when booting an instance in rescue mode.
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
51
64
flags.DEFINE_string('libvirt_xml_template',
52
65
utils.abspath('virt/libvirt.qemu.xml.template'),
53
66
'Libvirt XML Template for QEmu/KVM')
62
75
'Template file for injected network')
63
76
flags.DEFINE_string('libvirt_type',
65
'Libvirt domain type (valid options are: kvm, qemu, uml, xen)')
78
'Libvirt domain type (valid options are: '
79
'kvm, qemu, uml, xen)')
66
80
flags.DEFINE_string('libvirt_uri',
68
82
'Override the default libvirt URI (which is dependent'
87
101
class LibvirtConnection(object):
88
102
def __init__(self, read_only):
    """Load the libvirt URI plus normal and rescue XML templates.

    The actual libvirt connection is opened lazily (self._wrapped_conn
    starts as None); read_only controls how that connection is opened.
    """
    (self.libvirt_uri,
     template_file,
     rescue_file) = self.get_uri_and_templates()
    # Templates are read once at construction and rendered per-instance
    # by to_xml().
    self.libvirt_xml = open(template_file).read()
    self.rescue_xml = open(rescue_file).read()
    self._wrapped_conn = None
    self.read_only = read_only
97
114
if not self._wrapped_conn or not self._test_connection():
98
115
logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
99
self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
116
self._wrapped_conn = self._connect(self.libvirt_uri,
100
118
return self._wrapped_conn
102
120
def _test_connection(self):
113
def get_uri_and_templates(self):
    """Pick the libvirt URI and the XML template pair for this hypervisor.

    Selection is driven by FLAGS.libvirt_type ('uml', 'xen', or the
    qemu/kvm default); FLAGS.libvirt_uri, when set, overrides the
    computed URI in every branch.

    Returns a (uri, template_file, rescue_file) tuple, where
    template_file is the normal domain template and rescue_file is the
    rescue-mode template.
    """
    if FLAGS.libvirt_type == 'uml':
        uri = FLAGS.libvirt_uri or 'uml:///system'
        template_file = FLAGS.libvirt_uml_xml_template
        rescue_file = FLAGS.libvirt_rescue_uml_xml_template
    elif FLAGS.libvirt_type == 'xen':
        uri = FLAGS.libvirt_uri or 'xen:///'
        template_file = FLAGS.libvirt_xen_xml_template
        rescue_file = FLAGS.libvirt_rescue_xen_xml_template
    else:
        uri = FLAGS.libvirt_uri or 'qemu:///system'
        template_file = FLAGS.libvirt_xml_template
        rescue_file = FLAGS.libvirt_rescue_xml_template
    return uri, template_file, rescue_file
125
146
def _connect(self, uri, read_only):
126
147
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
145
166
# If the instance is already terminated, we're still happy
146
167
d = defer.Deferred()
147
d.addCallback(lambda _: self._cleanup(instance))
169
d.addCallback(lambda _: self._cleanup(instance))
148
170
# FIXME: What does this comment mean?
149
171
# TODO(termie): short-circuit me for tests
150
# WE'LL save this for when we do shutdown,
172
# WE'LL save this for when we do shutdown,
151
173
# instead of destroy - but destroy returns immediately
152
174
timer = task.LoopingCall(f=None)
153
176
def _wait_for_shutdown():
155
178
state = self.get_info(instance['name'])['state']
195
219
@defer.inlineCallbacks
196
220
@exception.wrap_exception
197
221
def reboot(self, instance):
222
yield self.destroy(instance, False)
198
223
xml = self.to_xml(instance)
199
yield self._conn.lookupByName(instance['name']).destroy()
200
224
yield self._conn.createXML(xml, 0)
202
226
d = defer.Deferred()
203
227
timer = task.LoopingCall(f=None)
204
229
def _wait_for_reboot():
206
231
state = self.get_info(instance['name'])['state']
217
242
power_state.SHUTDOWN)
220
246
timer.f = _wait_for_reboot
221
247
timer.start(interval=0.5, now=True)
224
250
@defer.inlineCallbacks
225
251
@exception.wrap_exception
252
def rescue(self, instance):
253
yield self.destroy(instance, False)
255
xml = self.to_xml(instance, rescue=True)
256
rescue_images = {'image_id': FLAGS.rescue_image_id,
257
'kernel_id': FLAGS.rescue_kernel_id,
258
'ramdisk_id': FLAGS.rescue_ramdisk_id}
259
yield self._create_image(instance, xml, 'rescue-', rescue_images)
260
yield self._conn.createXML(xml, 0)
263
timer = task.LoopingCall(f=None)
265
def _wait_for_rescue():
267
state = self.get_info(instance['name'])['state']
268
db.instance_set_state(None, instance['id'], state)
269
if state == power_state.RUNNING:
270
logging.debug('instance %s: rescued', instance['name'])
273
except Exception, exn:
274
logging.error('_wait_for_rescue failed: %s', exn)
275
db.instance_set_state(None,
277
power_state.SHUTDOWN)
281
timer.f = _wait_for_rescue
282
timer.start(interval=0.5, now=True)
285
@defer.inlineCallbacks
@exception.wrap_exception
def unrescue(self, instance):
    """Take the given instance out of rescue mode.

    Rebooting regenerates the normal (non-rescue) domain XML and
    recreates the domain from it, which is exactly what leaving rescue
    mode requires.
    """
    # NOTE(vish): Because reboot destroys and recreates an instance using
    #             the normal xml file, we can just call reboot here
    yield self.reboot(instance)
292
@defer.inlineCallbacks
293
@exception.wrap_exception
226
294
def spawn(self, instance):
227
295
xml = self.to_xml(instance)
228
296
db.instance_set_state(context.get_admin_context(),
230
298
power_state.NOSTATE,
232
yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
300
yield NWFilterFirewall(self._conn).\
301
setup_nwfilters_for_instance(instance)
233
302
yield self._create_image(instance, xml)
234
303
yield self._conn.createXML(xml, 0)
235
# TODO(termie): this should actually register
236
# a callback to check for successful boot
237
304
logging.debug("instance %s: is running", instance['name'])
239
306
local_d = defer.Deferred()
240
307
timer = task.LoopingCall(f=None)
241
309
def _wait_for_boot():
243
311
state = self.get_info(instance['name'])['state']
266
334
if virsh_output.startswith('/dev/'):
267
335
logging.info('cool, it\'s a device')
268
d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False)
269
d.addCallback(lambda r:r[0])
336
d = process.simple_execute("sudo dd if=%s iflag=nonblock" %
337
virsh_output, check_exit_code=False)
338
d.addCallback(lambda r: r[0])
286
355
@exception.wrap_exception
287
356
def get_console_output(self, instance):
288
console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log')
289
d = process.simple_execute('sudo chown %d %s' % (os.getuid(), console_log))
357
console_log = os.path.join(FLAGS.instances_path, instance['name'],
359
d = process.simple_execute('sudo chown %d %s' % (os.getuid(),
290
361
if FLAGS.libvirt_type == 'xen':
291
362
# Xen is spethial
292
d.addCallback(lambda _: process.simple_execute("virsh ttyconsole %s" % instance['name']))
363
d.addCallback(lambda _:
364
process.simple_execute("virsh ttyconsole %s" %
293
366
d.addCallback(self._flush_xen_console)
294
367
d.addCallback(self._append_to_file, console_log)
297
370
d.addCallback(self._dump_file)
301
373
@defer.inlineCallbacks
302
def _create_image(self, inst, libvirt_xml):
374
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
303
375
# syntactic nicety
304
basepath = lambda fname='': os.path.join(FLAGS.instances_path,
376
basepath = lambda fname='', prefix=prefix: os.path.join(
377
FLAGS.instances_path,
308
381
# ensure directories exist and are writable
309
yield process.simple_execute('mkdir -p %s' % basepath())
310
yield process.simple_execute('chmod 0777 %s' % basepath())
382
yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
383
yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))
313
385
# TODO(termie): these are blocking calls, it would be great
314
386
# if they weren't.
317
389
f.write(libvirt_xml)
320
os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660))
392
# NOTE(vish): No need add the prefix to console.log
393
os.close(os.open(basepath('console.log', ''),
394
os.O_CREAT | os.O_WRONLY, 0660))
322
396
user = manager.AuthManager().get_user(inst['user_id'])
323
397
project = manager.AuthManager().get_project(inst['project_id'])
400
disk_images = {'image_id': inst['image_id'],
401
'kernel_id': inst['kernel_id'],
402
'ramdisk_id': inst['ramdisk_id']}
325
403
if not os.path.exists(basepath('disk')):
326
yield images.fetch(inst.image_id, basepath('disk-raw'), user, project)
404
yield images.fetch(inst.image_id, basepath('disk-raw'), user,
327
406
if not os.path.exists(basepath('kernel')):
328
yield images.fetch(inst.kernel_id, basepath('kernel'), user, project)
407
yield images.fetch(inst.kernel_id, basepath('kernel'), user,
329
409
if not os.path.exists(basepath('ramdisk')):
330
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project)
410
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
332
413
execute = lambda cmd, process_input=None, check_exit_code=True: \
333
414
process.simple_execute(cmd=cmd,
339
420
network_ref = db.network_get_by_instance(context.get_admin_context(),
341
422
if network_ref['injected']:
342
address = db.instance_get_fixed_address(context.get_admin_context(),
423
admin_context = context.get_admin_context()
424
address = db.instance_get_fixed_address(admin_context, inst['id'])
344
425
with open(FLAGS.injected_network_template) as f:
345
426
net = f.read() % {'address': address,
346
427
'netmask': network_ref['netmask'],
355
436
logging.info('instance %s: injecting net into image %s',
356
437
inst['name'], inst.image_id)
357
yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
438
yield disk.inject_data(basepath('disk-raw'), key, net,
359
441
if os.path.exists(basepath('disk')):
360
442
yield process.simple_execute('rm -f %s' % basepath('disk'))
371
455
yield process.simple_execute('sudo chown root %s' %
372
456
basepath('disk'))
374
def to_xml(self, instance):
458
def to_xml(self, instance, rescue=False):
375
459
# TODO(termie): cache?
376
460
logging.debug('instance %s: starting toXML method', instance['name'])
377
461
network = db.project_get_network(context.get_admin_context(),
378
462
instance['project_id'])
379
463
# FIXME(vish): stick this in db
380
instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
464
instance_type = instance['instance_type']
465
instance_type = instance_types.INSTANCE_TYPES[instance_type]
381
466
ip_address = db.instance_get_fixed_address(context.get_admin_context(),
383
468
# Assume that the gateway also acts as the dhcp server.
391
476
'bridge_name': network['bridge'],
392
477
'mac_address': instance['mac_address'],
393
478
'ip_address': ip_address,
394
'dhcp_server': dhcp_server }
395
libvirt_xml = self.libvirt_xml % xml_info
479
'dhcp_server': dhcp_server}
481
libvirt_xml = self.rescue_xml % xml_info
483
libvirt_xml = self.libvirt_xml % xml_info
396
484
logging.debug('instance %s: finished toXML method', instance['name'])
398
486
return libvirt_xml
400
488
def get_info(self, instance_name):
401
virt_dom = self._conn.lookupByName(instance_name)
490
virt_dom = self._conn.lookupByName(instance_name)
492
raise exception.NotFound("Instance %s not found" % instance_name)
402
493
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
403
494
return {'state': state,
404
495
'max_mem': max_mem,
def nova_base_ipv4_filter(self):
    """Build the libvirt nwfilter XML for baseline IPv4 policy.

    For each of tcp/udp/icmp, emits an accept rule for outbound traffic
    (priority 399) and a drop rule for everything else (priority 400),
    inside a filter named 'nova-base-ipv4' on the 'ipv4' chain.

    Returns the filter definition as an XML string.
    """
    retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
    for protocol in ['tcp', 'udp', 'icmp']:
        # Lower priority number wins, so 'out accept' beats 'inout drop'.
        for direction, action, priority in [('out', 'accept', 399),
                                            ('inout', 'drop', 400)]:
            # NOTE(review): the inner protocol element and return were
            # not visible in the hunk -- reconstructed; confirm.
            retval += """<rule action='%s' direction='%s' priority='%d'>
                            <%s />
                          </rule>""" % (action, direction,
                                        priority, protocol)
    retval += '</filter>'
    return retval
601
690
def nova_base_ipv6_filter(self):
    """Build the libvirt nwfilter XML for baseline IPv6 policy.

    Mirror of nova_base_ipv4_filter: for each of tcp/udp/icmp, an
    outbound accept rule (priority 399) and an inout drop rule
    (priority 400), in a filter named 'nova-base-ipv6' on the 'ipv6'
    chain, using the *-ipv6 protocol elements.

    Returns the filter definition as an XML string.
    """
    retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
    for protocol in ['tcp', 'udp', 'icmp']:
        for direction, action, priority in [('out', 'accept', 399),
                                            ('inout', 'drop', 400)]:
            # NOTE(review): tail of this method (closing tag / return)
            # fell outside the visible hunk -- reconstructed from the
            # ipv4 twin; confirm.
            retval += """<rule action='%s' direction='%s' priority='%d'>
                            <%s-ipv6 />
                          </rule>""" % (action, direction,
                                        priority, protocol)
    retval += '</filter>'
    return retval
646
732
yield self._define_filter(self.nova_dhcp_filter)
647
733
yield self._define_filter(self.nova_base_filter)
649
nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" +
650
" <filterref filter='nova-base' />\n"
735
nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \
736
" <filterref filter='nova-base' />\n" % \
653
739
if FLAGS.allow_project_net_traffic:
654
740
network_ref = db.project_get_network(context.get_admin_context(),
659
745
yield self._define_filter(project_filter)
661
nwfilter_xml += (" <filterref filter='nova-project-%s' />\n"
662
) % instance['project_id']
747
nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \
748
instance['project_id']
664
750
for security_group in instance.security_groups:
665
751
yield self.ensure_security_group_filter(security_group['id'])
667
nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
668
) % security_group['id']
753
nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \
669
755
nwfilter_xml += "</filter>"
671
757
yield self._define_filter(nwfilter_xml)
684
769
rule_xml += "<rule action='accept' direction='in' priority='300'>"
686
771
net, mask = self._get_net_and_mask(rule.cidr)
687
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask)
772
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
773
(rule.protocol, net, mask)
688
774
if rule.protocol in ['tcp', 'udp']:
689
775
rule_xml += "dstportstart='%s' dstportend='%s' " % \
690
776
(rule.from_port, rule.to_port)
691
777
elif rule.protocol == 'icmp':
692
logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port))
778
logging.info('rule.protocol: %r, rule.from_port: %r, '
780
(rule.protocol, rule.from_port, rule.to_port))
693
781
if rule.from_port != -1:
694
782
rule_xml += "type='%s' " % rule.from_port
695
783
if rule.to_port != -1: