# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
17
from Crypto.PublicKey import RSA
18
import novaclient.exceptions as novaexception
20
from heat.common import exception
21
from heat.openstack.common import log as logging
22
from heat.engine import scheduler
23
from heat.engine.resources import instance
24
from heat.engine.resources.rackspace import rackspace_resource
25
from heat.db.sqlalchemy import api as db_api
27
logger = logging.getLogger(__name__)
30
class CloudServer(instance.Instance):
31
"""Resource for Rackspace Cloud Servers."""
33
properties_schema = {'flavor': {'Type': 'String', 'Required': True},
34
'image': {'Type': 'String', 'Required': True},
35
'user_data': {'Type': 'String'},
36
'key_name': {'Type': 'String'},
37
'Volumes': {'Type': 'List'},
38
'name': {'Type': 'String'}}
40
attributes_schema = {'PrivateDnsName': ('Private DNS name of the specified'
42
'PublicDnsName': ('Public DNS name of the specified '
44
'PrivateIp': ('Private IP address of the specified '
46
'PublicIp': ('Public IP address of the specified '
49
base_script = """#!/bin/bash
51
# Install cloud-init and heat-cfntools
53
# Create data source for cloud-init
54
mkdir -p /var/lib/cloud/seed/nocloud-net
55
mv /tmp/userdata /var/lib/cloud/seed/nocloud-net/user-data
56
touch /var/lib/cloud/seed/nocloud-net/meta-data
57
chmod 600 /var/lib/cloud/seed/nocloud-net/*
59
# Run cloud-init & cfn-init
60
cloud-init start || cloud-init init
61
bash -x /var/lib/cloud/data/cfn-userdata > /root/cfn-userdata.log 2>&1
64
# - Ubuntu 12.04: Verified working
65
ubuntu_script = base_script % """\
67
apt-get install -y cloud-init python-boto python-pip gcc python-dev
68
pip install heat-cfntools
69
cfn-create-aws-symlinks --source /usr/local/bin
72
# - Fedora 17: Verified working
73
# - Fedora 18: Not working. selinux needs to be in "Permissive"
74
# mode for cloud-init to work. It's disabled by default in the
75
# Rackspace Cloud Servers image. To enable selinux, a reboot is
77
# - Fedora 19: Verified working
78
fedora_script = base_script % """\
79
yum install -y cloud-init python-boto python-pip gcc python-devel
80
pip-python install heat-cfntools
81
cfn-create-aws-symlinks
84
# - Centos 6.4: Verified working
85
centos_script = base_script % """\
86
rpm -ivh http://mirror.rackspace.com/epel/6/i386/epel-release-6-8.noarch.rpm
87
yum install -y cloud-init python-boto python-pip gcc python-devel \
89
pip-python install heat-cfntools
92
# - RHEL 6.4: Verified working
93
rhel_script = base_script % """\
94
rpm -ivh http://mirror.rackspace.com/epel/6/i386/epel-release-6-8.noarch.rpm
95
# The RPM DB stays locked for a few secs
96
while fuser /var/lib/rpm/*; do sleep 1; done
97
yum install -y cloud-init python-boto python-pip gcc python-devel \
99
pip-python install heat-cfntools
100
cfn-create-aws-symlinks
103
# - Debian 7: Not working (heat-cfntools patch submitted)
104
# TODO(jason): Test with Debian 7 as soon as heat-cfntools patch
105
# is in https://review.openstack.org/#/c/38822/
106
debian_script = base_script % """\
107
echo "deb http://mirror.rackspace.com/debian wheezy-backports main" >> \
108
/etc/apt/sources.list
110
apt-get -t wheezy-backports install -y cloud-init
111
apt-get install -y python-pip gcc python-dev
112
pip install heat-cfntools
115
# - Arch 2013.6: Not working (deps not in default package repos)
116
# TODO(jason): Install cloud-init & other deps from third-party repos
117
arch_script = base_script % """\
118
pacman -S --noconfirm python-pip gcc
121
# - Gentoo 13.2: Not working (deps not in default package repos)
122
# TODO(jason): Install cloud-init & other deps from third-party repos
123
gentoo_script = base_script % """\
124
emerge cloud-init python-boto python-pip gcc python-devel
127
# - OpenSUSE 12.3: Not working (deps not in default package repos)
128
# TODO(jason): Install cloud-init & other deps from third-party repos
129
opensuse_script = base_script % """\
130
zypper --non-interactive rm patterns-openSUSE-minimal_base-conflicts
131
zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
134
# List of supported Linux distros and their corresponding config scripts
135
image_scripts = {'arch': None,
136
'centos': centos_script,
138
'fedora': fedora_script,
142
'ubuntu': ubuntu_script}
144
# Template keys supported for handle_update. Properties not
145
# listed here trigger an UpdateReplace
146
update_allowed_keys = ('Metadata', 'Properties')
147
update_allowed_properties = ('flavor', 'name')
149
def __init__(self, name, json_snippet, stack):
150
super(CloudServer, self).__init__(name, json_snippet, stack)
151
self._private_key = None
154
self._public_ip = None
155
self._private_ip = None
156
self.rs = rackspace_resource.RackspaceResource(name,
160
def physical_resource_name(self):
161
name = self.properties.get('name')
165
return super(CloudServer, self).physical_resource_name()
168
return self.rs.nova() # Override the Instance method
171
return self.rs.cinder()
175
"""Get the Cloud Server object."""
177
logger.debug("Calling nova().servers.get()")
178
self._server = self.nova().servers.get(self.resource_id)
183
"""Get the Linux distribution for this server."""
185
logger.debug("Calling nova().images.get()")
186
image = self.nova().images.get(self.properties['image'])
187
self._distro = image.metadata['os_distro']
192
"""Get the config script for the Cloud Server image."""
193
return self.image_scripts[self.distro]
197
"""Get the flavors from the API."""
198
logger.debug("Calling nova().flavors.list()")
199
return [flavor.id for flavor in self.nova().flavors.list()]
202
def private_key(self):
203
"""Return the private SSH key for the resource."""
204
if self._private_key:
205
return self._private_key
206
if self.id is not None:
207
private_key = db_api.resource_data_get(self, 'private_key')
210
self._private_key = private_key
214
def private_key(self, private_key):
    """Remember the resource's private SSH key, persisting when possible.

    The key is always cached on the instance; once the resource has a
    database id it is additionally written to resource data (the final
    ``True`` argument requests encrypted storage — TODO confirm against
    ``db_api.resource_data_set``).
    """
    self._private_key = private_key
    if self.id is None:
        # Resource not yet in the database; nothing to persist.
        return
    db_api.resource_data_set(self, 'private_key', private_key, True)
220
def _get_ip(self, ip_type):
221
"""Return the IP of the Cloud Server."""
222
if ip_type in self.server.addresses:
223
for ip in self.server.addresses[ip_type]:
224
if ip['version'] == 4:
227
raise exception.Error("Could not determine the %s IP of %s." %
228
(ip_type, self.properties['image']))
232
"""Return the public IP of the Cloud Server."""
233
if not self._public_ip:
234
self._public_ip = self._get_ip('public')
235
return self._public_ip
238
def private_ip(self):
239
"""Return the private IP of the Cloud Server."""
240
if not self._private_ip:
241
self._private_ip = self._get_ip('private')
242
return self._private_ip
245
def has_userdata(self):
246
if self.properties['user_data'] or self.metadata != {}:
252
"""Validate user parameters."""
253
if self.properties['flavor'] not in self.flavors:
254
return {'Error': "flavor not found."}
256
# It's okay if there's no script, as long as user_data and
258
if not self.script and self.has_userdata:
259
return {'Error': "user_data/metadata are not supported with %s." %
260
self.properties['image']}
262
def _run_ssh_command(self, command):
263
"""Run a shell command on the Cloud Server via SSH."""
264
with tempfile.NamedTemporaryFile() as private_key_file:
265
private_key_file.write(self.private_key)
266
private_key_file.seek(0)
267
ssh = paramiko.SSHClient()
268
ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
269
ssh.connect(self.public_ip,
271
key_filename=private_key_file.name)
272
stdin, stdout, stderr = ssh.exec_command(command)
273
logger.debug(stdout.read())
274
logger.debug(stderr.read())
276
def _sftp_files(self, files):
277
"""Transfer files to the Cloud Server via SFTP."""
278
with tempfile.NamedTemporaryFile() as private_key_file:
279
private_key_file.write(self.private_key)
280
private_key_file.seek(0)
281
pkey = paramiko.RSAKey.from_private_key_file(private_key_file.name)
282
transport = paramiko.Transport((self.public_ip, 22))
283
transport.connect(hostkey=None, username="root", pkey=pkey)
284
sftp = paramiko.SFTPClient.from_transport(transport)
285
for remote_file in files:
286
sftp_file = sftp.open(remote_file['path'], 'w')
287
sftp_file.write(remote_file['data'])
290
def handle_create(self):
291
"""Create a Rackspace Cloud Servers container.
293
Rackspace Cloud Servers does not have the metadata service
294
running, so we have to transfer the user-data file to the
295
server and then trigger cloud-init.
297
# Retrieve server creation parameters from properties
298
flavor = self.properties['flavor']
300
# Generate SSH public/private keypair
301
if self._private_key is not None:
302
rsa = RSA.importKey(self._private_key)
304
rsa = RSA.generate(1024)
305
self.private_key = rsa.exportKey()
306
public_keys = [rsa.publickey().exportKey('OpenSSH')]
307
if self.properties.get('key_name'):
308
key_name = self.properties['key_name']
309
public_keys.append(self._get_keypair(key_name).public_key)
310
personality_files = {
311
"/root/.ssh/authorized_keys": '\n'.join(public_keys)}
314
client = self.nova().servers
315
logger.debug("Calling nova().servers.create()")
316
server = client.create(self.physical_resource_name(),
317
self.properties['image'],
319
files=personality_files)
321
# Save resource ID to db
322
self.resource_id_set(server.id)
324
return server, scheduler.TaskRunner(self._attach_volumes_task())
326
def _attach_volumes_task(self):
    """Build a polling task group that attaches all declared volumes.

    One ``TaskRunner`` wrapping ``_attach_volume`` is created per
    (volume_id, device) pair reported by ``self.volumes()``.
    """
    runners = [scheduler.TaskRunner(self._attach_volume, vol_id, dev)
               for vol_id, dev in self.volumes()]
    return scheduler.PollingTaskGroup(runners)
331
def _attach_volume(self, volume_id, device):
332
logger.debug("Calling nova().volumes.create_server_volume()")
333
self.nova().volumes.create_server_volume(self.server.id,
337
volume = self.cinder().get(volume_id)
338
while volume.status in ('available', 'attaching'):
342
if volume.status != 'in-use':
343
raise exception.Error(volume.status)
345
def _detach_volumes_task(self):
    """Build a polling task group that detaches all declared volumes.

    Mirrors ``_attach_volumes_task``: one ``TaskRunner`` wrapping
    ``_detach_volume`` per volume; the device name is not needed for
    detachment and is ignored.
    """
    runners = [scheduler.TaskRunner(self._detach_volume, vol_id)
               for vol_id, _device in self.volumes()]
    return scheduler.PollingTaskGroup(runners)
350
def _detach_volume(self, volume_id):
351
volume = self.cinder().get(volume_id)
354
while volume.status in ('in-use', 'detaching'):
358
if volume.status != 'available':
359
raise exception.Error(volume.status)
361
def check_create_complete(self, cookie):
362
"""Check if server creation is complete and handle server configs."""
363
if not self._check_active(cookie):
366
if self.has_userdata:
367
# Create heat-script and userdata files on server
368
raw_userdata = self.properties['user_data'] or ''
369
userdata = self._build_userdata(raw_userdata)
371
files = [{'path': "/tmp/userdata", 'data': userdata},
372
{'path': "/root/heat-script.sh", 'data': self.script}]
373
self._sftp_files(files)
375
# Connect via SSH and run script
376
cmd = "bash -ex /root/heat-script.sh > /root/heat-script.log 2>&1"
377
self._run_ssh_command(cmd)
381
# TODO(jason): Make this consistent with Instance and inherit
382
def _delete_server(self, server):
383
"""Return a coroutine that deletes the Cloud Server."""
389
if server.status == "DELETED":
391
elif server.status == "ERROR":
392
raise exception.Error("Deletion of server %s failed." %
394
except novaexception.NotFound:
397
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
398
"""Try to update a Cloud Server's parameters.
400
If the Cloud Server's Metadata or flavor changed, update the
401
Cloud Server. If any other parameters changed, re-create the
402
Cloud Server with the new parameters.
404
if 'Metadata' in tmpl_diff:
405
self.metadata = json_snippet['Metadata']
406
metadata_string = json.dumps(self.metadata)
408
files = [{'path': "/var/cache/heat-cfntools/last_metadata",
409
'data': metadata_string}]
410
self._sftp_files(files)
412
command = "bash -x /var/lib/cloud/data/cfn-userdata > " + \
413
"/root/cfn-userdata.log 2>&1"
414
self._run_ssh_command(command)
416
if 'flavor' in prop_diff:
417
self.flavor = json_snippet['Properties']['flavor']
418
self.server.resize(self.flavor)
419
resize = scheduler.TaskRunner(self._check_resize,
422
resize(wait_time=1.0)
424
# If name is the only update, fail update
425
if prop_diff.keys() == ['name'] and \
426
tmpl_diff.keys() == ['Properties']:
427
raise exception.NotSupported(feature="Cloud Server rename")
428
# Other updates were successful, so don't cause update to fail
429
elif 'name' in prop_diff:
430
logger.info("Cloud Server rename not supported.")
434
def _resolve_attribute(self, key):
435
"""Return the method that provides a given template attribute."""
436
attribute_function = {'PublicIp': self.public_ip,
437
'PrivateIp': self.private_ip,
438
'PublicDnsName': self.public_ip,
439
'PrivateDnsName': self.public_ip}
440
if key not in attribute_function:
441
raise exception.InvalidTemplateAttribute(resource=self.name,
443
function = attribute_function[key]
444
logger.info('%s._resolve_attribute(%s) == %s'
445
% (self.name, key, function))
446
return unicode(function)
449
# pyrax module is required to work with Rackspace cloud server provider.
450
# If it is not installed, don't register cloud server provider
451
def resource_mapping():
    """Return the Heat resource-type → class mapping for this module.

    The pyrax library is required to talk to the Rackspace cloud; when it
    is not installed the provider is not registered.  Returns an empty
    mapping (rather than the implicit ``None``) in that case so callers
    that iterate the result do not fail.
    """
    if rackspace_resource.PYRAX_INSTALLED:
        return {'Rackspace::Cloud::Server': CloudServer}
    return {}