# Copyright 2013-2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

__metaclass__ = type
__all__ = [
    'available_boot_resources',
    'main',
    'make_arg_parser',
    ]

import errno
import functools
import glob
import json
import logging
import os

from argparse import ArgumentParser
from collections import (
    defaultdict,
    namedtuple,
    )
from datetime import datetime
from gzip import GzipFile
from logging import getLogger
from textwrap import dedent

from provisioningserver.boot import BootMethodRegistry
from provisioningserver.boot.tftppath import list_boot_images
from provisioningserver.config import BootConfig
from provisioningserver.utils import (
    atomic_write,
    call_and_check,
    locate_config,
    read_text_file,
    )
from simplestreams.contentsource import FdContentSource
from simplestreams.mirrors import (
    BasicMirrorWriter,
    UrlMirrorReader,
    )
from simplestreams.objectstores import FileStore
from simplestreams.util import (
    item_checksums,
    path_from_mirror_url,
    policy_read_signed,
    products_exdata,
    )
def init_logger(log_level=logging.INFO):
    """Set up and return this module's logger.

    Attaches a stream handler with a timestamped format and sets the
    requested level.

    :param log_level: Logging level for the returned logger.
    :return: The configured `logging.Logger`.
    """
    logger = getLogger(__name__)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(log_level)
    return logger


logger = init_logger()
class NoConfigFile(Exception):
    """Raised when the config file for the script doesn't exist."""
def create_empty_hierarchy():
    """Create hierarchy of dicts which supports h[key1]...[keyN] accesses.

    Generated object automatically creates nonexistent levels of hierarchy
    when accessed the following way: h[arch][subarch][release]=something.

    :return: Generated hierarchy of dicts.
    """
    # Each missing key materialises another auto-vivifying level.
    return defaultdict(create_empty_hierarchy)
# A tuple of the items that together select a boot image.
# NOTE: the typename is a plain literal; a bytes typename (b'ImageSpec')
# breaks under Python 3 and is unnecessary -- Python 2.7's namedtuple
# coerces unicode typenames with str() itself.
ImageSpec = namedtuple('ImageSpec', [
    'arch',
    'subarch',
    'release',
    'label',
    ])


def iterate_boot_resources(boot_dict):
    """Iterate a multi-level dict of boot images.

    Yields each combination of architecture, subarchitecture, release, and
    label for which `boot_dict` has an entry, as an `ImageSpec`.

    :param boot_dict: Four-level dict of dicts representing boot images: the
        top level maps the architectures to sub-dicts, each of which maps
        subarchitectures to further dicts, each of which in turn maps
        releases to yet more dicts, each of which maps release labels to any
        kind of item it likes.
    """
    # Sort at every level so the iteration order is deterministic.
    for arch, subarches in sorted(boot_dict.items()):
        for subarch, releases in sorted(subarches.items()):
            for release, labels in sorted(releases.items()):
                for label in sorted(labels.keys()):
                    yield ImageSpec(arch, subarch, release, label)
def value_passes_filter_list(filter_list, property_value):
    """Does the given property of a boot image pass the given filter list?

    The value passes if either it matches one of the entries in the list of
    filter values, or one of the filter values is an asterisk (`*`).
    """
    return '*' in filter_list or property_value in filter_list
def value_passes_filter(filter_value, property_value):
    """Does the given property of a boot image pass the given filter?

    The value passes the filter if either the filter value is an asterisk
    (`*`) or the value is equal to the filter value.
    """
    return filter_value in ('*', property_value)
def image_passes_filter(filters, arch, subarch, release, label):
    """Filter a boot image against configured import filters.

    :param filters: A list of dicts describing the filters, as in
        `boot_merge`.  If the list is empty, or `None`, any image matches.
        Any entry in a filter may be a string containing just an asterisk
        (`*`) to denote that the entry will match any value.
    :param arch: The given boot image's architecture.
    :param subarch: The given boot image's subarchitecture.
    :param release: The given boot image's OS release.
    :param label: The given boot image's label.
    :return: Whether the image matches any of the dicts in `filters`.
    """
    # No filters configured means everything is wanted.
    if filters is None or len(filters) == 0:
        return True
    for filter_dict in filters:
        item_matches = (
            value_passes_filter(filter_dict['release'], release) and
            value_passes_filter_list(filter_dict['arches'], arch) and
            value_passes_filter_list(filter_dict['subarches'], subarch) and
            value_passes_filter_list(filter_dict['labels'], label)
        )
        if item_matches:
            return True
    return False
def boot_merge(boot1, boot2, filters=None):
    """Add entries from the second multi-level dict to the first one.

    Function copies d[arch][subarch][release]=value chains from the second
    dictionary to the first one if they don't exist there and pass optional
    check done by filters.

    :param boot1: first dict which will be extended in-place.
    :param boot2: second dict which will be used as a source of new entries.
    :param filters: list of dicts each of which contains 'arch', 'subarch',
        'release' keys; function takes d[arch][subarch][release] chain to the
        first dict only if filters contain at least one dict with
        arch in d['arches'], subarch in d['subarch'], d['release'] == release;
        dict may have '*' as a value for 'arch' and 'release' keys and as a
        member of 'subarch' list -- in that case key-specific check always
        passes.
    """
    for arch, subarch, release, label in iterate_boot_resources(boot2):
        if image_passes_filter(filters, arch, subarch, release, label):
            logger.info(
                "Merging boot resource for %s/%s/%s/%s.",
                arch, subarch, release, label)
            boot_resource = boot2[arch][subarch][release][label]
            # Do not override an existing entry with the same
            # arch/subarch/release/label: the first entry found takes
            # precedence.  (The earlier unconditional assignment that
            # preceded this guard overwrote existing entries and made the
            # guard dead code; it has been removed.)
            if not boot1[arch][subarch][release][label]:
                boot1[arch][subarch][release][label] = boot_resource
def boot_reverse(boot):
    """Determine a set of subarches which should be deployed by boot resource.

    Function reverses h[arch][subarch][release]=boot_resource hierarchy to
    form boot resource to subarch relation. Many subarches may be deployed
    by a single boot resource (in which case boot_resource=[subarch1,
    subarch2] relation will be created). We note only subarchitectures and
    ignore architectures because boot resource is tightly coupled with
    architecture it can deploy according to metadata format. We can figure
    out for which architecture we need to use a specific boot resource by
    looking at its description in metadata. We can't do the same with
    subarch because we may want to use boot resource only for a specific
    subset of subarches it can be used for. To represent boot resource to
    subarch relation we generate the following multi-level dictionary:
    d[content_id][product_name]=[subarches] where 'content_id' and
    'product_name' values come from metadata information and allow us to
    uniquely identify a specific boot resource.

    :param boot: Hierarchy of dicts d[arch][subarch][release]=boot_resource
    :return: Hierarchy of dictionaries
        d[content_id][product_name]=[subarches] which describes boot
        resource to subarches relation for all available boot resources
        (products).
    """
    reverse = create_empty_hierarchy()

    for arch, subarch, release, label in iterate_boot_resources(boot):
        boot_resource = boot[arch][subarch][release][label]
        content_id = boot_resource['content_id']
        product_name = boot_resource['product_name']
        version_name = boot_resource['version_name']
        # Accumulate subarches per (content_id, product_name, version_name);
        # the current subarch is prepended to whatever was gathered so far.
        existent = list(reverse[content_id][product_name][version_name])
        reverse[content_id][product_name][version_name] = [subarch] + existent

    return reverse
def tgt_entry(arch, subarch, release, label, image):
    """Generate tgt target used to commission arch/subarch with release.

    Tgt target used to commission arch/subarch machine with a specific
    Ubuntu release should have the following name:
    ephemeral-arch-subarch-release.  This function creates target
    description in a format used by tgt-admin.  It uses arch, subarch and
    release to generate target name and image as a path to image file which
    should be shared.  Tgt target is marked as read-only.  Tgt target has
    'allow-in-use' option enabled because this script actively uses
    hardlinks to do image management and root images in different folders
    may point to the same inode.  Tgt doesn't allow us to use the same
    inode for different tgt targets (even read-only targets which looks
    like a bug to me) without this option enabled.

    :param arch: Architecture name we generate tgt target for
    :param subarch: Subarchitecture name we generate tgt target for
    :param release: Ubuntu release we generate tgt target for
    :param label: The images' label
    :param image: Path to the image which should be shared via tgt/iscsi
    :return: Tgt entry which can be written to tgt-admin configuration file
    """
    prefix = 'iqn.2004-05.com.ubuntu:maas'
    target_name = 'ephemeral-%s-%s-%s-%s' % (arch, subarch, release, label)
    # NOTE(review): the target options below are reconstructed from the
    # docstring (read-only, allow-in-use) -- confirm against tgt-admin docs.
    entry = dedent("""\
        <target {prefix}:{target_name}>
            readonly 1
            allow-in-use yes
            backing-store "{image}"
            driver iscsi
        </target>
        """).format(prefix=prefix, target_name=target_name, image=image)
    return entry
def mirror_info_for_path(path, unsigned_policy=None, keyring=None):
    """Split a mirror URL into parts usable by simplestreams.

    :param path: Mirror URL, possibly pointing at an index file.
    :param unsigned_policy: Policy callable for unsigned (``.json``)
        indexes; defaults to a pass-through that returns content unchanged.
    :param keyring: GPG keyring path bound into the returned policy.
    :return: Tuple of (mirror base URL, relative path, policy callable).
    """
    if unsigned_policy is None:
        unsigned_policy = lambda content, path, keyring: content
    (mirror, rpath) = path_from_mirror_url(path, None)
    # Signed indexes are GPG-verified; plain .json indexes are taken as-is.
    policy = policy_read_signed
    if rpath.endswith(".json"):
        policy = unsigned_policy
    policy = functools.partial(policy, keyring=keyring)
    return (mirror, rpath, policy)
class RepoDumper(BasicMirrorWriter):
    """Gather metadata about boot images available in a simplestreams repo.

    `dump` walks a repository without downloading any image data and
    returns a hierarchy d[arch][subarch][release][label] = compact item.
    """

    def dump(self, path, keyring=None):
        """Sync against the repo at `path` and return its boot hierarchy."""
        self._boot = create_empty_hierarchy()
        (mirror, rpath, policy) = mirror_info_for_path(path, keyring=keyring)
        reader = UrlMirrorReader(mirror, policy=policy)
        super(RepoDumper, self).sync(reader, rpath)
        return self._boot

    def load_products(self, path=None, content_id=None):
        """Overridden from `BasicMirrorWriter`; no product state to load."""
        return

    def item_cleanup(self, item):
        """Return a copy of `item` reduced to the keys we care about."""
        keys_to_keep = ['content_id', 'product_name', 'version_name', 'path']
        compact_item = {key: item[key] for key in keys_to_keep}
        return compact_item

    def insert_item(self, data, src, target, pedigree, contentsource):
        """Record a product item into the hierarchy (metadata only)."""
        item = products_exdata(src, pedigree)
        arch, subarches = item['arch'], item['subarches']
        release = item['release']
        label = item['label']
        compact_item = self.item_cleanup(item)
        for subarch in subarches.split(','):
            # First item seen for an arch/subarch/release/label wins.
            if not self._boot[arch][subarch][release][label]:
                self._boot[arch][subarch][release][label] = compact_item
class RepoWriter(BasicMirrorWriter):
    """Download boot images from a simplestreams repo into a local tree.

    Only versions listed in `info` (the reverse hierarchy produced by
    `boot_reverse`) are fetched; downloaded files are hardlinked into
    root/arch/subarch/release/label folders.
    """

    def __init__(self, root_path, cache_path, info):
        self._root_path = os.path.abspath(root_path)
        self._info = info
        self._cache = FileStore(os.path.abspath(cache_path))
        super(RepoWriter, self).__init__()

    def write(self, path, keyring=None):
        """Sync the repo at `path` into the local image tree."""
        (mirror, rpath, policy) = mirror_info_for_path(path, keyring=keyring)
        reader = UrlMirrorReader(mirror, policy=policy)
        super(RepoWriter, self).sync(reader, rpath)

    def load_products(self, path=None, content_id=None):
        """Overridden from `BasicMirrorWriter`; no product state to load."""
        return

    def filter_version(self, data, src, target, pedigree):
        """Only download versions that appear in the wanted `info` map."""
        item = products_exdata(src, pedigree)
        content_id, product_name = item['content_id'], item['product_name']
        version_name = item['version_name']
        return (
            content_id in self._info and
            product_name in self._info[content_id] and
            version_name in self._info[content_id][product_name]
        )

    def insert_file(self, name, tag, checksums, size, contentsource):
        """Store a plain file in the cache; return its (path, name) link."""
        logger.info("Inserting file %s (tag=%s, size=%s).", name, tag, size)
        self._cache.insert(
            tag, contentsource, checksums, mutable=False, size=size)
        return [(self._cache._fullpath(tag), name)]

    def insert_root_image(self, tag, checksums, size, contentsource):
        """Store a root image, uncompressing it and deriving a root tarball.

        :return: List of (cache path, link name) pairs to publish.
        """
        root_image_tag = 'root-image-%s' % tag
        root_image_path = self._cache._fullpath(root_image_tag)
        root_tgz_tag = 'root-tgz-%s' % tag
        root_tgz_path = self._cache._fullpath(root_tgz_tag)
        if not os.path.isfile(root_image_path):
            logger.info("New root image: %s.", root_image_path)
            self._cache.insert(
                tag, contentsource, checksums, mutable=False, size=size)
            # Uncompress the downloaded .gz into the cache, then drop the
            # compressed copy.
            uncompressed = FdContentSource(
                GzipFile(self._cache._fullpath(tag)))
            self._cache.insert(root_image_tag, uncompressed, mutable=False)
            self._cache.remove(tag)
        if not os.path.isfile(root_tgz_path):
            logger.info("Converting root tarball: %s.", root_tgz_path)
            call_uec2roottar(root_image_path, root_tgz_path)
        return [(root_image_path, 'root-image'), (root_tgz_path, 'root-tgz')]

    def insert_item(self, data, src, target, pedigree, contentsource):
        """Download one product item and hardlink it into the image tree."""
        item = products_exdata(src, pedigree)
        checksums = item_checksums(data)
        tag = checksums['sha256']
        size = data['size']
        ftype = item['ftype']
        if ftype == 'root-image.gz':
            links = self.insert_root_image(tag, checksums, size, contentsource)
        else:
            links = self.insert_file(
                ftype, tag, checksums, size, contentsource)
        content_id = item['content_id']
        prod_name = item['product_name']
        version_name = item['version_name']
        for subarch in self._info[content_id][prod_name][version_name]:
            dst_folder = os.path.join(
                self._root_path, item['arch'], subarch, item['release'],
                item['label'])
            if not os.path.exists(dst_folder):
                os.makedirs(dst_folder)
            for src, link_name in links:
                link_path = os.path.join(dst_folder, link_name)
                # Replace any stale link so os.link does not fail.
                if os.path.isfile(link_path):
                    os.remove(link_path)
                os.link(src, link_path)
def available_boot_resources(root):
    """Yield (arch, subarch, release, label) for each image tree under root.

    Scans exactly four directory levels below `root`.  Note: splitting on
    '/' assumes a POSIX path separator.
    """
    for resource_path in glob.glob(os.path.join(root, '*/*/*/*')):
        arch, subarch, release, label = resource_path.split('/')[-4:]
        yield (arch, subarch, release, label)
def install_boot_loaders(destination):
    """Install all the required files from each bootloader method.

    :param destination: Directory where the loaders should be stored.
    """
    for _, method in BootMethodRegistry:
        method.install_bootloader(destination)
def call_uec2roottar(*args):
    """Invoke `uec2roottar` with the given arguments.

    Here only so tests can stub it out.
    """
    call_and_check(["uec2roottar"] + list(args))
def make_arg_parser(doc):
    """Create an `argparse.ArgumentParser` for this script."""

    parser = ArgumentParser(description=doc)
    default_config = locate_config("bootresources.yaml")
    parser.add_argument(
        '--config-file', action="store", default=default_config,
        help="Path to config file "
             "(defaults to %s)" % default_config)
    return parser
def compose_targets_conf(snapshot_path):
    """Produce the contents of a snapshot's tgt conf file.

    :param snapshot_path: Filesystem path to a snapshot of boot images.
    :return: Contents for a `targets.conf` file.
    """
    # Use a set to make sure we don't register duplicate entries in tgt.
    entries = set()
    for item in list_boot_images(snapshot_path):
        arch = item['architecture']
        subarch = item['subarchitecture']
        release = item['release']
        label = item['label']
        entries.add((arch, subarch, release, label))
    tgt_entries = []
    for arch, subarch, release, label in sorted(entries):
        root_image = os.path.join(
            snapshot_path, arch, subarch, release, label, 'root-image')
        # Only images that actually have a root-image get a tgt target.
        if os.path.isfile(root_image):
            entry = tgt_entry(arch, subarch, release, label, root_image)
            tgt_entries.append(entry)
    text = ''.join(tgt_entries)
    return text.encode('utf-8')
def meta_contains(storage, content):
    """Does the `maas.meta` file match `content`?

    If the file's contents match the latest data, there is no need to update.
    """
    current_meta = os.path.join(storage, 'current', 'maas.meta')
    return (
        os.path.isfile(current_meta) and
        content == read_text_file(current_meta)
    )
def compose_snapshot_path(storage):
    """Put together a path for a new snapshot.

    A snapshot is a directory in `storage` containing images. The name
    contains the date in a sortable format.

    :param storage: Root storage directory for boot images.
    :return: Path of a timestamped snapshot directory under `storage`.
    """
    snapshot_name = 'snapshot-%s' % datetime.now().strftime('%Y%m%d-%H%M%S')
    return os.path.join(storage, snapshot_name)
def update_current_symlink(storage, latest_snapshot):
    """Symlink `latest_snapshot` as the "current" snapshot."""
    symlink_path = os.path.join(storage, 'current')
    # lexists (not exists) so a dangling old symlink is still replaced.
    if os.path.lexists(symlink_path):
        os.unlink(symlink_path)
    os.symlink(latest_snapshot, symlink_path)
def write_snapshot_metadata(snapshot, meta_file_content, targets_conf,
                            targets_conf_content):
    """Write "meta" file and tgt config for `snapshot`.

    :param snapshot: Path to the snapshot directory.
    :param meta_file_content: Serialized content for `maas.meta`.
    :param targets_conf: Path of the tgt config file to write.
    :param targets_conf_content: Content for the tgt config file.
    """
    meta_file = os.path.join(snapshot, 'maas.meta')
    # 0o644 spelling is valid on both Python 2.6+ and Python 3.
    atomic_write(meta_file_content, meta_file, mode=0o644)
    atomic_write(targets_conf_content, targets_conf, mode=0o644)
def main(args):
    """Import boot resources according to the given configuration.

    :param args: Parsed command-line arguments with a `config_file`
        attribute.
    :raise NoConfigFile: When the configuration file does not exist.
    """
    logger.info("Importing boot resources.")
    # The config file is required. We do not fall back to defaults if it's
    # not there.
    try:
        config = BootConfig.load_from_cache(filename=args.config_file)
    except IOError as ex:
        if ex.errno == errno.ENOENT:
            # No config file. We have helpful error output for this.
            raise NoConfigFile(ex)
        else:
            # Unexpected error; propagate.
            raise

    storage = config['boot']['storage']

    boot = create_empty_hierarchy()
    dumper = RepoDumper()

    for source in config['boot']['sources']:
        repo_boot = dumper.dump(source['path'], keyring=source['keyring'])
        boot_merge(boot, repo_boot, source['selections'])

    meta_file_content = json.dumps(boot, sort_keys=True)
    if meta_contains(storage, meta_file_content):
        # The current maas.meta already contains the new config. No need to
        # rewrite anything.
        return

    reverse_boot = boot_reverse(boot)
    snapshot_path = compose_snapshot_path(storage)
    cache_path = os.path.join(storage, 'cache')
    targets_conf = os.path.join(snapshot_path, 'maas.tgt')
    writer = RepoWriter(snapshot_path, cache_path, reverse_boot)

    for source in config['boot']['sources']:
        writer.write(source['path'], source['keyring'])

    targets_conf_content = compose_targets_conf(snapshot_path)

    logger.info("Writing metadata and updating iSCSI targets.")
    write_snapshot_metadata(
        snapshot_path, meta_file_content, targets_conf, targets_conf_content)
    call_and_check(['tgt-admin', '--conf', targets_conf, '--update', 'ALL'])

    logger.info("Installing boot images snapshot %s.", snapshot_path)
    install_boot_loaders(snapshot_path)

    # If we got here, all went well. This is now truly the "current" snapshot.
    update_current_symlink(storage, snapshot_path)
    logger.info("Import done.")