# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import copy
import os
import sys

import six
from six.moves import cPickle as pickle

from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)

from cloudinit import handlers

# Default handlers (used if not overridden)
from cloudinit.handlers import boot_hook as bh_part
from cloudinit.handlers import cloud_config as cc_part
from cloudinit.handlers import shell_script as ss_part
from cloudinit.handlers import upstart_job as up_part

from cloudinit import cloud
from cloudinit import config
from cloudinit import distros
from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
from cloudinit.net import cmdline
from cloudinit.reporting import events
from cloudinit import sources
from cloudinit import type_utils
from cloudinit import util

LOG = logging.getLogger(__name__)

NULL_DATA_SOURCE = None
NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
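
# An orientation note for readers: this module implements the two stages
# defined below. Init discovers and caches a datasource and then consumes
# user-data/vendor-data through part handlers; Modules reads lists of config
# modules from the merged configuration and runs them. This summary describes
# only the code in this file.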


class Init(object):
    def __init__(self, ds_deps=None, reporter=None):
        if ds_deps is not None:
            self.ds_deps = ds_deps
        else:
            self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
        # Created on first use
        self._cfg = None
        self._paths = None
        self._distro = None
        # Changed only when a fetch occurs
        self.datasource = NULL_DATA_SOURCE
        self.ds_restored = False
        self._previous_iid = None

        if reporter is None:
            reporter = events.ReportEventStack(
                name="init-reporter", description="init-desc",
                reporting_enabled=False)
        self.reporter = reporter

    def _reset(self, reset_ds=False):
        # Recreated on access
        self._cfg = None
        self._paths = None
        self._distro = None
        if reset_ds:
            self.datasource = NULL_DATA_SOURCE
            self.ds_restored = False

    @property
    def distro(self):
        if not self._distro:
            # Try to find the right class to use
            system_config = self._extract_cfg('system')
            distro_name = system_config.pop('distro', 'ubuntu')
            distro_cls = distros.fetch(distro_name)
            LOG.debug("Using distro class %s", distro_cls)
            self._distro = distro_cls(distro_name, system_config, self.paths)
            # If we have an active datasource we need to adjust
            # said datasource and move its distro/system config
            # from whatever it was to a new set...
            if self.datasource is not NULL_DATA_SOURCE:
                self.datasource.distro = self._distro
                self.datasource.sys_cfg = system_config
        return self._distro

    @property
    def cfg(self):
        return self._extract_cfg('restricted')

    def _extract_cfg(self, restriction):
        # Ensure actually read
        self.read_cfg()
        # Nobody gets the real config
        ocfg = copy.deepcopy(self._cfg)
        if restriction == 'restricted':
            ocfg.pop('system_info', None)
        elif restriction == 'system':
            ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
        elif restriction == 'paths':
            ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
        if not isinstance(ocfg, (dict)):
            ocfg = {}
        return ocfg

    @property
    def paths(self):
        if not self._paths:
            path_info = self._extract_cfg('paths')
            self._paths = helpers.Paths(path_info, self.datasource)
        return self._paths

    def _initial_subdirs(self):
        c_dir = self.paths.cloud_dir
        initial_dirs = [
            c_dir,
            os.path.join(c_dir, 'scripts'),
            os.path.join(c_dir, 'scripts', 'per-instance'),
            os.path.join(c_dir, 'scripts', 'per-once'),
            os.path.join(c_dir, 'scripts', 'per-boot'),
            os.path.join(c_dir, 'scripts', 'vendor'),
            os.path.join(c_dir, 'seed'),
            os.path.join(c_dir, 'instances'),
            os.path.join(c_dir, 'handlers'),
            os.path.join(c_dir, 'sem'),
            os.path.join(c_dir, 'data'),
        ]
        return initial_dirs

    def purge_cache(self, rm_instance_lnk=False):
        rm_list = []
        rm_list.append(self.paths.boot_finished)
        if rm_instance_lnk:
            rm_list.append(self.paths.instance_link)
        for f in rm_list:
            util.del_file(f)
        return len(rm_list)

    def initialize(self):
        self._initialize_filesystem()

    def _initialize_filesystem(self):
        util.ensure_dirs(self._initial_subdirs())
        log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
        if log_file:
            util.ensure_file(log_file)
            perms = self.cfg.get('syslog_fix_perms')
            if not perms:
                perms = {}
            if not isinstance(perms, list):
                perms = [perms]

            error = None
            for perm in perms:
                u, g = util.extract_usergroup(perm)
                try:
                    util.chownbyname(log_file, u, g)
                    return
                except OSError as e:
                    error = e

            LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
                     log_file, ','.join(perms), error)

    def read_cfg(self, extra_fns=None):
        # None check so that we don't keep on re-loading if empty
        if self._cfg is None:
            self._cfg = self._read_cfg(extra_fns)
            # LOG.debug("Loaded 'init' config %s", self._cfg)

    def _read_cfg(self, extra_fns):
        no_cfg_paths = helpers.Paths({}, self.datasource)
        merger = helpers.ConfigMerger(paths=no_cfg_paths,
                                      datasource=self.datasource,
                                      additional_fns=extra_fns,
                                      base_cfg=fetch_base_config())
        return merger.cfg

    def _restore_from_cache(self):
        # We try to restore from a current link and static path
        # by using the instance link; if purge_cache was called
        # the file won't exist.
        return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))

    def _write_to_cache(self):
        if self.datasource is NULL_DATA_SOURCE:
            return False
        return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))

    def _get_datasources(self):
        # Any config provided???
        pkg_list = self.cfg.get('datasource_pkg_list') or []
        # Add the defaults at the end
        for n in ['', type_utils.obj_name(sources)]:
            if n not in pkg_list:
                pkg_list.append(n)
        cfg_list = self.cfg.get('datasource_list') or []
        return (cfg_list, pkg_list)
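
    # For reference, discovery can be restricted through the merged system
    # config; a typical (illustrative) /etc/cloud/cloud.cfg entry would be:
    #
    #   datasource_list: [ NoCloud, ConfigDrive, Ec2, None ]
    #
    # The names above are examples only; the authoritative set lives in
    # cloudinit/sources.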

    def _restore_from_checked_cache(self, existing):
        if existing not in ("check", "trust"):
            raise ValueError("Unexpected value for existing: %s" % existing)

        ds = self._restore_from_cache()
        if not ds:
            return (None, "no cache found")

        run_iid_fn = self.paths.get_runpath('instance_id')
        if os.path.exists(run_iid_fn):
            run_iid = util.load_file(run_iid_fn).strip()
        else:
            run_iid = None

        if run_iid == ds.get_instance_id():
            return (ds, "restored from cache with run check: %s" % ds)
        elif existing == "trust":
            return (ds, "restored from cache: %s" % ds)
        else:
            if (hasattr(ds, 'check_instance_id') and
                    ds.check_instance_id(self.cfg)):
                return (ds, "restored from checked cache: %s" % ds)
            return (None, "cache invalid in datasource: %s" % ds)
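
    # In short: both modes reuse the cache when the instance-id recorded at
    # this boot matches the cached datasource. On a mismatch, "trust" reuses
    # the cache anyway, while "check" only reuses it if the datasource's own
    # check_instance_id() vouches for it, and otherwise reports it invalid.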

    def _get_data_source(self, existing):
        if self.datasource is not NULL_DATA_SOURCE:
            return self.datasource

        with events.ReportEventStack(
                name="check-cache",
                description="attempting to read from cache [%s]" % existing,
                parent=self.reporter) as myrep:

            ds, desc = self._restore_from_checked_cache(existing)
            myrep.description = desc
            self.ds_restored = bool(ds)
            LOG.debug(myrep.description)

        if not ds:
            util.del_file(self.paths.instance_link)
            (cfg_list, pkg_list) = self._get_datasources()
            # Deep copy so that user-data handlers can not modify
            # (which will affect user-data handlers down the line...)
            (ds, dsname) = sources.find_source(self.cfg,
                                               self.distro,
                                               self.paths,
                                               copy.deepcopy(self.ds_deps),
                                               cfg_list,
                                               pkg_list, self.reporter)
            LOG.info("Loaded datasource %s - %s", dsname, ds)
        self.datasource = ds
        # Ensure we adjust our path members datasource
        # now that we have one (thus allowing ipath to be used)
        self._reset()
        return ds

    def _get_instance_subdirs(self):
        return ['handlers', 'scripts', 'sem']

    def _get_ipath(self, subname=None):
        # Force a check to see if anything
        # actually comes back, if not
        # then a datasource has not been assigned...
        instance_dir = self.paths.get_ipath(subname)
        if not instance_dir:
            raise RuntimeError(("No instance directory is available."
                                " Has a datasource been fetched??"))
        return instance_dir

    def _reflect_cur_instance(self):
        # Remove the old symlink and attach a new one so
        # that further reads/writes connect into the right location
        idir = self._get_ipath()
        util.del_file(self.paths.instance_link)
        util.sym_link(idir, self.paths.instance_link)

        # Ensures these dirs exist
        dir_list = []
        for d in self._get_instance_subdirs():
            dir_list.append(os.path.join(idir, d))
        util.ensure_dirs(dir_list)

        # Write out information on what is being used for the current instance
        # and what may have been used for a previous instance...
        dp = self.paths.get_cpath('data')

        # Write what the datasource was and is..
        ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
        previous_ds = None
        ds_fn = os.path.join(idir, 'datasource')
        try:
            previous_ds = util.load_file(ds_fn).strip()
        except Exception:
            pass
        if not previous_ds:
            previous_ds = ds
        util.write_file(ds_fn, "%s\n" % ds)
        util.write_file(os.path.join(dp, 'previous-datasource'),
                        "%s\n" % (previous_ds))

        # What the instance id was and is...
        iid = self.datasource.get_instance_id()
        iid_fn = os.path.join(dp, 'instance-id')

        previous_iid = self.previous_iid()
        util.write_file(iid_fn, "%s\n" % iid)
        util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
        util.write_file(os.path.join(dp, 'previous-instance-id'),
                        "%s\n" % (previous_iid))

        self._write_to_cache()
        # Ensure needed components are regenerated
        # after change of instance which may cause
        # change of configuration
        self._reset()
        return iid

    def previous_iid(self):
        if self._previous_iid is not None:
            return self._previous_iid

        dp = self.paths.get_cpath('data')
        iid_fn = os.path.join(dp, 'instance-id')
        try:
            self._previous_iid = util.load_file(iid_fn).strip()
        except Exception:
            self._previous_iid = NO_PREVIOUS_INSTANCE_ID

        LOG.debug("previous iid found to be %s", self._previous_iid)
        return self._previous_iid

    def is_new_instance(self):
        previous = self.previous_iid()
        ret = (previous == NO_PREVIOUS_INSTANCE_ID or
               previous != self.datasource.get_instance_id())
        return ret

    def fetch(self, existing="check"):
        return self._get_data_source(existing=existing)

    def instancify(self):
        return self._reflect_cur_instance()

    def cloudify(self):
        # Form the needed options to cloudify our members
        return cloud.Cloud(self.datasource,
                           self.paths, self.cfg,
                           self.distro, helpers.Runners(self.paths),
                           reporter=self.reporter)

    def update(self):
        self._store_userdata()
        self._store_vendordata()

    def _store_userdata(self):
        raw_ud = self.datasource.get_userdata_raw()
        if raw_ud is None:
            raw_ud = b''
        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
        # processed userdata is a Mime message, so write it as string.
        processed_ud = self.datasource.get_userdata()
        if processed_ud is None:
            processed_ud = ''
        util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)

    def _store_vendordata(self):
        raw_vd = self.datasource.get_vendordata_raw()
        if raw_vd is None:
            raw_vd = b''
        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
        # processed vendor data is a Mime message, so write it as string.
        processed_vd = self.datasource.get_vendordata()
        if processed_vd is None:
            processed_vd = ''
        util.write_file(self._get_ipath('vendordata'), str(processed_vd),
                        0o600)

    def _default_handlers(self, opts=None):
        if opts is None:
            opts = {}

        opts.update({
            'paths': self.paths,
            'datasource': self.datasource,
        })
        # TODO(harlowja) Hmmm, should we dynamically import these??
        def_handlers = [
            cc_part.CloudConfigPartHandler(**opts),
            ss_part.ShellScriptPartHandler(**opts),
            bh_part.BootHookPartHandler(**opts),
            up_part.UpstartJobPartHandler(**opts),
        ]
        return def_handlers

    def _default_userdata_handlers(self):
        return self._default_handlers()

    def _default_vendordata_handlers(self):
        return self._default_handlers(
            opts={'script_path': 'vendor_scripts',
                  'cloud_config_path': 'vendor_cloud_config'})
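
    # Note: the 'script_path' and 'cloud_config_path' overrides above are
    # keys into helpers.Paths lookups, steering vendor-data output to
    # vendor-specific locations (e.g. the scripts/vendor directory created
    # in _initial_subdirs) instead of the user-data ones.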

    def _do_handlers(self, data_msg, c_handlers_list, frequency,
                     excluded=None):
        """
        Generalized handlers suitable for use with either vendordata
        or userdata
        """
        if excluded is None:
            excluded = []

        cdir = self.paths.get_cpath("handlers")
        idir = self._get_ipath("handlers")

        # Add the path to the plugins dir to the top of our list for importing
        # new handlers.
        #
        # Note(harlowja): instance dir should be read before cloud-dir
        for d in [cdir, idir]:
            if d and d not in sys.path:
                sys.path.insert(0, d)

        def register_handlers_in_dir(path):
            # Attempts to register any handler modules under the given path.
            if not path or not os.path.isdir(path):
                return
            potential_handlers = util.find_modules(path)
            for (fname, mod_name) in potential_handlers.items():
                try:
                    mod_locs, looked_locs = importer.find_module(
                        mod_name, [''], ['list_types', 'handle_part'])
                    if not mod_locs:
                        LOG.warn("Could not find a valid user-data handler"
                                 " named %s in file %s (searched %s)",
                                 mod_name, fname, looked_locs)
                        continue
                    mod = importer.import_module(mod_locs[0])
                    mod = handlers.fixup_handler(mod)
                    types = c_handlers.register(mod)
                    if types:
                        LOG.debug("Added custom handler for %s [%s] from %s",
                                  types, mod, fname)
                except Exception:
                    util.logexc(LOG, "Failed to register handler from %s",
                                fname)

        # This keeps track of all the active handlers
        c_handlers = helpers.ContentHandlers()

        # Add any handlers in the cloud-dir
        register_handlers_in_dir(cdir)

        # Register any other handlers that come from the default set. This
        # is done after the cloud-dir handlers so that the cdir modules can
        # take over the default user-data handler content-types.
        for mod in c_handlers_list:
            types = c_handlers.register(mod, overwrite=False)
            if types:
                LOG.debug("Added default handler for %s from %s", types, mod)

        # Form our cloud interface
        data = self.cloudify()

        def init_handlers():
            # Init the handlers first
            for (_ctype, mod) in c_handlers.items():
                if mod in c_handlers.initialized:
                    # Avoid initing the same module twice (if said module
                    # is registered to more than one content-type).
                    continue
                handlers.call_begin(mod, data, frequency)
                c_handlers.initialized.append(mod)

        def walk_handlers(excluded):
            # Walk the user data
            part_data = {
                'handlers': c_handlers,
                # Any new handlers that are encountered get written here
                'handlerdir': idir,
                'data': data,
                # The default frequency if handlers don't have one
                'frequency': frequency,
                # This will be used when new handlers are found
                # to help write their contents to files with numbered
                # names...
                'handlercount': 0,
                'excluded': excluded,
            }
            handlers.walk(data_msg, handlers.walker_callback, data=part_data)

        def finalize_handlers():
            # Give callbacks opportunity to finalize
            for (_ctype, mod) in c_handlers.items():
                if mod not in c_handlers.initialized:
                    # Said module was never inited in the first place, so lets
                    # not attempt to finalize those that never got called.
                    continue
                c_handlers.initialized.remove(mod)
                try:
                    handlers.call_end(mod, data, frequency)
                except Exception:
                    util.logexc(LOG, "Failed to finalize handler: %s", mod)

        try:
            init_handlers()
            walk_handlers(excluded)
        finally:
            finalize_handlers()
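
    # The lifecycle enforced above: call_begin() once per registered handler,
    # then walker_callback for each MIME part of data_msg, and finally
    # call_end() for every handler that was begun; the finally block makes
    # sure finalization happens even if the walk raises.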

    def consume_data(self, frequency=PER_INSTANCE):
        # Consume the userdata first, because we want to let the part
        # handlers run first (for merging stuff)
        with events.ReportEventStack("consume-user-data",
                                     "reading and applying user-data",
                                     parent=self.reporter):
            self._consume_userdata(frequency)
        with events.ReportEventStack("consume-vendor-data",
                                     "reading and applying vendor-data",
                                     parent=self.reporter):
            self._consume_vendordata(frequency)

        # Perform post-consumption adjustments so that
        # modules that run during the init stage reflect
        # this consumed set.
        #
        # They will be recreated on future access...
        self._reset()
        # Note(harlowja): the 'active' datasource will have
        # references to the previous config, distro, paths
        # objects before the load of the userdata happened,
        # this is expected.

    def _consume_vendordata(self, frequency=PER_INSTANCE):
        """
        Consume the vendordata and run the part handlers on it
        """
        # User-data should have been consumed first.
        # So we merge the other available cloud-configs (everything except
        # vendor provided), and check whether or not we should consume
        # vendor data at all. That gives user or system a chance to override.
        if not self.datasource.get_vendordata_raw():
            LOG.debug("no vendordata from datasource")
            return

        _cc_merger = helpers.ConfigMerger(paths=self._paths,
                                          datasource=self.datasource,
                                          additional_fns=[],
                                          base_cfg=self.cfg,
                                          include_vendor=False)
        vdcfg = _cc_merger.cfg.get('vendor_data', {})

        if not isinstance(vdcfg, dict):
            vdcfg = {'enabled': False}
            LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)

        enabled = vdcfg.get('enabled')
        no_handlers = vdcfg.get('disabled_handlers', None)

        if not util.is_true(enabled):
            LOG.debug("vendordata consumption is disabled.")
            return

        LOG.debug("vendor data will be consumed. disabled_handlers=%s",
                  no_handlers)

        # Ensure vendordata source fetched before activation (just in case)
        vendor_data_msg = self.datasource.get_vendordata()

        # This keeps track of all the active handlers, while excluding what
        # the user doesn't want run, i.e. boot_hook, cloud_config, shell_script
        c_handlers_list = self._default_vendordata_handlers()

        # Run the handlers
        self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
                          excluded=no_handlers)

    def _consume_userdata(self, frequency=PER_INSTANCE):
        """
        Consume the userdata and run the part handlers
        """
        # Ensure datasource fetched before activation (just in case)
        user_data_msg = self.datasource.get_userdata(True)

        # This keeps track of all the active handlers
        c_handlers_list = self._default_handlers()

        # Run the handlers
        self._do_handlers(user_data_msg, c_handlers_list, frequency)

    def _find_networking_config(self):
        disable_file = os.path.join(
            self.paths.get_cpath('data'), 'upgraded-network')
        if os.path.exists(disable_file):
            return (None, disable_file)

        cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
        dscfg = ('ds', None)
        if self.datasource and hasattr(self.datasource, 'network_config'):
            dscfg = ('ds', self.datasource.network_config)
        sys_cfg = ('system_cfg', self.cfg.get('network'))

        for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg):
            if net.is_disabled_cfg(ncfg):
                LOG.debug("network config disabled by %s", loc)
                return (None, loc)
            if ncfg:
                return (ncfg, loc)
        return (net.generate_fallback_config(), "fallback")
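
    # Precedence, as implemented by the loop above: kernel command line
    # first, then the system config 'network' key, then the datasource,
    # and finally a generated fallback config. A config that is explicitly
    # disabled at any level ends the search with no config at all.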

    def apply_network_config(self, bring_up):
        netcfg, src = self._find_networking_config()
        if netcfg is None:
            LOG.info("network config is disabled by %s", src)
            return

        try:
            LOG.debug("applying net config names for %s", netcfg)
            self.distro.apply_network_config_names(netcfg)
        except Exception as e:
            LOG.warn("Failed to rename devices: %s", e)

        if (self.datasource is not NULL_DATA_SOURCE and
                not self.is_new_instance()):
            LOG.debug("not a new instance. network config is not applied.")
            return

        LOG.info("Applying network configuration from %s bringup=%s: %s",
                 src, bring_up, netcfg)
        try:
            return self.distro.apply_network_config(netcfg, bring_up=bring_up)
        except NotImplementedError:
            LOG.warn("distro '%s' does not implement apply_network_config. "
                     "networking may not be configured properly.",
                     self.distro.name)
            return


class Modules(object):
    def __init__(self, init, cfg_files=None, reporter=None):
        self.init = init
        self.cfg_files = cfg_files
        # Created on first use
        self._cached_cfg = None
        if reporter is None:
            reporter = events.ReportEventStack(
                name="module-reporter", description="module-desc",
                reporting_enabled=False)
        self.reporter = reporter

    @property
    def cfg(self):
        # None check to avoid empty case causing re-reading
        if self._cached_cfg is None:
            merger = helpers.ConfigMerger(paths=self.init.paths,
                                          datasource=self.init.datasource,
                                          additional_fns=self.cfg_files,
                                          base_cfg=self.init.cfg)
            self._cached_cfg = merger.cfg
            # LOG.debug("Loaded 'module' config %s", self._cached_cfg)
        # Only give out a copy so that others can't modify this...
        return copy.deepcopy(self._cached_cfg)

    def _read_modules(self, name):
        module_list = []
        if name not in self.cfg:
            return module_list
        cfg_mods = self.cfg[name]
        # Create 'module_list', an array of hashes
        # Where hash['mod'] = module name
        #       hash['freq'] = frequency
        #       hash['args'] = arguments
        for item in cfg_mods:
            if not item:
                continue
            if isinstance(item, six.string_types):
                module_list.append({
                    'mod': item.strip(),
                })
            elif isinstance(item, (list)):
                contents = {}
                # Meant to fall through...
                if (len(item) >= 1):
                    contents['mod'] = item[0].strip()
                if (len(item) >= 2):
                    contents['freq'] = item[1].strip()
                if (len(item) >= 3):
                    contents['args'] = item[2:]
                if contents:
                    module_list.append(contents)
            elif isinstance(item, (dict)):
                contents = {}
                valid = False
                if 'name' in item:
                    contents['mod'] = item['name'].strip()
                    valid = True
                if 'frequency' in item:
                    contents['freq'] = item['frequency'].strip()
                if 'args' in item:
                    contents['args'] = item['args'] or []
                if contents and valid:
                    module_list.append(contents)
            else:
                raise TypeError(("Failed to read '%s' item in config,"
                                 " unknown type %s") %
                                (item, type_utils.obj_name(item)))
        return module_list
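
    # The accepted entry shapes mirror the branches above; an illustrative
    # cloud.cfg section (module names here are examples only):
    #
    #   cloud_config_modules:
    #    - ssh                                        # plain string
    #    - [phone-home, once-per-instance]            # [name, freq, args...]
    #    - {name: final-message, frequency: always}   # dict form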

    def _fixup_modules(self, raw_mods):
        mostly_mods = []
        for raw_mod in raw_mods:
            raw_name = raw_mod['mod']
            freq = raw_mod.get('freq')
            run_args = raw_mod.get('args') or []
            mod_name = config.form_module_name(raw_name)
            if not mod_name:
                continue
            if freq and freq not in FREQUENCIES:
                LOG.warn(("Config specified module %s"
                          " has an unknown frequency %s"), raw_name, freq)
                # Reset it so when ran it will get set to a known value
                freq = None
            mod_locs, looked_locs = importer.find_module(
                mod_name, ['', type_utils.obj_name(config)], ['handle'])
            if not mod_locs:
                LOG.warn("Could not find module named %s (searched %s)",
                         mod_name, looked_locs)
                continue
            mod = config.fixup_module(importer.import_module(mod_locs[0]))
            mostly_mods.append([mod, raw_name, freq, run_args])
        return mostly_mods

    def _run_modules(self, mostly_mods):
        cc = self.init.cloudify()
        # Return which ones ran
        # and which ones failed + the exception of why it failed
        failures = []
        which_ran = []
        for (mod, name, freq, args) in mostly_mods:
            try:
                # Try the module's frequency, otherwise fallback to a known one
                if not freq:
                    freq = mod.frequency
                if freq not in FREQUENCIES:
                    freq = PER_INSTANCE
                LOG.debug("Running module %s (%s) with frequency %s",
                          name, mod, freq)

                # Use the config's logger and not our own
                # TODO(harlowja): possibly check the module
                # for having a LOG attr and just give it back
                # its own logger?
                func_args = [name, self.cfg,
                             cc, config.LOG, args]
                # Mark it as having started running
                which_ran.append(name)
                # This name will affect the semaphore name created
                run_name = "config-%s" % (name)

                desc = "running %s with frequency %s" % (run_name, freq)
                myrep = events.ReportEventStack(
                    name=run_name, description=desc, parent=self.reporter)

                with myrep:
                    ran, _r = cc.run(run_name, mod.handle, func_args,
                                     freq=freq)
                    if ran:
                        myrep.message = "%s ran successfully" % run_name
                    else:
                        myrep.message = "%s previously ran" % run_name

            except Exception as e:
                util.logexc(LOG, "Running module %s (%s) failed", name, mod)
                failures.append((name, e))
        return (which_ran, failures)

    def run_single(self, mod_name, args=None, freq=None):
        # Form the user's module 'specs'
        mod_to_be = {
            'mod': mod_name,
            'args': args,
            'freq': freq,
        }
        # Now resume doing the normal fixups and running
        raw_mods = [mod_to_be]
        mostly_mods = self._fixup_modules(raw_mods)
        return self._run_modules(mostly_mods)
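
    # A usage sketch (assuming an Init whose datasource has already been
    # fetched; the module name is illustrative):
    #
    #   mods = Modules(init)
    #   which_ran, failures = mods.run_single('ssh', args=[], freq=None)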

    def run_section(self, section_name):
        raw_mods = self._read_modules(section_name)
        mostly_mods = self._fixup_modules(raw_mods)
        d_name = self.init.distro.name

        skipped = []
        forced = []
        overridden = self.cfg.get('unverified_modules', [])
        for (mod, name, _freq, _args) in mostly_mods:
            worked_distros = set(mod.distros)
            worked_distros.update(
                distros.Distro.expand_osfamily(mod.osfamilies))

            # module does not declare 'distros' or lists this distro
            if not worked_distros or d_name in worked_distros:
                continue

            if name in overridden:
                forced.append(name)
            else:
                skipped.append(name)

        if skipped:
            LOG.info("Skipping modules %s because they are not verified "
                     "on distro '%s'. To run anyway, add them to "
                     "'unverified_modules' in config.", skipped, d_name)
        if forced:
            LOG.info("running unverified_modules: %s", forced)

        return self._run_modules(mostly_mods)
840
def fetch_base_config():
842
default_cfg = util.get_builtin_cfg()
844
# Anything in your conf.d location??
845
# or the 'default' cloud.cfg location???
846
base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
848
# Kernel/cmdline parameters override system config
849
kern_contents = util.read_cc_from_cmdline()
851
base_cfgs.append(util.load_yaml(kern_contents, default={}))
853
# And finally the default gets to play
855
base_cfgs.append(default_cfg)
857
return util.mergemanydict(base_cfgs)


def _pkl_store(obj, fname):
    try:
        pk_contents = pickle.dumps(obj)
    except Exception:
        util.logexc(LOG, "Failed pickling datasource %s", obj)
        return False
    try:
        util.write_file(fname, pk_contents, omode="wb", mode=0o400)
    except Exception:
        util.logexc(LOG, "Failed pickling datasource to %s", fname)
        return False
    return True


def _pkl_load(fname):
    pickle_contents = None
    try:
        pickle_contents = util.load_file(fname, decode=False)
    except Exception as e:
        if os.path.isfile(fname):
            LOG.warn("failed loading pickle in %s: %s", fname, e)

    # This is allowed so just return nothing successfully loaded...
    if not pickle_contents:
        return None
    try:
        return pickle.loads(pickle_contents)
    except Exception:
        util.logexc(LOG, "Failed loading pickled blob from %s", fname)
        return None