~caio1982/mojo/local_repo_keys

« back to all changes in this revision

Viewing changes to mojo/phase.py

  • Committer: Tim Kuhlman
  • Date: 2016-01-13 16:01:22 UTC
  • mfrom: (260 mojo)
  • mto: This revision was merged to the branch mainline in revision 268.
  • Revision ID: timothy.kuhlman@canonical.com-20160113160122-7te92i45gc9iplmm
Merged latest changes from mojo trunk

Show diffs side-by-side

added

removed

Lines of Context:
22
22
from jinja2 import Template
23
23
from jinja2.exceptions import TemplateSyntaxError
24
24
import mojo
 
25
import mojo.juju.debuglogs
25
26
import mojo.juju.status
26
27
import codetree
27
28
from deployer.config import ConfigStack
55
56
    pass
56
57
 
57
58
 
 
59
class StopOnPhaseException(Exception):
 
60
    pass
 
61
 
 
62
 
 
63
class VerifyPhaseException(Exception):
 
64
    pass
 
65
 
 
66
 
 
67
class ScriptPhaseException(Exception):
 
68
    def __init__(self, returncode, output):
 
69
        self.returncode = returncode
 
70
        self.output = output
 
71
 
 
72
 
58
73
class Phase(object):
59
74
    "Base class for phases"
60
75
    name = "not_implemented"
186
201
        with chdir(workspace.build_dir):
187
202
            try:
188
203
                collect_config = codetree.config.Config(config)
189
 
                collect_config.build(use_concurrent=True)
 
204
                collect_success = collect_config.build(use_concurrent=True)
 
205
                if not collect_success:
 
206
                    raise CollectPhaseException("Error during collect phase")
190
207
            except (codetree.handlers.NotSameBranch,
191
208
                    codetree.handlers.NotABranch,
192
209
                    codetree.handlers.BranchDiverged) as e:
254
271
       Arguments:
255
272
           config      base name for script file
256
273
           lxc         Boolean to use lxc or not
 
274
           debug-logs  Config file with debug-logs to look for in case of failure
 
275
           debug-logs-stages-to-exclude
 
276
                       stages that will be excluded from debug-log gathering
257
277
           KEY=VALUE   set as environment variables for use by the script
258
278
    """
259
279
    name = "script"
293
313
        else:
294
314
            env_vars = ["env"] + ["=".join((k, v)) for k, v in env.items()]
295
315
            command = env_vars + [script]
296
 
            output = subprocess.check_output(command, stderr=subprocess.STDOUT)
 
316
            try:
 
317
                output = subprocess.check_output(command, stderr=subprocess.STDOUT)
 
318
            except subprocess.CalledProcessError as e:
 
319
                dl = mojo.juju.debuglogs.DebugLogs(
 
320
                    workspace, stage, self.options.get('debug-logs'), self.options.get('debug-logs-stages-to-exclude'))
 
321
                if dl.has_config:
 
322
                    exception_output = "{}\n{}".format(e.output, dl.get_log_output())
 
323
                else:
 
324
                    exception_output = e.output
 
325
                raise ScriptPhaseException(e.returncode, exception_output)
297
326
            if output.strip():
298
327
                logging.info(output)
299
328
 
477
506
        deploy_name = self._get_target(configs)
478
507
        deployer_config = ConfigStack(configs)
479
508
        deployer_config.load()
480
 
        print(yaml.dump(deployer_config.get(deploy_name).data))
 
509
        deployment = deployer_config.get(deploy_name)
 
510
        # XXX Pretty much a gross hack, but
 
511
        #     Config.get() forces our hand.
 
512
        deployment.repo_path = workspace.repo_dir
 
513
        deployment.resolve_config()
 
514
        print(yaml.dump(deployment.data))
481
515
 
482
516
    def diff(self, project, workspace, stage):
483
517
        """
531
565
        else:
532
566
            wait_flag = False
533
567
 
 
568
        if "optional" in self.options:
 
569
            optional = str2bool(self.options['optional'])
 
570
        else:
 
571
            optional = False
 
572
 
534
573
        deploy_name = self._get_target(configs)
535
574
 
536
575
        # run the deploy
543
582
        cmd += timeout
544
583
        cmd += retry
545
584
        cmd += ('-d', deploy_name, '-W', '-u')
546
 
        if str2bool(self.options.get('optional')):
547
 
            with chdir(workspace.repo_dir):
548
 
                subprocess.call(cmd)
549
 
        else:
550
 
            logging.info("Running: {}".format(" ".join(cmd)))
551
 
            with chdir(workspace.repo_dir):
552
 
                status, output = bicommand(" ".join(cmd), showoutput=True, env=env)
553
 
                logging.getLogger('file').info(output)
554
 
            if status != 0:
555
 
                timeout_fail = re.search("Reached deployment timeout.. exiting",
556
 
                                    output)
557
 
                if timeout_fail:
558
 
                    logging.error("Deployment timed out. It was set to {} "
559
 
                                  "seconds".format(timeout[1]))
560
 
                hook_fail = re.search(".*unit: (?P<unit>[^:]+):.*hook failed",
561
 
                                 output)
562
 
                if hook_fail:
563
 
                    failed_unit = hook_fail.group("unit")
564
 
                    logging.error("Deployment failed, grabbing the last 200 lines of "
565
 
                                  "output from the juju logs on {}".format(failed_unit))
566
 
                    cmd = ["juju", "ssh", failed_unit, "sudo", "tail", "-200",
567
 
                           "/var/log/juju/unit-%s.log" % (
568
 
                                failed_unit.replace("/", "-"),)]
569
 
                    with chdir(workspace.repo_dir):
570
 
                        failure = subprocess.check_output(cmd)
571
 
                        logging.error(failure)
572
 
                # Show juju status
573
 
                juju_status = mojo.juju.status.Status(environment=juju_env)
574
 
                logging.error("Juju Status: {}".format(juju_status.status()))
575
 
                if not timeout_fail and not hook_fail:
576
 
                    logging.error("There was an unrecognised problem with "
577
 
                                  "running a deploy phase")
 
585
 
 
586
        logging.info("Running: {}".format(" ".join(cmd)))
 
587
 
 
588
        if optional:
 
589
            logging.info("NOTE: This phase is marked optional; "
 
590
                         "failure will not terminate the run.")
 
591
 
 
592
        with chdir(workspace.repo_dir):
 
593
            status, output = bicommand(cmd, showoutput=True, env=env)
 
594
            logging.getLogger('file').info(output)
 
595
 
 
596
        if status != 0:
 
597
            timeout_fail = re.search("Reached deployment timeout.. exiting",
 
598
                                output)
 
599
            if timeout_fail:
 
600
                logging.error("Deployment timed out. It was set to {} "
 
601
                              "seconds".format(timeout[1]))
 
602
            hook_fail = re.search(".*unit: (?P<unit>[^:]+):.*hook failed",
 
603
                             output)
 
604
            if hook_fail:
 
605
                failed_unit = hook_fail.group("unit")
 
606
                logging.error("Deployment failed, grabbing the last 200 lines of "
 
607
                              "output from the juju logs on {}".format(failed_unit))
 
608
                cmd = ["juju", "ssh", failed_unit, "sudo", "tail", "-200",
 
609
                       "/var/log/juju/unit-%s.log" % (
 
610
                            failed_unit.replace("/", "-"),)]
 
611
                with chdir(workspace.repo_dir):
 
612
                    failure = subprocess.check_output(cmd)
 
613
                    logging.error(failure)
 
614
            # Show juju status
 
615
            juju_status = mojo.juju.status.Status(environment=juju_env)
 
616
            logging.error("Juju Status: {}".format(juju_status.status()))
 
617
            if not timeout_fail and not hook_fail:
 
618
                logging.error("There was an unrecognised problem with "
 
619
                              "running a deploy phase")
 
620
 
 
621
            if optional:
 
622
                logging.info("Error found during optional deployment phase: continuing as directed.")
 
623
            else:
578
624
                raise JujuDeployerException("Error found during deployment phase")
579
 
            else:
580
 
                logging.info("Checking Juju status")
581
 
                juju_status = mojo.juju.status.Status(environment=juju_env)
582
 
                juju_status.check_and_wait(timeout=status_timeout)
583
 
                if wait_flag:
584
 
                    logging.info("Waiting for environment to reach steady state")
585
 
                    try:
586
 
                        wait()
587
 
                    except JujuWaitException:
588
 
                        # Show juju status
589
 
                        logging.error("Juju Status: {}".format(print_juju_status(environment=juju_env)))
590
 
                        raise
591
 
                    logging.info("Environment has reached steady state")
 
625
        else:
 
626
            logging.info("Checking Juju status")
 
627
            juju_status = mojo.juju.status.Status(environment=juju_env)
 
628
            juju_status.check_and_wait(timeout=status_timeout)
 
629
            if wait_flag:
 
630
                logging.info("Waiting for environment to reach steady state")
 
631
                try:
 
632
                    wait()
 
633
                except JujuWaitException:
 
634
                    # Show juju status
 
635
                    logging.error("Juju Status: {}".format(print_juju_status(environment=juju_env)))
 
636
                    raise
 
637
                logging.info("Environment has reached steady state")
592
638
 
593
639
    def render_config(self, src, dest, tmpl_vars=None):
594
640
        if not tmpl_vars:
630
676
           lxc         Boolean to use lxc or not
631
677
           retry       int; how many times to retry
632
678
           sleep       int; how many seconds to wait between retries
 
679
           debug-logs  Config file with debug-logs to look for in case of failure
 
680
           debug-logs-stages-to-exclude
 
681
                       stages that will be excluded from debug-log gathering
633
682
    """
634
683
    name = "verify"
635
684
 
648
697
            try:
649
698
                _run(project, workspace, stage,
650
699
                     auto_secrets=auto_secrets)
651
 
            except subprocess.CalledProcessError as e:
 
700
            except ScriptPhaseException as e:
652
701
                if i < retry:
653
702
                    logging.warning(e.output)
654
703
                    logging.warning(
657
706
                            e.returncode, i, retry, sleep))
658
707
                    time.sleep(sleep)
659
708
                else:
660
 
                    raise
 
709
                    raise VerifyPhaseException(e.output)
661
710
            else:
662
711
                # Hooray, success!
663
712
                break
685
734
 
686
735
        try:
687
736
            _run(project, workspace, stage)
688
 
        except subprocess.CalledProcessError as e:
 
737
        except ScriptPhaseException as e:
689
738
            if e.returncode == return_code:
690
739
                # Exit silently
691
740
                logging.info(e.output)
692
741
                sys.exit(0)
693
742
            else:
694
 
                raise
 
743
                raise StopOnPhaseException(e.output)
695
744
 
696
745
 
697
746
class JujuCheckWaitPhase(Phase):
698
 
    """Check juju status and then confirm it's in a steady state using 
 
747
    """Check juju status and then confirm it's in a steady state using
699
748
    lp:juju-wait
700
 
    
 
749
 
701
750
        Arguments:
702
751
            status-timeout  We check juju status after each deploy phase. How
703
752
                            long in seconds before we timeout if instances remain