~ubuntu-branches/ubuntu/precise/juju/precise


Viewing changes to juju/state/service.py

  • Committer: Package Import Robot
  • Author(s): Clint Byrum
  • Date: 2012-03-30 15:28:16 UTC
  • mfrom: (1.1.10)
  • Revision ID: package-import@ubuntu.com-20120330152816-5iy8sdojk9mnxzpk
Tags: 0.5+bzr504-0ubuntu1
* New upstream snapshot (LP: #962507 LP: #953258 LP: #965507).
* d/control: Depend and Build-Depend on python-oauth for MaaS.
* d/control: Drop dummy ensemble package and make breaks/replaces
  broader to force removal of any ensemble package. (LP: #954492)
* d/control: Move lxc, libvirt-bin, and zookeeper to Suggests to
  reduce the number of packages installed on every node unnecessarily
  and also avoid conflicting when deploying into a libvirt-bin
  default network VM (LP: #962389)
* d/rules: skip test suite when nocheck is set.
* d/rules: remove redundant dh_clean call
* d/juju.install: remove usr; with only one binary package this is
  no longer necessary and causes dh_install to fail because no
  files are installed to debian/tmp anymore.
* d/rules,d/control,d/manpages,d/juju.manpages: Generate basic
  manpage from online help. (LP: #966611)
* d/patches/no-write-sample-on-help.patch: Added so --help can be
  safely run without a writable home dir on buildds. (LP: #957682)

=== modified file 'juju/state/service.py'
--- juju/state/service.py
+++ juju/state/service.py
@@ -7,6 +7,7 @@
 from txzookeeper.utils import retry_change
 
 from juju.charm.url import CharmURL
+from juju.machine.constraints import Constraints
 from juju.state.agent import AgentStateMixin
 from juju.state.base import StateBase
 from juju.state.endpoint import RelationEndpoint
@@ -15,31 +16,42 @@
     ServiceUnitStateMachineAlreadyAssigned, ServiceStateNameInUse,
     BadDescriptor, BadServiceStateName, NoUnusedMachines,
     ServiceUnitDebugAlreadyEnabled, ServiceUnitResolvedAlreadyEnabled,
-    ServiceUnitRelationResolvedAlreadyEnabled, StopWatcher)
+    ServiceUnitRelationResolvedAlreadyEnabled, StopWatcher,
+    ServiceUnitUpgradeAlreadyEnabled)
 from juju.state.charm import CharmStateManager
 from juju.state.relation import ServiceRelationState, RelationStateManager
 from juju.state.machine import _public_machine_id, MachineState
-from juju.state.utils import remove_tree, dict_merge, YAMLState
+from juju.state.utils import (
+    remove_tree, dict_merge, YAMLState, YAMLStateNodeMixin)
 
 RETRY_HOOKS = 1000
 NO_HOOKS = 1001
 
 
+def _series_constraints(base_constraints, charm_id):
+    series = CharmURL.parse(charm_id).collection.series
+    return base_constraints.with_series(series)
+
+
 class ServiceStateManager(StateBase):
     """Manages the state of services in an environment."""
 
     @inlineCallbacks
-    def add_service_state(self, service_name, charm_state):
+    def add_service_state(self, service_name, charm_state, constraints):
         """Create a new service with the given name.
 
         @param service_name: Unique name of the service created.
         @param charm_state: CharmState for the service.
+        @param constraint_strs: list of constraint strings
 
         @return: ServiceState for the created service.
         """
+        charm_id = charm_state.id
+        constraints = _series_constraints(constraints, charm_id)
+        service_details = {
+            "charm": charm_id, "constraints": constraints.data}
         # charm metadata is always decoded into unicode, ensure any
         # serialized state references strings to avoid tying to py runtime.
-        service_details = {"charm": charm_state.id}
         node_data = yaml.safe_dump(service_details)
         path = yield self._client.create("/services/service-", node_data,
                                          flags=zookeeper.SEQUENCE)
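
The hunk above changes the `add_service_state` signature: callers must now pass a constraints object, which `_series_constraints` stamps with the charm's series before it is serialized into the service node. A hedged sketch of how a caller might use the new signature; `make_constraints` is a hypothetical stand-in for however the caller obtains a `Constraints` object, not an API shown in this diff:

    # Sketch only: deploying a service under the new three-argument
    # add_service_state. `make_constraints` is hypothetical.
    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def deploy(client, charm_state, make_constraints):
        manager = ServiceStateManager(client)
        constraints = make_constraints(["cpu=2", "mem=2G"])  # hypothetical
        service = yield manager.add_service_state(
            "wordpress", charm_state, constraints)
        # The service node now stores both the charm id and constraints.data.
        returnValue(service)
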
@@ -140,13 +152,15 @@
         A `descriptor` is of the form ``<service name>[:<relation name>]``.
         Returns the following:
 
-          - Returns a set of matching endpoints, drawn from the peers,
-            provides, and requires interfaces. The empty set is
+          - Returns a list of matching endpoints, drawn from the
+            peers, provides, and requires interfaces. An empty list is
             returned if there are no endpoints matching the
-            `descriptor`.
+            `descriptor`. This list is sorted such that implicit
+            relations appear last.
 
           - Raises a `BadDescriptor` exception if `descriptor` cannot
             be parsed.
+
         """
         tokens = descriptor.split(":")
         if len(tokens) == 1 and bool(tokens[0]):
@@ -175,16 +189,35 @@
                             relation_type=spec["interface"],
                             relation_name=relation_name,
                             relation_role=relation_role))
+
+        # Add in implicit relations
+        endpoints.add(
+            RelationEndpoint(
+                query_service_name, "juju-info", "juju-info", "server"))
+
+        # when offering matches implicit relations should be considered last.
+        # this cmpfunc pushes implicit methods to the end of the list of possible
+        # endpoints
+        def low_priority_implicit_cmp(rel1, rel2):
+            if rel1.relation_name.startswith("juju-"):
+                return 1
+            if rel2.relation_name.startswith("juju-"):
+                return -1
+            return cmp(rel1.relation_name, rel2.relation_name)
+
+        endpoints = sorted(endpoints, low_priority_implicit_cmp)
+
         returnValue(endpoints)
 
     @inlineCallbacks
     def join_descriptors(self, descriptor1, descriptor2):
         """Return a list of pairs of RelationEndpoints joining descriptors."""
         result = []
-        relation_set1 = yield self.get_relation_endpoints(descriptor1)
-        relation_set2 = yield self.get_relation_endpoints(descriptor2)
-        for relation1 in relation_set1:
-            for relation2 in relation_set2:
+        relations_1 = yield self.get_relation_endpoints(descriptor1)
+        relations_2 = yield self.get_relation_endpoints(descriptor2)
+
+        for relation1 in relations_1:
+            for relation2 in relations_2:
                 if relation1.may_relate_to(relation2):
                     result.append((relation1, relation2))
         returnValue(result)
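
The comparator above uses Python 2's two-argument `sorted(iterable, cmpfunc)` form: returning 1 ranks `rel1` after `rel2`, returning -1 ranks it before, so any endpoint whose relation name starts with "juju-" sinks to the end while the remainder sort alphabetically. A standalone illustration of that ordering, with made-up relation names:

    # Python 2 illustration of the ordering; the names are invented.
    def low_priority_implicit_cmp(name1, name2):
        if name1.startswith("juju-"):
            return 1
        if name2.startswith("juju-"):
            return -1
        return cmp(name1, name2)

    print sorted(["juju-info", "db", "cache"], low_priority_implicit_cmp)
    # -> ['cache', 'db', 'juju-info']  (implicit relation sorted last)
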
@@ -224,7 +257,7 @@
         return self._watch_topology(watch_topology)
 
 
-class ServiceState(StateBase):
+class ServiceState(StateBase, YAMLStateNodeMixin):
     """State of a service registered in an environment.
 
     Each service is composed by units, and each unit represents an
@@ -276,15 +309,15 @@
     @property
     def _exposed_path(self):
         """Path of ZK node that if it exists, indicates service is exposed."""
-        return "/services/%s/exposed" % self._internal_id
-
-    @inlineCallbacks
+        return "%s/exposed" % self._zk_path
+
+    def _node_missing(self):
+        raise ServiceStateNotFound(self._service_name)
+
     def get_charm_id(self):
         """Return the charm id this service is supposed to use.
         """
-        content, stat = yield self._client.get(self._zk_path)
-        details = yaml.load(content)
-        returnValue(details["charm"])
+        return self._get_node_value("charm")
 
     @inlineCallbacks
     def set_charm_id(self, charm_id):
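
From this point on, the diff replaces hand-rolled get/`yaml.load`/`retry_change` sequences with `_get_node_value` and `_set_node_value` from the new `YAMLStateNodeMixin`, with `_node_missing` as the per-class hook for missing nodes. The mixin itself lives in `juju.state.utils` and is not shown in this diff; the following is only a plausible sketch of its semantics, inferred from the call sites:

    # Plausible sketch, inferred from call sites -- not the actual
    # juju.state.utils.YAMLStateNodeMixin implementation.
    import yaml
    import zookeeper
    from twisted.internet.defer import inlineCallbacks, returnValue
    from txzookeeper.utils import retry_change

    class YAMLStateNodeMixinSketch(object):

        @inlineCallbacks
        def _get_node_value(self, key, default=None):
            try:
                content, stat = yield self._client.get(self._zk_path)
            except zookeeper.NoNodeException:
                self._node_missing()  # subclass hook, raises a NotFound error
            returnValue((yaml.load(content) or {}).get(key, default))

        def _set_node_value(self, key, value):
            def update(content, stat):
                data = yaml.load(content) if content else {}
                data[key] = value
                return yaml.safe_dump(data)
            return retry_change(self._client, self._zk_path, update)
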
@@ -292,31 +325,46 @@
         """
         # Verify its a valid charm id
         CharmURL.parse(charm_id).assert_revision()
-
-        def update_charm_id(content, stat):
-            data = yaml.load(content)
-            data["charm"] = charm_id
-            return yaml.safe_dump(data)
-
-        yield retry_change(
-            self._client, "/services/%s" % self.internal_id, update_charm_id)
+        yield self._set_node_value("charm", charm_id)
 
     @inlineCallbacks
     def get_charm_state(self):
         """Return the CharmState for this service."""
-        forumla_state_manager = CharmStateManager(self._client)
         charm_id = yield self.get_charm_id()
-        charm = yield forumla_state_manager.get_charm_state(charm_id)
+        formula_state_manager = CharmStateManager(self._client)
+        charm = yield formula_state_manager.get_charm_state(charm_id)
         returnValue(charm)
 
     @inlineCallbacks
+    def set_constraints(self, constraints):
+        """Set hardware requirements for any new machines running this service.
+
+        :param constraint_strs: a list of constraint strings describing the
+            *service*-level requirements for new hardware.
+        """
+        charm_id = yield self.get_charm_id()
+        constraints = _series_constraints(constraints, charm_id)
+        yield self._set_node_value("constraints", constraints.data)
+
+    @inlineCallbacks
+    def get_constraints(self):
+        """Get *service*-level constraints for new hardware.
+
+        :return: a list of constraint strings describing the service-level
+            hardware constraints.
+        """
+        data = yield self._get_node_value("constraints")
+        returnValue(Constraints(data))
+
+    @inlineCallbacks
     def add_unit_state(self):
         """Add a new service unit to this state.
 
         @return: ServiceUnitState for the created unit.
         """
+        constraints = yield self.get_constraints()
         charm_id = yield self.get_charm_id()
-        unit_data = {"charm": charm_id}
+        unit_data = {"charm": charm_id, "constraints": constraints.data}
         path = yield self._client.create(
             "/units/unit-", yaml.dump(unit_data), flags=zookeeper.SEQUENCE)
 
@@ -679,7 +727,7 @@
     return service_relations
 
 
-class ServiceUnitState(StateBase, AgentStateMixin):
+class ServiceUnitState(StateBase, AgentStateMixin, YAMLStateNodeMixin):
     """State of a service unit registered in an environment.
 
     Each service is composed by units, and each unit represents an
@@ -723,105 +771,81 @@
         return "%s/%d" % (self._service_name, self._unit_sequence)
 
     @property
+    def _zk_path(self):
+        return "/units/%s" % self._internal_id
+
+    @property
     def _ports_path(self):
         """The path for the open ports for this service unit."""
-        return "/units/%s/ports" % self._internal_id
+        return "%s/ports" % self._zk_path
 
     def _get_agent_path(self):
         """Get the zookeeper path for the service unit agent."""
-        return "/units/%s/agent" % self._internal_id
-
-    @inlineCallbacks
+        return "%s/agent" % self._zk_path
+
+    def _check_valid_in(self, topology):
+        ok = topology.has_service(self._internal_service_id)
+        ok = ok and topology.has_service_unit(
+            self._internal_service_id, self._internal_id)
+        if not ok:
+            raise StateChanged()
+
+    def _node_missing(self):
+        raise ServiceUnitStateNotFound(self._service_name)
+
     def get_public_address(self):
         """Get the public address of the unit.
 
         If the unit is unassigned, or its unit agent hasn't started this
         value maybe None.
         """
-        unit_data, stat = yield self._client.get(
-            "/units/%s" % self.internal_id)
-        data = yaml.load(unit_data)
-        returnValue(data.get("public-address"))
+        return self._get_node_value("public-address")
 
-    @inlineCallbacks
     def set_public_address(self, public_address):
         """A unit's public address can be utilized to access the service.
 
         The service must have been exposed for the service to be reachable
         outside of the environment.
         """
-        def update_private_address(content, stat):
-            data = yaml.load(content)
-            data["public-address"] = public_address
-            return yaml.safe_dump(data)
-
-        yield retry_change(
-            self._client,
-            "/units/%s" % self.internal_id,
-            update_private_address)
-
-    @inlineCallbacks
+        return self._set_node_value("public-address", public_address)
+
     def get_private_address(self):
         """Get the private address of the unit.
 
         If the unit is unassigned, or its unit agent hasn't started this
         value maybe None.
         """
-        unit_data, stat = yield self._client.get(
-            "/units/%s" % self.internal_id)
-        data = yaml.load(unit_data)
-        returnValue(data.get("private-address"))
+        return self._get_node_value("private-address")
 
-    @inlineCallbacks
     def set_private_address(self, private_address):
         """A unit's address private to the environment.
 
         Other service will see and utilize this address for relations.
         """
-
-        def update_private_address(content, stat):
-            data = yaml.load(content)
-            data["private-address"] = private_address
-            return yaml.safe_dump(data)
-
-        yield retry_change(
-            self._client,
-            "/units/%s" % self.internal_id,
-            update_private_address)
-
-    @inlineCallbacks
+        return self._set_node_value("private-address", private_address)
+
     def get_charm_id(self):
-        """Get the charm identifier that the unit is currently running."""
-        unit_data, stat = yield self._client.get(
-            "/units/%s" % self.internal_id)
-        data = yaml.load(unit_data)
-        returnValue(data["charm"])
+        """The id of the charm currently deployed on the unit"""
+        return self._get_node_value("charm")
 
     @inlineCallbacks
     def set_charm_id(self, charm_id):
         """Set the charm identifier that the unit is currently running."""
-
-        # Verify its a valid charm id
         CharmURL.parse(charm_id).assert_revision()
-
-        def update_charm_id(content, stat):
-            data = yaml.load(content)
-            data["charm"] = charm_id
-            return yaml.safe_dump(data)
-
-        yield retry_change(
-            self._client, "/units/%s" % self.internal_id, update_charm_id)
+        yield self._set_node_value("charm", charm_id)
+
+    @inlineCallbacks
+    def get_constraints(self):
+        """The hardware constraints for this unit"""
+        data = yield self._get_node_value("constraints")
+        returnValue(Constraints(data))
 
     @inlineCallbacks
     def get_assigned_machine_id(self):
         """Get the assigned machine id or None if the unit is not assigned.
         """
         topology = yield self._read_topology()
-        if not topology.has_service(self._internal_service_id):
-            raise StateChanged()
-        if not topology.has_service_unit(self._internal_service_id,
-                                         self._internal_id):
-            raise StateChanged()
+        self._check_valid_in(topology)
         machine_id = topology.get_service_unit_machine(
             self._internal_service_id, self._internal_id)
         if machine_id is not None:
@@ -833,10 +857,7 @@
         """
 
         def assign_unit(topology):
-            if not topology.has_service(self._internal_service_id) or \
-               not topology.has_service_unit(self._internal_service_id,
-                                             self._internal_id):
-                raise StateChanged()
+            self._check_valid_in(topology)
 
             machine_id = topology.get_service_unit_machine(
                 self._internal_service_id, self._internal_id)
@@ -868,49 +889,48 @@
         `assign_to_machine`.
         """
         # used to provide a writable result for the callback
-        unused_machine_internal_id_wrapper = [None]
+        scope_escaper = [None]
+        unit_constraints = yield self.get_constraints()
 
+        @inlineCallbacks
         def assign_unused_unit(topology):
-            if not topology.has_service(self._internal_service_id) or \
-               not topology.has_service_unit(self._internal_service_id,
-                                             self._internal_id):
-                raise StateChanged()
+            self._check_valid_in(topology)
 
             # XXX We cannot reuse the "root" machine (used by the
             # provisioning agent), but the topology metadata does not
             # properly reflect its allocation.  In the future, once it
             # is managed like any other service, this special case can
             # be removed.
-            root_machine = "machine-%010d" % 0
-            unused_machines = sorted([
-                m for m in topology.get_machines()
-                if not (m == root_machine or
-                        topology.machine_has_units(m))])
-            if not unused_machines:
+            root_machine_id = "machine-%010d" % 0
+            for internal_id in topology.get_machines():
+                if internal_id == root_machine_id:
+                    continue
+                if topology.machine_has_units(internal_id):
+                    continue
+                machine_state = MachineState(self._client, internal_id)
+                machine_constraints = yield machine_state.get_constraints()
+                if machine_constraints.can_satisfy(unit_constraints):
+                    break
+            else:
                 raise NoUnusedMachines()
-            unused_machine_internal_id = unused_machines[0]
+
             topology.assign_service_unit_to_machine(
                 self._internal_service_id,
                 self._internal_id,
-                unused_machine_internal_id)
-            unused_machine_internal_id_wrapper[0] = \
-                unused_machine_internal_id
+                machine_state.internal_id)
+            scope_escaper[0] = machine_state
 
         yield self._retry_topology_change(assign_unused_unit)
-        returnValue(MachineState(
-                self._client, unused_machine_internal_id_wrapper[0]))
+        returnValue(scope_escaper[0])
 
     def unassign_from_machine(self):
         """Unassign this service unit from whatever machine it's assigned to.
         """
 
         def unassign_unit(topology):
-            if not topology.has_service(self._internal_service_id) or \
-               not topology.has_service_unit(self._internal_service_id,
-                                             self._internal_id):
-                raise StateChanged()
+            self._check_valid_in(topology)
 
-            # If for whatever reason it's already not assigned to a
+            # If for whatever reason it's not already assigned to a
             # machine, ignore it and move forward so that we don't
             # have to deal with conflicts.
             machine_id = topology.get_service_unit_machine(
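
The rewritten machine search above relies on Python's `for ... else`: the `else` block runs only when the loop finishes without `break`, i.e. when no idle machine can satisfy the unit's constraints, which is precisely when `NoUnusedMachines` should be raised; after a `break`, `machine_state` is still bound to the matching machine. A minimal standalone illustration of the pattern:

    # Minimal for/else illustration; the data is invented.
    def first_even(numbers):
        for n in numbers:
            if n % 2 == 0:
                break        # skips the else block
        else:
            raise LookupError("no even number found")
        return n             # the loop variable survives the break

    print first_even([1, 4, 9])  # -> 4
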
@@ -920,6 +940,10 @@
                     self._internal_service_id, self._internal_id)
         return self._retry_topology_change(unassign_unit)
 
+    @property
+    def _hook_debug_path(self):
+        return "%s/debug" % self._zk_path
+
     @inlineCallbacks
     def enable_hook_debug(self, hook_names):
         """Enable hook debugging.
@@ -944,10 +968,10 @@
                 hook_names,)
             raise ValueError(msg)
 
-        debug_path = "/units/%s/debug" % self._internal_id
         try:
             yield self._client.create(
-                debug_path, yaml.safe_dump({"debug_hooks": hook_names}),
+                self._hook_debug_path,
+                yaml.safe_dump({"debug_hooks": hook_names}),
                 flags=zookeeper.EPHEMERAL)
         except zookeeper.NodeExistsException:
             raise ServiceUnitDebugAlreadyEnabled(self.unit_name)
@@ -960,9 +984,8 @@
         When a single hook is being debugged this method is used by agents
         to clear the debug settings after they have been processed.
         """
-        debug_path = "/units/%s/debug" % self._internal_id
         try:
-            yield self._client.delete(debug_path)
+            yield self._client.delete(self._hook_debug_path)
         except zookeeper.NoNodeException:
             # We get to the same end state.
             pass
@@ -974,9 +997,8 @@
 
         If no setting is found, None is returned.
         """
-        debug_path = "/units/%s/debug" % self._internal_id
         try:
-            content, stat = yield self._client.get(debug_path)
+            content, stat = yield self._client.get(self._hook_debug_path)
        except zookeeper.NoNodeException:
             # We get to the same end state.
             returnValue(None)
@@ -998,19 +1020,20 @@
         of the current state. It is only a reflection of some change
         happening, to inform watch users should fetch the current value.
         """
-        debug_path = "/units/%s/debug" % self._internal_id
 
         @inlineCallbacks
         def watcher(change_event):
             if permanent and self._client.connected:
-                exists_d, watch_d = self._client.exists_and_watch(debug_path)
+                exists_d, watch_d = self._client.exists_and_watch(
+                    self._hook_debug_path)
 
             yield callback(change_event)
 
             if permanent:
                 watch_d.addCallback(watcher)
 
-        exists_d, watch_d = self._client.exists_and_watch(debug_path)
+        exists_d, watch_d = self._client.exists_and_watch(
+            self._hook_debug_path)
         exists = yield exists_d
         # Setup the watch deferred callback after the user defined callback
         # has returned successfully from the existence invocation.
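
ZooKeeper watches are one-shot, which is why the `permanent` branch re-arms the watch inside `watcher` before the user callback runs; txzookeeper's `exists_and_watch` returns a pair of deferreds, one for the immediate existence check and one that fires on the next change. A simplified schematic of the re-arming loop, with the juju-specific details stripped out:

    # Simplified schematic of the one-shot-watch re-arming pattern;
    # not txzookeeper's actual implementation.
    from twisted.internet.defer import inlineCallbacks

    def watch_forever(client, path, callback):
        @inlineCallbacks
        def watcher(change_event):
            # Re-arm before handling: ZooKeeper watches fire only once.
            _, next_watch_d = client.exists_and_watch(path)
            yield callback(change_event)
            next_watch_d.addCallback(watcher)

        exists_d, watch_d = client.exists_and_watch(path)
        watch_d.addCallback(watcher)
        return exists_d
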
@@ -1020,24 +1043,46 @@
         # Wait on the first callback, reflecting present state, not a zk watch
         yield callback_d
 
+    @property
+    def _upgrade_flag_path(self):
+        return "%s/upgrade" % self._zk_path
+
     @inlineCallbacks
-    def set_upgrade_flag(self):
+    def set_upgrade_flag(self, force=False):
         """Inform the unit it should perform an upgrade.
         """
-        upgrade_path = "/units/%s/upgrade" % self._internal_id
-        try:
-            yield self._client.create(upgrade_path)
-        except zookeeper.NodeExistsException:
-            # We get to the same end state
-            pass
+        assert isinstance(force, bool), "Invalid force upgrade flag"
+
+        def update(content, stat):
+            if not content:
+                flags = dict(force=force)
+                return yaml.dump(flags)
+
+            flags = yaml.load(content)
+            if not isinstance(flags, dict):
+                flags = dict(force=force)
+                return yaml.dump(flags)
+
+            if flags['force'] != force:
+                raise ServiceUnitUpgradeAlreadyEnabled(self.unit_name)
+
+            return content
+
+        yield retry_change(self._client,
+                           self._upgrade_flag_path,
+                           update)
 
     @inlineCallbacks
     def get_upgrade_flag(self):
-        """Returns a boolean denoting if the upgrade flag is set.
+        """Returns a dictionary containing the upgrade flag or False.
         """
-        upgrade_path = "/units/%s/upgrade" % self._internal_id
-        stat = yield self._client.exists(upgrade_path)
-        returnValue(bool(stat))
+        try:
+            content, stat = yield self._client.get(self._upgrade_flag_path)
+            if not content:
+                returnValue(False)
+        except zookeeper.NoNodeException:
+            returnValue(False)
+        returnValue(yaml.load(content))
 
     @inlineCallbacks
     def clear_upgrade_flag(self):
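
The upgrade flag is no longer a bare marker node: `set_upgrade_flag` now writes a YAML mapping carrying a `force` key through `retry_change`, so concurrent writers converge on one value and a conflicting `force` raises `ServiceUnitUpgradeAlreadyEnabled`, while `get_upgrade_flag` returns the mapping, or False when the node is absent or empty. A short sketch of the payload round-trip under those semantics:

    # Payload round-trip sketch for the upgrade-flag node.
    import yaml

    content = yaml.dump(dict(force=False))
    print content              # -> "force: false" (a one-key YAML mapping)
    flags = yaml.load(content)
    print flags["force"]       # -> False; get_upgrade_flag returns the mapping
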
@@ -1046,9 +1091,8 @@
         Typically done by the unit agent before beginning the
         upgrade.
         """
-        upgrade_path = "/units/%s/upgrade" % self._internal_id
         try:
-            yield self._client.delete(upgrade_path)
+            yield self._client.delete(self._upgrade_flag_path)
         except zookeeper.NoNodeException:
             # We get to the same end state.
             pass
@@ -1070,20 +1114,20 @@
         happening, the callback should fetch the current value via
         the API, if needed.
         """
-        upgrade_path = "/units/%s/upgrade" % self._internal_id
-
         @inlineCallbacks
         def watcher(change_event):
 
             if permanent and self._client.connected:
-                exists_d, watch_d = self._client.exists_and_watch(upgrade_path)
+                exists_d, watch_d = self._client.exists_and_watch(
+                    self._upgrade_flag_path)
 
             yield callback(change_event)
 
             if permanent:
                 watch_d.addCallback(watcher)
 
-        exists_d, watch_d = self._client.exists_and_watch(upgrade_path)
+        exists_d, watch_d = self._client.exists_and_watch(
+            self._upgrade_flag_path)
 
         exists = yield exists_d
 
@@ -1096,8 +1140,8 @@
         yield callback_d
 
     @property
-    def _unit_resolve_path(self):
-        return "/units/%s/resolved" % self.internal_id
+    def _unit_resolved_path(self):
+        return "%s/resolved" % self._zk_path
 
     @inlineCallbacks
     def set_resolved(self, retry):
@@ -1115,7 +1159,7 @@
 
         try:
             yield self._client.create(
-                self._unit_resolve_path, yaml.safe_dump({"retry": retry}))
+                self._unit_resolved_path, yaml.safe_dump({"retry": retry}))
         except zookeeper.NodeExistsException:
             raise ServiceUnitResolvedAlreadyEnabled(self.unit_name)
 
@@ -1128,7 +1172,7 @@
         error state.
         """
         try:
-            content, stat = yield self._client.get(self._unit_resolve_path)
+            content, stat = yield self._client.get(self._unit_resolved_path)
         except zookeeper.NoNodeException:
             # Return a default value.
             returnValue(None)
@@ -1138,7 +1182,7 @@
     def clear_resolved(self):
         """Remove any resolved setting on the unit."""
         try:
-            yield self._client.delete(self._unit_resolve_path)
+            yield self._client.delete(self._unit_resolved_path)
         except zookeeper.NoNodeException:
             # We get to the same end state.
             pass
@@ -1159,7 +1203,7 @@
                 returnValue(None)
 
             exists_d, watch_d = self._client.exists_and_watch(
-                self._unit_resolve_path)
+                self._unit_resolved_path)
             try:
                 yield callback(change_event)
             except StopWatcher:
@@ -1167,7 +1211,7 @@
             watch_d.addCallback(watcher)
 
         exists_d, watch_d = self._client.exists_and_watch(
-            self._unit_resolve_path)
+            self._unit_resolved_path)
         exists = yield exists_d
 
         # Setup the watch deferred callback after the user defined callback
@@ -1183,7 +1227,7 @@
 
     @property
     def _relation_resolved_path(self):
-        return "/units/%s/relation-resolved" % self.internal_id
+        return "%s/relation-resolved" % self._zk_path
 
     @inlineCallbacks
     def set_relation_resolved(self, relation_map):