~hazmat/pyjuju/proposed-support

« back to all changes in this revision

Viewing changes to juju/control/status.py

  • Committer: kapil.thangavelu at canonical
  • Date: 2012-05-22 22:08:15 UTC
  • mfrom: (484.1.53 trunk)
  • Revision ID: kapil.thangavelu@canonical.com-20120522220815-acyt8m89i9ybe0w1
merge trunk

Show diffs side-by-side

added added

removed removed

Lines of Context:
7
7
from twisted.internet.defer import inlineCallbacks, returnValue
8
8
import yaml
9
9
 
10
 
from juju.errors import  ProviderError
11
 
from juju.environment.errors import EnvironmentsConfigError
 
10
from juju.control.utils import get_environment
 
11
from juju.errors import ProviderError
12
12
from juju.state.errors import UnitRelationStateNotFound
13
13
from juju.state.charm import CharmStateManager
14
14
from juju.state.machine import MachineStateManager
15
 
from juju.state.service import ServiceStateManager
 
15
from juju.state.service import ServiceStateManager, parse_service_name
16
16
from juju.state.relation import RelationStateManager
17
17
from juju.unit.workflow import WorkflowStateClient
18
18
 
19
 
 
20
19
# a minimal registry for renderers
21
20
# maps from format name to callable
22
21
renderers = {}
65
64
 
66
65
    will return data on the DEPLOYMENT2 environment.
67
66
    """
68
 
    environment = options.environments.get(options.environment)
69
 
    if environment is None and options.environment:
70
 
        raise EnvironmentsConfigError(
71
 
            "Invalid environment %r" % options.environment)
72
 
    elif environment is None:
73
 
        environment = options.environments.get_default()
74
 
 
 
67
    environment = get_environment(options)
75
68
    renderer = renderers.get(options.format)
76
69
    if renderer is None:
77
70
        formats = sorted(renderers.keys())
96
89
    client = yield provider.connect()
97
90
    try:
98
91
        # Collect status information
99
 
        state = yield collect(scope, provider, client, log)
 
92
        command = StatusCommand(client, provider, log)
 
93
        state = yield command(scope)
 
94
        #state = yield collect(scope, provider, client, log)
100
95
    finally:
101
96
        yield client.close()
102
97
    # Render
125
120
    return (services, units)
126
121
 
127
122
 
128
 
@inlineCallbacks
129
 
def collect(scope, machine_provider, client, log):
130
 
    """Extract status information into nested dicts for rendering.
131
 
 
132
 
       `scope`: an optional list of name specifiers. Globbing based
133
 
       wildcards supported. Defaults to all units, services and
134
 
       relations.
135
 
 
136
 
       `machine_provider`: machine provider for the environment
137
 
 
138
 
       `client`: ZK client connection
139
 
 
140
 
       `log`: a Python stdlib logger.
141
 
    """
142
 
    service_manager = ServiceStateManager(client)
143
 
    relation_manager = RelationStateManager(client)
144
 
    machine_manager = MachineStateManager(client)
145
 
    charm_manager = CharmStateManager(client)
146
 
 
147
 
    service_data = {}
148
 
    machine_data = {}
149
 
    state = dict(services=service_data, machines=machine_data)
150
 
 
151
 
    seen_machines = set()
152
 
    filter_services, filter_units = digest_scope(scope)
153
 
 
154
 
    services = yield service_manager.get_all_service_states()
155
 
    for service in services:
156
 
        if len(filter_services):
157
 
            found = False
158
 
            for filter_service in filter_services:
159
 
                if fnmatch(service.service_name, filter_service):
160
 
                    found = True
161
 
                    break
162
 
            if not found:
163
 
                continue
164
 
 
165
 
        unit_data = {}
 
123
class StatusCommand(object):
 
124
    def __init__(self, client, provider, log):
 
125
        """
 
126
        Callable status command object.
 
127
 
 
128
        `client`: ZK client connection
 
129
        `provider`: machine provider for the environment
 
130
        `log`: a Python stdlib logger.
 
131
 
 
132
        """
 
133
        self.client = client
 
134
        self.provider = provider
 
135
        self.log = log
 
136
 
 
137
        self.service_manager = ServiceStateManager(client)
 
138
        self.relation_manager = RelationStateManager(client)
 
139
        self.machine_manager = MachineStateManager(client)
 
140
        self.charm_manager = CharmStateManager(client)
 
141
        self._reset()
 
142
 
 
143
    def _reset(self, scope=None):
 
144
        # init per-run state
 
145
        # self.state is assembled by the various process methods
 
146
        # intermediate access to state is made more convenient
 
147
        # using these references to its internals.
 
148
        self.service_data = {}  # service name: service info
 
149
        self.machine_data = {}  # machine id: machine state
 
150
        self.unit_data = {}  # unit_name: unit_info
 
151
 
 
152
        # used in collecting subordinate (which are added to state in a two
 
153
        # phase pass)
 
154
        self.subordinates = {}  # service : set(principal service names)
 
155
 
 
156
        self.state = dict(services=self.service_data,
 
157
                     machines=self.machine_data)
 
158
 
 
159
        # Filtering info
 
160
        self.seen_machines = set()
 
161
        self.filter_services, self.filter_units = digest_scope(scope)
 
162
 
 
163
    @inlineCallbacks
 
164
    def __call__(self, scope=None):
 
165
        """Extract status information into nested dicts for rendering.
 
166
 
 
167
        `scope`: an optional list of name specifiers. Globbing based wildcards
 
168
        supported. Defaults to all units, services and relations.
 
169
 
 
170
        """
 
171
        self._reset(scope)
 
172
 
 
173
        # Pass 1 Gather Data (including principals and subordinates)
 
174
        # this builds unit info  and container relationships
 
175
        # which is assembled in pass 2 below
 
176
        yield self._process_services()
 
177
 
 
178
        # Pass 2: Nest information according to principal/subordinates
 
179
        # rules
 
180
        self._process_subordinates()
 
181
 
 
182
        yield self._process_machines()
 
183
 
 
184
        returnValue(self.state)
 
185
 
 
186
    @inlineCallbacks
 
187
    def _process_services(self):
 
188
        """
 
189
        For each service gather the following information::
 
190
 
 
191
          <service name>:
 
192
            charm: <charm name>
 
193
            exposed: <expose boolean>
 
194
            relations:
 
195
                 <relation info -- see _process_relations>
 
196
            units:
 
197
                 <unit info -- see _process_units>
 
198
        """
 
199
        services = yield self.service_manager.get_all_service_states()
 
200
        for service in services:
 
201
            if len(self.filter_services):
 
202
                found = False
 
203
                for filter_service in self.filter_services:
 
204
                    if fnmatch(service.service_name, filter_service):
 
205
                        found = True
 
206
                        break
 
207
                if not found:
 
208
                    continue
 
209
            yield self._process_service(service)
 
210
 
 
211
    @inlineCallbacks
 
212
    def _process_service(self, service):
 
213
        """
 
214
        Gather the service info (described in _process_services).
 
215
 
 
216
        `service`: ServiceState instance
 
217
        """
 
218
 
166
219
        relation_data = {}
 
220
        service_data = self.service_data
167
221
 
168
222
        charm_id = yield service.get_charm_id()
169
 
        charm = yield charm_manager.get_charm_state(charm_id)
170
 
 
171
 
        service_data[service.service_name] = dict(units=unit_data,
172
 
                                                  charm=charm.id,
173
 
                                                  relations=relation_data)
174
 
        exposed = yield service.get_exposed_flag()
175
 
        if exposed:
176
 
            service_data[service.service_name].update(exposed=exposed)
177
 
 
 
223
        charm = yield self.charm_manager.get_charm_state(charm_id)
 
224
 
 
225
        service_data[service.service_name] = (
 
226
            dict(units={},
 
227
                 charm=charm.id,
 
228
                 relations=relation_data))
 
229
 
 
230
        if (yield service.is_subordinate()):
 
231
            service_data[service.service_name]["subordinate"] = True
 
232
 
 
233
        yield self._process_expose(service)
 
234
 
 
235
        relations, rel_svc_map = yield self._process_relation_map(
 
236
            service)
 
237
 
 
238
        unit_matched = yield self._process_units(service,
 
239
                                                 relations,
 
240
                                                 rel_svc_map)
 
241
 
 
242
        # after filtering units check if any matched or remove the
 
243
        # service from the output
 
244
        if self.filter_units and not unit_matched:
 
245
            del service_data[service.service_name]
 
246
            return
 
247
 
 
248
        yield self._process_relations(service, relations, rel_svc_map)
 
249
 
 
250
    @inlineCallbacks
 
251
    def _process_units(self, service, relations, rel_svc_map):
 
252
        """
 
253
        Gather unit information for a service::
 
254
 
 
255
            <unit name>:
 
256
                agent-state: <started|pending|etc>
 
257
                machine: <machine id>
 
258
                open-ports: ["port/protocol", ...]
 
259
                public-address: <public dns name or ip>
 
260
                subordinates:
 
261
                     <optional nested units of subordinate services>
 
262
 
 
263
 
 
264
        `service`: ServiceState instance
 
265
        `relations`: list of ServiceRelationState instance for this service
 
266
        `rel_svc_map`: maps relation internal ids to the remote endpoint
 
267
                       service name. This references the name of the remote
 
268
                       endpoint and so is generated per service.
 
269
        """
178
270
        units = yield service.get_all_unit_states()
179
271
        unit_matched = False
180
272
 
181
 
        relations = yield relation_manager.get_relations_for_service(service)
182
 
 
183
273
        for unit in units:
184
 
            if len(filter_units):
 
274
            if len(self.filter_units):
185
275
                found = False
186
 
                for filter_unit in filter_units:
 
276
                for filter_unit in self.filter_units:
187
277
                    if fnmatch(unit.unit_name, filter_unit):
188
278
                        found = True
189
279
                        break
190
280
                if not found:
191
281
                    continue
192
 
 
193
 
            u = unit_data[unit.unit_name] = dict()
194
 
            machine_id = yield unit.get_assigned_machine_id()
195
 
            u["machine"] = machine_id
196
 
            unit_workflow_client = WorkflowStateClient(client, unit)
197
 
            unit_state = yield unit_workflow_client.get_state()
198
 
            if not unit_state:
199
 
                u["state"] = "pending"
200
 
            else:
201
 
                unit_connected = yield unit.has_agent()
202
 
                u["state"] = unit_state if unit_connected else "down"
203
 
            if exposed:
204
 
                open_ports = yield unit.get_open_ports()
205
 
                u["open-ports"] = ["{port}/{proto}".format(**port_info)
206
 
                                   for port_info in open_ports]
207
 
 
208
 
            u["public-address"] = yield unit.get_public_address()
209
 
 
210
 
            # indicate we should include information about this
211
 
            # machine later
212
 
            seen_machines.add(machine_id)
 
282
            yield self._process_unit(service, unit, relations, rel_svc_map)
213
283
            unit_matched = True
214
 
 
215
 
            # collect info on each relation for the service unit
216
 
            relation_status = {}
217
 
            for relation in relations:
218
 
                try:
219
 
                    relation_unit = yield relation.get_unit_state(unit)
220
 
                except UnitRelationStateNotFound:
221
 
                    # This exception will occur when relations are
222
 
                    # established between services without service
223
 
                    # units, and therefore never have any
224
 
                    # corresponding service relation units. This
225
 
                    # scenario does not occur in actual deployments,
226
 
                    # but can happen in test circumstances. In
227
 
                    # particular, it will happen with a misconfigured
228
 
                    # provider, which exercises this codepath.
229
 
                    continue  # should not occur, but status should not fail
230
 
                relation_workflow_client = WorkflowStateClient(
231
 
                    client, relation_unit)
232
 
                relation_workflow_state = \
233
 
                    yield relation_workflow_client.get_state()
234
 
                relation_status[relation.relation_name] = dict(
235
 
                    state=relation_workflow_state)
236
 
            u["relations"] = relation_status
237
 
 
238
 
        # after filtering units check if any matched or remove the
239
 
        # service from the output
240
 
        if filter_units and not unit_matched:
241
 
            del service_data[service.service_name]
242
 
            continue
243
 
 
244
 
        for relation in relations:
245
 
            rel_services = yield relation.get_service_states()
246
 
 
247
 
            # A single related service implies a peer relation. More
248
 
            # imply a bi-directional provides/requires relationship.
249
 
            # In the latter case we omit the local side of the relation
250
 
            # when reporting.
251
 
            if len(rel_services) > 1:
252
 
                # Filter out self from multi-service relations.
253
 
                rel_services = [
254
 
                    rsn for rsn in rel_services if rsn.service_name !=
255
 
                    service.service_name]
256
 
 
257
 
            if len(rel_services) > 1:
258
 
                raise ValueError("Unexpected relationship with more "
259
 
                                 "than 2 endpoints")
260
 
 
261
 
            rel_service = rel_services[0]
262
 
            relation_data[relation.relation_name] = rel_service.service_name
263
 
 
264
 
    machines = yield machine_manager.get_all_machine_states()
265
 
    for machine_state in machines:
266
 
        if (filter_services or filter_units) and \
267
 
                machine_state.id not in seen_machines:
268
 
            continue
269
 
 
 
284
        returnValue(unit_matched)
 
285
 
 
286
    @inlineCallbacks
 
287
    def _process_unit(self, service, unit, relations, rel_svc_map):
 
288
        """ Generate unit info for a single unit of a single service.
 
289
 
 
290
        `unit`: ServiceUnitState
 
291
        see `_process_units` for an explanation of other arguments.
 
292
 
 
293
        """
 
294
        u = self.unit_data[unit.unit_name] = dict()
 
295
        container = yield unit.get_container()
 
296
 
 
297
        if container:
 
298
            u["container"] = container.unit_name
 
299
            self.subordinates.setdefault(unit.service_name,
 
300
                                    set()).add(container.service_name)
 
301
 
 
302
        machine_id = yield unit.get_assigned_machine_id()
 
303
        u["machine"] = machine_id
 
304
        unit_workflow_client = WorkflowStateClient(self.client, unit)
 
305
        unit_state = yield unit_workflow_client.get_state()
 
306
        if not unit_state:
 
307
            u["agent-state"] = "pending"
 
308
        else:
 
309
            unit_connected = yield unit.has_agent()
 
310
            u["agent-state"] = unit_state.replace("_", "-") \
 
311
                               if unit_connected else "down"
 
312
 
 
313
        exposed = self.service_data[service.service_name].get("exposed")
 
314
        if exposed:
 
315
            open_ports = yield unit.get_open_ports()
 
316
            u["open-ports"] = ["{port}/{proto}".format(**port_info)
 
317
                               for port_info in open_ports]
 
318
 
 
319
        u["public-address"] = yield unit.get_public_address()
 
320
 
 
321
        # indicate we should include information about this
 
322
        # machine later
 
323
        self.seen_machines.add(machine_id)
 
324
 
 
325
        # collect info on each relation for the service unit
 
326
        yield self._process_unit_relations(service, unit,
 
327
                                           relations, rel_svc_map)
 
328
 
 
329
    @inlineCallbacks
 
330
    def _process_relation_map(self, service):
 
331
        """Generate a mapping from a services relations to the service name of
 
332
        the remote endpoints.
 
333
 
 
334
        returns: ([ServiceRelationState, ...], mapping)
 
335
        """
 
336
        relation_data = self.service_data[service.service_name]["relations"]
 
337
        relation_mgr = self.relation_manager
 
338
        relations = yield relation_mgr.get_relations_for_service(service)
 
339
        rel_svc_map = {}
 
340
 
 
341
        for relation in relations:
 
342
            rel_services = yield relation.get_service_states()
 
343
 
 
344
            # A single related service implies a peer relation. More
 
345
            # imply a bi-directional provides/requires relationship.
 
346
            # In the latter case we omit the local side of the relation
 
347
            # when reporting.
 
348
            if len(rel_services) > 1:
 
349
                # Filter out self from multi-service relations.
 
350
                rel_services = [
 
351
                    rsn for rsn in rel_services if rsn.service_name !=
 
352
                    service.service_name]
 
353
 
 
354
            if len(rel_services) > 1:
 
355
                raise ValueError("Unexpected relationship with more "
 
356
                                 "than 2 endpoints")
 
357
 
 
358
            rel_service = rel_services[0]
 
359
            relation_data.setdefault(relation.relation_name, set()).add(
 
360
                rel_service.service_name)
 
361
            rel_svc_map[relation.internal_relation_id] = (
 
362
                 rel_service.service_name)
 
363
 
 
364
        returnValue((relations, rel_svc_map))
 
365
 
 
366
    @inlineCallbacks
 
367
    def _process_relations(self, service, relations, rel_svc_map):
 
368
        """Generate relation information for a given service
 
369
 
 
370
        Each service with relations will have a relations dict
 
371
        nested under it with one or more relations described::
 
372
 
 
373
           relations:
 
374
              <relation name>:
 
375
              - <remote service name>
 
376
 
 
377
        """
 
378
        relation_data = self.service_data[service.service_name]["relations"]
 
379
 
 
380
        for relation in relations:
 
381
            rel_services = yield relation.get_service_states()
 
382
 
 
383
            # A single related service implies a peer relation. More
 
384
            # imply a bi-directional provides/requires relationship.
 
385
            # In the latter case we omit the local side of the relation
 
386
            # when reporting.
 
387
            if len(rel_services) > 1:
 
388
                # Filter out self from multi-service relations.
 
389
                rel_services = [
 
390
                    rsn for rsn in rel_services if rsn.service_name !=
 
391
                    service.service_name]
 
392
 
 
393
            if len(rel_services) > 1:
 
394
                raise ValueError("Unexpected relationship with more "
 
395
                                 "than 2 endpoints")
 
396
 
 
397
            rel_service = rel_services[0]
 
398
            relation_data.setdefault(
 
399
                relation.relation_name, set()).add(
 
400
                    rel_service.service_name)
 
401
            rel_svc_map[relation.internal_relation_id] = (
 
402
                rel_service.service_name)
 
403
 
 
404
        # Normalize the sets back to lists
 
405
        for r in relation_data:
 
406
            relation_data[r] = sorted(relation_data[r])
 
407
 
 
408
    @inlineCallbacks
 
409
    def _process_unit_relations(self, service, unit, relations, rel_svc_map):
 
410
        """Collect UnitRelationState information per relation and per unit.
 
411
 
 
412
        Includes information under each unit for its relations including
 
413
        its relation state and information about any possible errors.
 
414
 
 
415
        see `_process_relations` for argument information
 
416
        """
 
417
        u = self.unit_data[unit.unit_name]
 
418
        relation_errors = {}
 
419
 
 
420
        for relation in relations:
 
421
            try:
 
422
                relation_unit = yield relation.get_unit_state(unit)
 
423
            except UnitRelationStateNotFound:
 
424
                # This exception will occur when relations are
 
425
                # established between services without service
 
426
                # units, and therefore never have any
 
427
                # corresponding service relation units.
 
428
                # UPDATE: common with subordinate services, and
 
429
                # some testing scenarios.
 
430
                continue
 
431
            relation_workflow_client = WorkflowStateClient(
 
432
                self.client, relation_unit)
 
433
            workflow_state = yield relation_workflow_client.get_state()
 
434
 
 
435
            rel_svc_name = rel_svc_map.get(relation.internal_relation_id)
 
436
            if rel_svc_name and workflow_state not in ("up", None):
 
437
                relation_errors.setdefault(
 
438
                    relation.relation_name, set()).add(rel_svc_name)
 
439
 
 
440
        if relation_errors:
 
441
            # Normalize sets and store.
 
442
            u["relation-errors"] = dict(
 
443
                [(r, sorted(relation_errors[r])) for r in relation_errors])
 
444
 
 
445
    def _process_subordinates(self):
 
446
        """Properly nest subordinate units under their principal service's
 
447
        unit nodes. Services and units are generated in one pass, then
 
448
        iterated by this method to structure the output data to reflect
 
449
        actual unit containment.
 
450
 
 
451
        Subordinate units will include the following::
 
452
           subordinate: true
 
453
            subordinate-to:
 
454
            - <principal service names>
 
455
 
 
456
        Principal services that have subordinates will include::
 
457
 
 
458
            subordinates:
 
459
              <subordinate unit name>:
 
460
                agent-state: <agent state>
 
461
        """
 
462
        service_data = self.service_data
 
463
 
 
464
        for unit_name, u in self.unit_data.iteritems():
 
465
            container = u.get("container")
 
466
            if container:
 
467
                d = self.unit_data[container].setdefault("subordinates", {})
 
468
                d[unit_name] = u
 
469
 
 
470
                # remove keys that don't appear in output or come from container
 
471
                for key in ("container", "machine", "public-address"):
 
472
                    u.pop(key, None)
 
473
            else:
 
474
                service_name = parse_service_name(unit_name)
 
475
                service_data[service_name]["units"][unit_name] = u
 
476
 
 
477
        for sub_service, principal_services in self.subordinates.iteritems():
 
478
            service_data[sub_service]["subordinate-to"] = sorted(principal_services)
 
479
            service_data[sub_service].pop("units", None)
 
480
 
 
481
    @inlineCallbacks
 
482
    def _process_expose(self, service):
 
483
        """Indicate if a service is exposed or not."""
 
484
        exposed = yield service.get_exposed_flag()
 
485
        if exposed:
 
486
            self.service_data[service.service_name].update(exposed=exposed)
 
487
        returnValue(exposed)
 
488
 
 
489
    @inlineCallbacks
 
490
    def _process_machines(self):
 
491
        """Gather machine information.
 
492
 
 
493
        machines:
 
494
          <machine id>:
 
495
            agent-state: <agent state>
 
496
            dns-name: <dns name>
 
497
            instance-id: <provider specific instance id>
 
498
            instance-state: <instance state>
 
499
        """
 
500
 
 
501
        machines = yield self.machine_manager.get_all_machine_states()
 
502
        for machine_state in machines:
 
503
            if (self.filter_services or self.filter_units) and \
 
504
                    machine_state.id not in self.seen_machines:
 
505
                continue
 
506
            yield self._process_machine(machine_state)
 
507
 
 
508
    @inlineCallbacks
 
509
    def _process_machine(self, machine_state):
 
510
        """
 
511
        `machine_state`: MachineState instance
 
512
        """
270
513
        instance_id = yield machine_state.get_instance_id()
271
514
        m = {"instance-id": instance_id \
272
515
             if instance_id is not None else "pending"}
273
516
        if instance_id is not None:
274
517
            try:
275
 
                pm = yield machine_provider.get_machine(instance_id)
 
518
                pm = yield self.provider.get_machine(instance_id)
276
519
                m["dns-name"] = pm.dns_name
277
520
                m["instance-state"] = pm.state
278
521
                if (yield machine_state.has_agent()):
279
522
                    # if the agent's connected, we're fine
280
 
                    m["state"] = "running"
 
523
                    m["agent-state"] = "running"
281
524
                else:
282
 
                    units = (yield machine_state.get_all_service_unit_states())
 
525
                    units = (
 
526
                        yield machine_state.get_all_service_unit_states())
283
527
                    for unit in units:
284
 
                        unit_workflow_client = WorkflowStateClient(client, unit)
 
528
                        unit_workflow_client = WorkflowStateClient(
 
529
                            self.client, unit)
285
530
                        if (yield unit_workflow_client.get_state()):
286
 
                            # for unit to have a state, its agent must have
287
 
                            # run, which implies the machine agent must have
288
 
                            # been running correctly at some point in the past
289
 
                            m["state"] = "down"
 
531
                            # for unit to have a state, its agent must
 
532
                            # have run, which implies the machine agent
 
533
                            # must have been running correctly at some
 
534
                            # point in the past
 
535
                            m["agent-state"] = "down"
290
536
                            break
291
537
                    else:
292
538
                        # otherwise we're probably just still waiting
293
 
                        m["state"] = "not-started"
 
539
                        m["agent-state"] = "not-started"
294
540
            except ProviderError:
295
541
                # The provider doesn't have machine information
296
 
                log.error(
 
542
                self.log.error(
297
543
                    "Machine provider information missing: machine %s" % (
298
544
                        machine_state.id))
299
545
 
300
 
        machine_data[machine_state.id] = m
301
 
 
302
 
    returnValue(state)
 
546
        self.machine_data[machine_state.id] = m
303
547
 
304
548
 
305
549
def render_yaml(data, filelike, environment):
342
586
        "shape": "box",
343
587
        "style": "filled",
344
588
        },
 
589
 
 
590
    "subunit": {
 
591
        "color": "#c9c9c9",
 
592
        "fontcolor": "#ffffff",
 
593
        "shape": "box",
 
594
        "style": "filled",
 
595
        "rank": "same"
 
596
        },
 
597
 
345
598
    "relation": {
346
599
        "dir": "none"}
347
600
    }
350
603
def safe_dot_label(name):
351
604
    """Convert a name to a label safe for use in DOT.
352
605
 
353
 
    Works around an issue where service names like wiki-db will
354
 
    produce DOT items with names like cluster_wiki-db where the
355
 
    trailing '-' invalidates the name.
 
606
    Works around an issue where service names like wiki-db will produce DOT
 
607
    items with names like cluster_wiki-db where the trailing '-' invalidates
 
608
    the name.
 
609
 
356
610
    """
357
611
    return name.replace("-", "_")
358
612
 
385
639
                           **style["service"])
386
640
        cluster.add_node(snode)
387
641
 
388
 
        for unit_name, unit in service["units"].iteritems():
389
 
            un = pydot.Node(safe_dot_label(unit_name),
390
 
                            label="<%s<br/><i>%s</i>>" % (
391
 
                                unit_name,
392
 
                                unit.get("public-address")),
393
 
                            **style["unit"])
394
 
            cluster.add_node(un)
 
642
        for unit_name, unit in service.get("units", {}).iteritems():
 
643
            subordinates = unit.get("subordinates")
 
644
            if subordinates:
 
645
                container = pydot.Subgraph()
 
646
                un = pydot.Node(safe_dot_label(unit_name),
 
647
                                    label="<%s<br/><i>%s</i>>" % (
 
648
                                        unit_name,
 
649
                                        unit.get("public-address")),
 
650
                                    **style["unit"])
 
651
                container.add_node(un)
 
652
                for sub in subordinates:
 
653
                    s = pydot.Node(safe_dot_label(sub),
 
654
                                   label="<%s<br/>>" % (sub),
 
655
                                   **style["subunit"])
 
656
                    container.add_node(s)
 
657
                    container.add_edge(pydot.Edge(un, s, **style["relation"]))
 
658
                cluster.add_subgraph(container)
 
659
            else:
 
660
                un = pydot.Node(safe_dot_label(unit_name),
 
661
                                label="<%s<br/><i>%s</i>>" % (
 
662
                                    unit_name,
 
663
                                    unit.get("public-address")),
 
664
                                **style["unit"])
 
665
                cluster.add_node(un)
395
666
 
396
667
            cluster.add_edge(pydot.Edge(snode, un))
397
668
 
399
670
 
400
671
        # now map the relationships
401
672
        for kind, relation in service["relations"].iteritems():
402
 
            src = safe_dot_label(relation)
403
 
            dest = safe_dot_label(service_name)
404
 
            descriptor = tuple(sorted((src, dest)))
405
 
            if descriptor not in seen_relations:
406
 
                seen_relations.add(descriptor)
407
 
                dot.add_edge(pydot.Edge(
408
 
                        src,
409
 
                        dest,
410
 
                        label=kind,
411
 
                        **style["relation"]
412
 
                    ))
 
673
            if not isinstance(relation, list):
 
674
                relation = (relation,)
 
675
            for rel in relation:
 
676
                src = safe_dot_label(rel)
 
677
                dest = safe_dot_label(service_name)
 
678
                descriptor = ":".join(tuple(sorted((src, dest))))
 
679
                #kind = safe_dot_label("%s/%s" % (descriptor, kind))
 
680
                if descriptor not in seen_relations:
 
681
                    seen_relations.add(descriptor)
 
682
                    dot.add_edge(pydot.Edge(
 
683
                            src,
 
684
                            dest,
 
685
                            label=kind,
 
686
                            **style["relation"]
 
687
                        ))
413
688
 
414
 
    if format == 'dot':
 
689
    if format == "dot":
415
690
        filelike.write(dot.to_string())
416
691
    else:
417
692
        filelike.write(dot.create(format=format))