|
5537
by Gary van der Merwe
Assign copyright to Canonical Ltd. |
1 |
# Copyright (C) 2009, 2010 Canonical Ltd
|
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
2 |
#
|
3 |
# This program is free software; you can redistribute it and/or
|
|
4 |
# modify it under the terms of the GNU General Public License
|
|
5 |
# as published by the Free Software Foundation; either version 2
|
|
6 |
# of the License, or (at your option) any later version.
|
|
7 |
#
|
|
8 |
# This program is distributed in the hope that it will be useful,
|
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
12 |
#
|
|
13 |
# You should have received a copy of the GNU General Public License
|
|
14 |
# along with this program; if not, write to the Free Software
|
|
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
16 |
||
17 |
"""
|
|
18 |
Layout a revision graph for visual representation, allowing for filtering and
|
|
19 |
collapsible branches.
|
|
20 |
||
21 |
Usage example
|
|
22 |
=============
|
|
23 |
||
24 |
.. python::
|
|
25 |
bi = loggraphviz.BranchInfo('branch-label', tree, branch)
|
|
26 |
graph_viz = loggraphviz.GraphVizLoader([bi], bi, False)
|
|
27 |
graph_viz.load()
|
|
28 |
state = loggraphviz.GraphVizFilterState(graph_viz)
|
|
29 |
computed = graph_viz.compute_viz(state)
|
|
30 |
||
31 |
"""
|
|
32 |
||
33 |
import gc |
|
34 |
from itertools import izip |
|
35 |
||
36 |
from bzrlib import errors |
|
37 |
from bzrlib.bzrdir import BzrDir |
|
38 |
from bzrlib.transport.local import LocalTransport |
|
39 |
from bzrlib.revision import NULL_REVISION, CURRENT_REVISION |
|
40 |
from bzrlib.graph import ( |
|
41 |
Graph, |
|
42 |
StackedParentsProvider, |
|
43 |
KnownGraph, |
|
44 |
DictParentsProvider, |
|
45 |
)
|
|
46 |
||
47 |
||
48 |
class BranchInfo(object): |
|
49 |
"""Wrapper for a branch, it's working tree, if available, and a label."""
|
|
50 |
||
51 |
def __init__(self, label, tree, branch, index=None): |
|
52 |
self.label = label |
|
53 |
self.tree = tree |
|
54 |
self.branch = branch |
|
55 |
self.index = index |
|
56 |
||
57 |
def __hash__(self): |
|
58 |
return self.branch.base.__hash__() |
|
59 |
||
60 |
def __eq__(self, other): |
|
61 |
if isinstance(other, BranchInfo): |
|
62 |
return self.branch.base.__eq__(other.branch.base) |
|
63 |
return False |
|
64 |
||
65 |
||
66 |
class RevisionData(object): |
|
67 |
"""
|
|
68 |
Container for data for a revision in the graph that gets calculated
|
|
69 |
when the graph is loaded.
|
|
70 |
"""
|
|
71 |
||
72 |
# Instance of this object are typically named "rev".
|
|
73 |
||
74 |
__slots__ = ["index", "_merge_sort_node", "branch", "_revno_str", |
|
75 |
"merges", "merged_by", 'branch_id', 'color'] |
|
76 |
||
77 |
def __init__(self, index, merge_sort_node): |
|
78 |
"""Create a new RevisionData instance."""
|
|
79 |
self.index = index |
|
80 |
self._merge_sort_node = merge_sort_node |
|
81 |
self.branch = None |
|
82 |
self._revno_str = None |
|
83 |
self.merges = [] |
|
84 |
"""Revision indexes that this revision merges"""
|
|
85 |
self.merged_by = None |
|
86 |
"""Revision index that merges this revision."""
|
|
87 |
self.branch_id = self._merge_sort_node.revno[0:-1] |
|
88 |
self.color = reduce(lambda x, y: x + y, self.branch_id, 0) |
|
89 |
||
90 |
revid = property(lambda self: self._merge_sort_node.key) |
|
91 |
merge_depth = property(lambda self: self._merge_sort_node.merge_depth) |
|
92 |
revno_sequence = property(lambda self: self._merge_sort_node.revno) |
|
93 |
end_of_merge = property(lambda self: self._merge_sort_node.end_of_merge) |
|
94 |
||
95 |
def get_revno_str(self): |
|
96 |
if self._revno_str is None: |
|
97 |
self._revno_str = ".".join(["%d" % (revno) |
|
98 |
for revno in self.revno_sequence]) |
|
99 |
if self.revid.startswith(CURRENT_REVISION): |
|
100 |
self._revno_str += " ?" |
|
101 |
return self._revno_str |
|
102 |
revno_str = property(get_revno_str) |
|
103 |
||
104 |
def __repr__(self): |
|
105 |
return "%s <%s %s>" % (self.__class__.__name__, self.revno_str, |
|
106 |
self.revid) |
|
107 |
||
108 |
class BranchLine(object): |
|
109 |
"""Container for data for a branch line, aka merge line."""
|
|
110 |
||
111 |
__slots__ = ["branch_id", "revs", "merges", "merged_by", |
|
112 |
"color", "merge_depth"] |
|
113 |
||
114 |
def __init__(self, branch_id): |
|
115 |
self.branch_id = branch_id |
|
116 |
self.revs = [] |
|
117 |
self.merges = [] |
|
118 |
self.merged_by = [] |
|
119 |
self.merge_depth = 0 |
|
120 |
||
121 |
def __repr__(self): |
|
122 |
return "%s <%s>" % (self.__class__.__name__, self.branch_id) |
|
123 |
||
124 |
||
125 |
class GhostRevisionError(errors.InternalBzrError): |
|
126 |
||
127 |
_fmt = "{%(revision_id)s} is a ghost." |
|
128 |
||
129 |
def __init__(self, revision_id): |
|
130 |
errors.BzrError.__init__(self) |
|
131 |
self.revision_id = revision_id |
|
132 |
||
133 |
||
134 |
class GraphVizLoader(object): |
|
135 |
"""
|
|
136 |
Loads graph for branches and provides computed layout for visual
|
|
137 |
representation.
|
|
138 |
"""
|
|
139 |
||
140 |
# Most list/dicts related to revisions are unfiltered. When we do a graph
|
|
141 |
# layout, we filter these revisions. A revision may be filter out because:
|
|
142 |
# * It's branch is hidden (or collapsed).
|
|
143 |
# * We have a specified file_id(s), and the revision does not touch the
|
|
144 |
# file_id(s).
|
|
145 |
# * We have a search, and the revision does not match the search.
|
|
146 |
#
|
|
147 |
# The main list of unfiltered revisions is self.revisions. A revisions
|
|
148 |
# index in revisions are normally called index. The main list of filtered
|
|
149 |
# revisions is filtered_revs. Revision indexes in this list are called
|
|
150 |
# f_index.
|
|
151 |
||
152 |
def __init__(self, branches, primary_bi, no_graph): |
|
153 |
self.branches = branches |
|
154 |
"""List of BranchInfo for each branch."""
|
|
155 |
self.primary_bi = primary_bi |
|
156 |
self.no_graph = no_graph |
|
157 |
||
158 |
self.repos = [] |
|
159 |
self.local_repo_copies = [] |
|
160 |
"""A list of repositories that revisions will be attempted to be loaded
|
|
161 |
from first."""
|
|
162 |
||
163 |
self.revid_head_info = {} |
|
164 |
"""Dict with a keys of head revid and value of
|
|
165 |
(list of (branch, label),
|
|
166 |
list of revids that are unique to this head)
|
|
167 |
|
|
168 |
We can't just store the BranchInfo, because the label may have
|
|
169 |
have addition text in it, e.g. "Working Tree", "Pending Merges"
|
|
170 |
"""
|
|
171 |
||
172 |
self.revid_branch_info = {} |
|
173 |
||
174 |
self.ghosts = set() |
|
175 |
||
176 |
self.revisions = [] |
|
177 |
"""List of RevisionInfo from merge_sort."""
|
|
178 |
||
179 |
self.revid_rev = {} |
|
180 |
self.graph_children = {} |
|
181 |
||
182 |
self.tags = {} # map revid -> tags set |
|
183 |
||
184 |
def load(self): |
|
185 |
# Get a unique list of repositories. If the url is the same,
|
|
186 |
# we consider it the same repositories
|
|
187 |
repo_urls = set() |
|
188 |
for bi in self.branches: |
|
189 |
repo = bi.branch.repository |
|
190 |
if repo.base not in repo_urls: |
|
191 |
repo_urls.add(repo.base) |
|
192 |
self.repos.append(repo) |
|
193 |
||
194 |
no_local_repos = True |
|
195 |
for repo in self.repos: |
|
196 |
if repo_is_local(repo): |
|
197 |
no_local_repos = False |
|
198 |
if no_local_repos: |
|
199 |
self.load_current_dir_repo() |
|
200 |
self.repos.sort(key=lambda repo: not repo_is_local(repo)) |
|
201 |
||
202 |
self.lock_read_branches() |
|
203 |
try: |
|
204 |
head_revids, graph_parents = self.load_graph_parents() |
|
205 |
self.process_graph_parents(head_revids, graph_parents) |
|
206 |
||
207 |
self.compute_head_info() |
|
208 |
del self.graph |
|
209 |
||
210 |
if not self.no_graph: |
|
211 |
self.compute_branch_lines() |
|
212 |
self.compute_merge_info() |
|
213 |
||
214 |
self.load_tags() |
|
215 |
finally: |
|
216 |
self.unlock_branches() |
|
217 |
||
218 |
||
219 |
def load_current_dir_repo(self): |
|
220 |
# There are no local repositories. Try open the repository
|
|
221 |
# of the current directory, and try load revisions data from
|
|
222 |
# this before trying from remote repositories. This makes
|
|
223 |
# the common use case of viewing a remote branch that is
|
|
224 |
# related to the current branch much faster, because most
|
|
225 |
# of the revision can be loaded from the local repository.
|
|
226 |
try: |
|
227 |
bzrdir, relpath = BzrDir.open_containing(u".") |
|
228 |
repo = bzrdir.find_repository() |
|
229 |
self.repos.add(repo) |
|
230 |
self.local_repo_copies.append(repo) |
|
231 |
except Exception: |
|
232 |
pass
|
|
233 |
||
234 |
def update_ui(self): |
|
235 |
pass
|
|
236 |
||
237 |
def throbber_show(self): |
|
238 |
pass
|
|
239 |
||
240 |
def throbber_hide(self): |
|
241 |
pass
|
|
242 |
||
243 |
def lock_read_branches(self): |
|
244 |
for bi in self.branches: |
|
245 |
bi.branch.lock_read() |
|
246 |
for repo in self.repos: |
|
247 |
repo.lock_read() |
|
248 |
||
249 |
def unlock_branches(self): |
|
250 |
for bi in self.branches: |
|
251 |
bi.branch.unlock() |
|
252 |
for repo in self.repos: |
|
253 |
repo.unlock() |
|
254 |
||
255 |
#def lock_read_repos(self):
|
|
256 |
# for repo in self.repos.itervalues():
|
|
257 |
# repo.lock_read()
|
|
258 |
#
|
|
259 |
#def unlock_repos(self):
|
|
260 |
# for repo in self.repos.itervalues():
|
|
261 |
# repo.unlock()
|
|
262 |
||
263 |
def load_tags(self): |
|
264 |
self.tags = {} |
|
265 |
for bi in self.branches: |
|
266 |
# revid to tags map
|
|
267 |
branch_tags = bi.branch.tags.get_reverse_tag_dict() |
|
268 |
for revid, tags in branch_tags.iteritems(): |
|
269 |
if revid in self.tags: |
|
270 |
self.tags[revid].update(set(tags)) |
|
271 |
else: |
|
272 |
self.tags[revid] = set(tags) |
|
273 |
||
274 |
def append_head_info(self, revid, branch_info, tag): |
|
275 |
if not revid == NULL_REVISION: |
|
276 |
if not revid in self.revid_head_info: |
|
277 |
self.revid_head_info[revid] = ([], []) |
|
278 |
self.revid_head_info[revid][0].append((branch_info, tag)) |
|
279 |
||
280 |
# So that early calls to get_revid_branch work
|
|
281 |
self.revid_branch_info[revid] = branch_info |
|
282 |
||
283 |
def load_branch_heads(self, bi): |
|
284 |
branch_heads = [] |
|
285 |
||
286 |
def append_head_info(revid, branch_info, label): |
|
287 |
self.append_head_info(revid, branch_info, label) |
|
288 |
branch_heads.append(revid) |
|
289 |
||
290 |
if len(self.branches) > 0: |
|
291 |
label = bi.label |
|
292 |
else: |
|
293 |
label = None |
|
294 |
||
295 |
branch_last_revision = bi.branch.last_revision() |
|
296 |
append_head_info(branch_last_revision, bi, bi.label) |
|
297 |
self.update_ui() |
|
298 |
||
299 |
if bi.tree: |
|
300 |
parent_ids = bi.tree.get_parent_ids() |
|
301 |
if parent_ids: |
|
302 |
# first parent is last revision of the tree
|
|
303 |
revid = parent_ids[0] |
|
304 |
if revid != branch_last_revision: |
|
305 |
# working tree is out of date
|
|
306 |
if label: |
|
307 |
wt_label = "%s - Working Tree" % label |
|
308 |
else: |
|
309 |
wt_label = "Working Tree" |
|
310 |
append_head_info(revid, bi, wt_label) |
|
311 |
# other parents are pending merges
|
|
312 |
for revid in parent_ids[1:]: |
|
313 |
if label: |
|
314 |
pm_label = "%s - Pending Merge" % label |
|
315 |
else: |
|
316 |
pm_label = "Pending Merge" |
|
317 |
append_head_info(revid, bi, pm_label) |
|
318 |
self.update_ui() |
|
319 |
return branch_heads, branch_heads, () |
|
320 |
||
321 |
def load_graph_parents(self): |
|
322 |
"""Load the heads of the graph, and the graph parents"""
|
|
323 |
||
324 |
extra_parents = {} |
|
325 |
branches_heads = [] |
|
326 |
||
327 |
def load_branch_heads(bi, insert_at_begin=False): |
|
328 |
load_heads, sort_heads, extra_parents_ = self.load_branch_heads(bi) |
|
329 |
for key, parents in extra_parents_: |
|
330 |
extra_parents[key] = parents |
|
331 |
if insert_at_begin: |
|
332 |
branches_heads.insert(0, (load_heads, sort_heads)) |
|
333 |
else: |
|
334 |
branches_heads.append((load_heads, sort_heads)) |
|
335 |
||
336 |
for bi in self.branches: |
|
337 |
# Don't do the primary branch, as that will be inserted later at
|
|
338 |
# the first position.
|
|
339 |
if bi != self.primary_bi: |
|
340 |
load_branch_heads(bi) |
|
341 |
||
342 |
if len(branches_heads) >= 2: |
|
343 |
head_revids = [revid for load_heads, sort_heads in branches_heads |
|
344 |
for revid in load_heads] |
|
345 |
head_revs = self.load_revisions(head_revids) |
|
346 |
||
347 |
get_max_timestamp = lambda branch_heads: max( |
|
348 |
[head_revs[revid].timestamp for revid in branch_heads[0]]) |
|
349 |
branches_heads.sort(key=get_max_timestamp, reverse=True) |
|
350 |
||
351 |
if self.primary_bi: |
|
352 |
load_branch_heads(self.primary_bi, True) |
|
353 |
||
354 |
load_heads = [revid for load_heads_, sort_heads_ in branches_heads |
|
355 |
for revid in load_heads_] |
|
356 |
sort_heads = [revid for load_heads_, sort_heads_ in branches_heads |
|
357 |
for revid in sort_heads_] |
|
358 |
||
359 |
parents_providers = [repo._make_parents_provider() \ |
|
360 |
for repo in self.repos] |
|
361 |
parents_providers.append(DictParentsProvider(extra_parents)) |
|
362 |
self.graph = Graph(StackedParentsProvider(parents_providers)) |
|
363 |
||
364 |
return sort_heads, self.graph.iter_ancestry(sort_heads) |
|
365 |
||
366 |
def process_graph_parents(self, head_revids, graph_parents_iter): |
|
367 |
graph_parents = {} |
|
368 |
self.ghosts = set() |
|
369 |
||
370 |
for (revid, parent_revids) in graph_parents_iter: |
|
371 |
if revid == NULL_REVISION: |
|
372 |
continue
|
|
373 |
if parent_revids is None: |
|
374 |
#Ghost
|
|
375 |
graph_parents[revid] = () |
|
376 |
self.ghosts.add(revid) |
|
377 |
elif parent_revids == (NULL_REVISION,): |
|
378 |
graph_parents[revid] = () |
|
379 |
else: |
|
380 |
graph_parents[revid] = parent_revids |
|
381 |
if len(graph_parents) % 100 == 0: |
|
382 |
self.update_ui() |
|
383 |
||
384 |
graph_parents["top:"] = head_revids |
|
385 |
||
386 |
if len(graph_parents) > 0: |
|
387 |
enabled = gc.isenabled() |
|
388 |
if enabled: |
|
389 |
gc.disable() |
|
390 |
||
391 |
def make_kg(): |
|
392 |
return KnownGraph(graph_parents) |
|
393 |
self.known_graph = make_kg() |
|
394 |
||
395 |
merge_sorted_revisions = self.known_graph.merge_sort('top:') |
|
396 |
# So far, we are a bit faster than the pure-python code. But the
|
|
397 |
# last step hurts. Specifically, we take
|
|
398 |
# 377ms KnownGraph(self.graph_parents)
|
|
399 |
# 263ms kg.merge_sort() [640ms combined]
|
|
400 |
# 1322ms self.revisions = [...]
|
|
401 |
# vs
|
|
402 |
# 1152ms tsort.merge_sort(self.graph_parents)
|
|
403 |
# 691ms self.revisions = [...]
|
|
404 |
#
|
|
405 |
# It is a gc thing... :(
|
|
406 |
# Adding gc.disable() / gc.enable() around this whole loop changes
|
|
407 |
# things to be:
|
|
408 |
# 100ms KnownGraph(self.graph_parents)
|
|
409 |
# 77ms kg.merge_sort() [177ms combined]
|
|
410 |
# 174ms self.revisions = [...]
|
|
411 |
# vs
|
|
412 |
# 639ms tsort.merge_sort(self.graph_parents)
|
|
413 |
# 150ms self.revisions = [...]
|
|
414 |
# Also known as "wow that's a lot faster". This is because KG()
|
|
415 |
# creates a bunch of Python objects, then merge_sort() creates a
|
|
416 |
# bunch more. And then self.revisions() creates another whole set.
|
|
417 |
# And all of these are moderately long lived, so you have a *lot*
|
|
418 |
# of allocations without removals (which triggers the gc checker
|
|
419 |
# over and over again.) And they probably don't live in cycles
|
|
420 |
# anyway, so you can skip it for now, and just run at the end.
|
|
421 |
||
422 |
# self.revisions *is* a little bit slower. Probably because pyrex
|
|
423 |
# MergeSortNodes use long integers rather than PyIntObject and thus
|
|
424 |
# create them on-the-fly.
|
|
425 |
||
426 |
# Get rid of the 'top:' revision
|
|
427 |
merge_sorted_revisions.pop(0) |
|
428 |
self.revisions = [RevisionData(index, node) |
|
429 |
for index, node in enumerate(merge_sorted_revisions)] |
|
430 |
if enabled: |
|
431 |
gc.enable() |
|
432 |
else: |
|
433 |
self.revisions = () |
|
434 |
||
435 |
self.revid_rev = {} |
|
436 |
self.revno_rev = {} |
|
437 |
||
438 |
self.max_mainline_revno = 0 |
|
439 |
||
440 |
for rev in self.revisions: |
|
441 |
self.max_mainline_revno = max(self.max_mainline_revno, |
|
442 |
rev.revno_sequence[0]) |
|
443 |
self.revid_rev[rev.revid] = rev |
|
444 |
self.revno_rev[rev.revno_sequence] = rev |
|
445 |
||
446 |
def branch_id_sort_key(self, x): |
|
447 |
merge_depth = self.branch_lines[x].merge_depth |
|
448 |
||
449 |
# Note: This greatly affects the layout of the graph.
|
|
450 |
#
|
|
451 |
# Branch line that have a smaller merge depth should be to the left
|
|
452 |
# of those with bigger merge depths.
|
|
453 |
#
|
|
454 |
# For branch lines that have the same parent in the mainline -
|
|
455 |
# those with bigger branch numbers to be to the rights. E.g. for
|
|
456 |
# the following dag, you want the graph to appear as on the left,
|
|
457 |
# not as on the right:
|
|
458 |
#
|
|
459 |
# 3 F_ F
|
|
460 |
# | \ |\
|
|
461 |
# 1.2.1 | E | E
|
|
462 |
# | | | \
|
|
463 |
# 2 D | D_|
|
|
464 |
# |\ | | +_
|
|
465 |
# 1.1.2 | C| | | C
|
|
466 |
# | |/ | \|
|
|
467 |
# 1.1.1 | B | B
|
|
468 |
# |/ | /
|
|
469 |
# 1 A A
|
|
470 |
#
|
|
471 |
# Otherwise, those with a greater mainline parent revno should
|
|
472 |
# appear to the left.
|
|
473 |
||
474 |
if len(x) == 0: |
|
475 |
return (merge_depth) |
|
476 |
else: |
|
477 |
return (merge_depth, -x[0], x[1]) |
|
478 |
||
479 |
def compute_branch_lines(self): |
|
480 |
self.branch_lines = {} |
|
481 |
||
482 |
"""A list of branch lines (aka merge lines).
|
|
483 |
|
|
484 |
For a revisions, the revision number less the least significant
|
|
485 |
digit is the branch_id, and used as the key for the dict. Hence
|
|
486 |
revision with the same revision number less the least significant
|
|
487 |
digit are considered to be in the same branch line. e.g.: for
|
|
488 |
revisions 290.12.1 and 290.12.2, the branch_id would be 290.12,
|
|
489 |
and these two revisions will be in the same branch line.
|
|
490 |
|
|
491 |
"""
|
|
492 |
||
493 |
self.branch_ids = [] |
|
494 |
"""List of branch ids, sorted in the order that the branches will
|
|
495 |
be shown, from left to right on the graph."""
|
|
496 |
||
497 |
for rev in self.revisions: |
|
498 |
||
499 |
branch_line = None |
|
500 |
if rev.branch_id not in self.branch_lines: |
|
501 |
branch_line = BranchLine(rev.branch_id) |
|
502 |
self.branch_lines[rev.branch_id] = branch_line |
|
503 |
else: |
|
504 |
branch_line = self.branch_lines[rev.branch_id] |
|
505 |
||
506 |
branch_line.revs.append(rev) |
|
507 |
branch_line.merge_depth = max(rev.merge_depth, |
|
508 |
branch_line.merge_depth) |
|
509 |
rev.branch = branch_line |
|
510 |
||
511 |
self.branch_ids = self.branch_lines.keys() |
|
512 |
||
513 |
self.branch_ids.sort(key=self.branch_id_sort_key) |
|
514 |
||
515 |
def compute_merge_info(self): |
|
516 |
||
517 |
def set_merged_by(rev, merged_by, merged_by_rev, do_branches=False): |
|
518 |
if merged_by is None: |
|
519 |
return
|
|
520 |
||
521 |
if merged_by_rev is None: |
|
522 |
merged_by_rev = self.revisions[merged_by] |
|
523 |
rev.merged_by = merged_by |
|
524 |
merged_by_rev.merges.append(rev.index) |
|
525 |
||
526 |
if do_branches: |
|
527 |
branch_id = rev.branch_id |
|
528 |
branch_merged_by = self.branch_lines[branch_id].merged_by |
|
529 |
merged_by_branch_id = self.revisions[merged_by].branch_id |
|
530 |
merged_by_branch_merges = \ |
|
531 |
self.branch_lines[merged_by_branch_id].merges |
|
532 |
||
533 |
if not branch_id in merged_by_branch_merges: |
|
534 |
merged_by_branch_merges.append(branch_id) |
|
535 |
if not merged_by_branch_id in branch_merged_by: |
|
536 |
branch_merged_by.append(merged_by_branch_id) |
|
537 |
||
538 |
for rev in self.revisions: |
|
539 |
||
540 |
parents = [self.revid_rev[parent] for parent in |
|
541 |
self.known_graph.get_parent_keys(rev.revid)] |
|
542 |
||
543 |
if len(parents) > 0: |
|
544 |
if rev.branch_id == parents[0].branch_id: |
|
545 |
set_merged_by(parents[0], rev.merged_by, None) |
|
546 |
||
547 |
for parent in parents[1:]: |
|
548 |
if rev.merge_depth <= parent.merge_depth: |
|
549 |
set_merged_by(parent, rev.index, rev, do_branches=True) |
|
550 |
||
551 |
def compute_head_info(self): |
|
552 |
def get_revid_head(heads): |
|
553 |
map = {} |
|
554 |
for i in xrange(len(heads)): |
|
555 |
prev_revids = [revid for revid, head in heads[:i]] |
|
556 |
unique_ancestors = self.graph.find_unique_ancestors( |
|
557 |
heads[i][0], prev_revids) |
|
558 |
for ancestor_revid in unique_ancestors: |
|
559 |
map[ancestor_revid] = heads[i][1] |
|
560 |
return map |
|
561 |
||
562 |
if len(self.branches) > 1: |
|
563 |
head_revid_branch_info = sorted( |
|
564 |
[(revid, branch_info) |
|
565 |
for revid, (head_info, ur) in self.revid_head_info.iteritems() |
|
566 |
for (branch_info, tag) in head_info], |
|
567 |
key=lambda x: not repo_is_local(x[1].branch.repository)) |
|
568 |
self.revid_branch_info = get_revid_head(head_revid_branch_info) |
|
569 |
else: |
|
570 |
self.revid_branch_info = {} |
|
571 |
||
572 |
head_count = 0 |
|
573 |
for head_info, ur in self.revid_head_info.itervalues(): |
|
574 |
head_count += len(head_info) |
|
575 |
||
576 |
if head_count > 1: |
|
577 |
# Populate unique revisions for heads
|
|
578 |
for revid, (head_info, ur) in self.revid_head_info.iteritems(): |
|
579 |
rev = None |
|
580 |
if revid in self.revid_rev: |
|
581 |
rev = self.revid_rev[revid] |
|
582 |
if rev and rev.merged_by: |
|
583 |
# This head has been merged.
|
|
584 |
# d
|
|
585 |
# |\
|
|
586 |
# b c
|
|
587 |
# |/
|
|
588 |
# a
|
|
589 |
# if revid == c,then we want other_revids = [b]
|
|
590 |
||
591 |
merged_by_revid = self.revisions[rev.merged_by].revid |
|
592 |
other_revids = [self.known_graph |
|
593 |
.get_parent_keys(merged_by_revid)[0]] |
|
594 |
else: |
|
595 |
other_revids = [other_revid for other_revid \ |
|
596 |
in self.revid_head_info.iterkeys() \ |
|
597 |
if not other_revid == revid] |
|
598 |
ur.append(revid) |
|
599 |
ur.extend([revid for revid \ |
|
600 |
in self.graph.find_unique_ancestors(revid, other_revids) \ |
|
601 |
if not revid == NULL_REVISION and revid in self.revid_rev]) |
|
602 |
ur.sort(key=lambda x: self.revid_rev[x].index) |
|
603 |
||
604 |
def compute_viz(self, state): |
|
605 |
||
606 |
# Overview:
|
|
607 |
# Work out which revision need to be displayed.
|
|
608 |
# Create ComputedGraphViz and ComputedRevisionData objects
|
|
609 |
# Assign columns for branches, and lines that go between branches.
|
|
610 |
# These are intermingled, because some of the lines need to come
|
|
611 |
# before it's branch, and others need to come after. Other lines
|
|
612 |
# (such a the line from the last rev in a branch) are treated a
|
|
613 |
# special cases.
|
|
614 |
# Return ComputedGraphViz object
|
|
615 |
gc_enabled = gc.isenabled() |
|
616 |
gc.disable() |
|
617 |
try: |
|
618 |
computed = ComputedGraphViz(self) |
|
619 |
computed.filtered_revs = [ComputedRevisionData(rev) for rev in |
|
620 |
state.get_filtered_revisions()] |
|
621 |
||
622 |
c_revisions = computed.revisions |
|
623 |
for f_index, c_rev in enumerate(computed.filtered_revs): |
|
624 |
c_revisions[c_rev.rev.index] = c_rev |
|
625 |
c_rev.f_index = f_index |
|
626 |
||
627 |
for (revid, (head_info, |
|
628 |
unique_revids)) in self.revid_head_info.iteritems(): |
|
629 |
||
630 |
for unique_revid in unique_revids: |
|
631 |
rev = self.revid_rev[unique_revid] |
|
632 |
c_rev = c_revisions[rev.index] |
|
633 |
if c_rev is not None: |
|
634 |
c_rev.branch_labels.extend(head_info) |
|
635 |
break
|
|
636 |
finally: |
|
637 |
if gc_enabled: |
|
638 |
gc.enable() |
|
639 |
||
640 |
if self.no_graph: |
|
641 |
for c_rev in computed.filtered_revs: |
|
642 |
c_rev.col_index = c_rev.rev.merge_depth * 0.5 |
|
643 |
return computed |
|
644 |
||
645 |
# This will hold a tuple of (child, parent, col_index, direct) for each
|
|
646 |
# line that needs to be drawn. If col_index is not none, then the line
|
|
647 |
# is drawn along that column, else the the line can be drawn directly
|
|
648 |
# between the child and parent because either the child and parent are
|
|
649 |
# in the same branch line, or the child and parent are 1 row apart.
|
|
650 |
lines = [] |
|
651 |
lines_by_column = [] |
|
652 |
||
653 |
def branch_line_col_search_order(start_col_index): |
|
654 |
for col_index in range(start_col_index, len(lines_by_column)): |
|
655 |
yield col_index |
|
656 |
#for col_index in range(parent_col_index-1, -1, -1):
|
|
657 |
# yield col_index
|
|
658 |
||
659 |
def line_col_search_order(parent_col_index, child_col_index): |
|
660 |
if parent_col_index is not None and child_col_index is not None: |
|
661 |
max_index = max(parent_col_index, child_col_index) |
|
662 |
min_index = min(parent_col_index, child_col_index) |
|
663 |
# First yield the columns between the child and parent.
|
|
664 |
for col_index in range(max_index, min_index - 1, -1): |
|
665 |
yield col_index |
|
666 |
elif child_col_index is not None: |
|
667 |
max_index = child_col_index |
|
668 |
min_index = child_col_index |
|
669 |
yield child_col_index |
|
670 |
elif parent_col_index is not None: |
|
671 |
max_index = parent_col_index |
|
672 |
min_index = parent_col_index |
|
673 |
yield parent_col_index |
|
674 |
else: |
|
675 |
max_index = 0 |
|
676 |
min_index = 0 |
|
677 |
yield 0 |
|
678 |
i = 1 |
|
679 |
# then yield the columns on either side.
|
|
680 |
while max_index + i < len(lines_by_column) or \ |
|
681 |
min_index - i > -1: |
|
682 |
if max_index + i < len(lines_by_column): |
|
683 |
yield max_index + i |
|
684 |
#if min_index - i > -1:
|
|
685 |
# yield min_index - i
|
|
686 |
i += 1 |
|
687 |
||
688 |
def is_col_free_for_range(col_index, child_f_index, parent_f_index): |
|
689 |
return not any( |
|
690 |
range_overlaps(child_f_index, parent_f_index, |
|
691 |
line_child_f_index, line_parent_f_index) |
|
692 |
for line_child_f_index, line_parent_f_index |
|
693 |
in lines_by_column[col_index]) |
|
694 |
||
695 |
def find_free_column(col_search_order, child_f_index, parent_f_index): |
|
696 |
for col_index in col_search_order: |
|
697 |
if is_col_free_for_range(col_index, |
|
698 |
child_f_index, parent_f_index): |
|
699 |
break
|
|
700 |
else: |
|
701 |
# No free columns found. Add an empty one on the end.
|
|
702 |
col_index = len(lines_by_column) |
|
703 |
lines_by_column.append([]) |
|
704 |
return col_index |
|
705 |
||
706 |
def append_line(child, parent, direct, col_index=None): |
|
707 |
lines.append((child, parent, col_index, direct)) |
|
708 |
||
709 |
if col_index is not None: |
|
710 |
lines_by_column[int(round(col_index))].append( |
|
711 |
(child.f_index, parent.f_index)) |
|
712 |
||
713 |
def find_visible_parent(c_rev, parent, twisty_hidden_parents): |
|
714 |
if c_revisions[parent.index] is not None: |
|
715 |
return (c_rev, c_revisions[parent.index], True) |
|
716 |
else: |
|
717 |
if parent.index in twisty_hidden_parents: |
|
718 |
# no need to draw a line if there is a twisty,
|
|
719 |
# except if this is the last in the branch.
|
|
720 |
return None |
|
721 |
# The parent was not visible. Search for a ancestor
|
|
722 |
# that is. Stop searching if we make a hop, i.e. we
|
|
723 |
# go away from our branch, and we come back to it.
|
|
724 |
has_seen_different_branch = False |
|
725 |
while c_revisions[parent.index] is None: |
|
726 |
if not parent.branch_id == c_rev.rev.branch_id: |
|
727 |
has_seen_different_branch = True |
|
728 |
# find grand parent.
|
|
729 |
g_parent_ids = (self.known_graph |
|
730 |
.get_parent_keys(parent.revid)) |
|
731 |
||
732 |
if len(g_parent_ids) == 0: |
|
733 |
return None |
|
734 |
else: |
|
735 |
parent = self.revid_rev[g_parent_ids[0]] |
|
736 |
||
737 |
if (has_seen_different_branch and |
|
738 |
parent.branch_id == branch_id): |
|
739 |
# We have gone away and come back to our
|
|
740 |
# branch. Stop.
|
|
741 |
return None |
|
742 |
if parent: |
|
743 |
# Not Direct
|
|
744 |
return (c_rev, c_revisions[parent.index], False) |
|
745 |
||
746 |
def append_branch_parent_lines(branch_rev_visible_parents): |
|
747 |
groups = group_overlapping(branch_rev_visible_parents) |
|
748 |
for parents, start, end, group_key in groups: |
|
749 |
# Since all parents go from the same branch line to the
|
|
750 |
# same branch line, we can use the col indexes of the
|
|
751 |
# parent.
|
|
752 |
if end - start == 1: |
|
753 |
col_index = None |
|
754 |
else: |
|
755 |
col_search_order = line_col_search_order( |
|
756 |
parents[0][1].col_index, parents[0][0].col_index) |
|
757 |
col_index = find_free_column(col_search_order, |
|
758 |
start, end) |
|
759 |
||
760 |
col_offset_increment = 1.0 / len(parents) |
|
761 |
for i, (c_rev, parent_c_rev, direct) in enumerate(parents): |
|
762 |
if col_index is None: |
|
763 |
col_index_offset = None |
|
764 |
else: |
|
765 |
col_index_offset = (col_index - 0.5 + |
|
766 |
(i * col_offset_increment) + |
|
767 |
(col_offset_increment / 2)) |
|
768 |
append_line(c_rev, parent_c_rev, |
|
769 |
direct, col_index_offset) |
|
770 |
||
771 |
for branch_id in self.branch_ids: |
|
772 |
if not branch_id in state.branch_line_state: |
|
773 |
continue
|
|
774 |
||
775 |
branch_line = self.branch_lines[branch_id] |
|
776 |
branch_revs = [c_revisions[rev.index] |
|
777 |
for rev in branch_line.revs |
|
778 |
if c_revisions[rev.index] is not None] |
|
779 |
||
780 |
if not branch_revs: |
|
781 |
continue
|
|
782 |
||
783 |
branch_rev_visible_parents_post = [] |
|
784 |
branch_rev_visible_parents_pre = [] |
|
785 |
# Lists of ([(c_rev, parent_c_rev, is_direct)],
|
|
786 |
# start, end, group_key]
|
|
787 |
||
788 |
last_c_rev = branch_revs[-1] |
|
789 |
last_rev_left_parents = (self.known_graph |
|
790 |
.get_parent_keys(last_c_rev.rev.revid)) |
|
791 |
if last_rev_left_parents: |
|
792 |
last_parent = find_visible_parent( |
|
793 |
last_c_rev, self.revid_rev[last_rev_left_parents[0]], []) |
|
794 |
else: |
|
795 |
last_parent = None |
|
796 |
||
797 |
sprout_with_lines = {} |
|
798 |
||
799 |
merged_by_max_col_index = 0 |
|
800 |
||
801 |
# In this loop:
|
|
802 |
# * Populate twisty_branch_ids and twisty_state
|
|
803 |
# * Find visible parents.
|
|
804 |
# * Append lines that go before the branch line.
|
|
805 |
# * Append lines to children for sprouts.
|
|
806 |
for c_rev in branch_revs: |
|
807 |
rev = c_rev.rev |
|
808 |
||
809 |
if rev.merged_by is not None: |
|
810 |
merged_by_c_rev = c_revisions[rev.merged_by] |
|
811 |
if merged_by_c_rev: |
|
812 |
merged_by_max_col_index = max( |
|
813 |
merged_by_max_col_index, merged_by_c_rev.col_index) |
|
814 |
||
815 |
parents = [self.revid_rev[parent_revid] for parent_revid in |
|
816 |
self.known_graph.get_parent_keys(rev.revid)] |
|
817 |
||
818 |
twisty_hidden_parents = [] |
|
819 |
# Find and add necessary twisties
|
|
820 |
for parent in parents: |
|
821 |
if parent.branch_id == branch_id: |
|
822 |
continue
|
|
823 |
if parent.branch_id == (): |
|
824 |
continue
|
|
825 |
if parent.branch_id in branch_line.merged_by: |
|
826 |
continue
|
|
827 |
parent_branch = self.branch_lines[parent.branch_id] |
|
828 |
# Does this branch have any visible revisions
|
|
829 |
pb_visible = (parent_branch.branch_id in |
|
830 |
state.branch_line_state) |
|
831 |
for pb_rev in parent_branch.revs: |
|
832 |
if pb_visible: |
|
833 |
visible = c_revisions[pb_rev.index] is not None |
|
834 |
else: |
|
835 |
visible = state\ |
|
836 |
.get_revision_visible_if_branch_visible(pb_rev) |
|
837 |
if visible: |
|
838 |
(c_rev.twisty_expands_branch_ids |
|
839 |
.append(parent_branch.branch_id)) |
|
840 |
if not pb_visible: |
|
841 |
twisty_hidden_parents.append(parent.index) |
|
842 |
break
|
|
843 |
||
844 |
# Work out if the twisty needs to show a + or -. If all
|
|
845 |
# twisty_branch_ids are visible, show - else +.
|
|
846 |
if len(c_rev.twisty_expands_branch_ids) > 0: |
|
847 |
c_rev.twisty_state = True |
|
848 |
for twisty_branch_id in c_rev.twisty_expands_branch_ids: |
|
849 |
if not twisty_branch_id in state.branch_line_state: |
|
850 |
c_rev.twisty_state = False |
|
851 |
break
|
|
852 |
||
853 |
# Don't include left hand parents All of these parents in the
|
|
854 |
# branch can be drawn with one line.
|
|
855 |
parents = parents[1:] |
|
856 |
||
857 |
branch_id_sort_key = self.branch_id_sort_key(branch_id) |
|
858 |
for i, parent in enumerate(parents): |
|
859 |
parent_info = find_visible_parent(c_rev, parent, |
|
860 |
twisty_hidden_parents) |
|
861 |
if parent_info: |
|
862 |
c_rev, parent_c_rev, direct = parent_info |
|
863 |
if (last_parent and |
|
864 |
parent_c_rev.f_index <= last_parent[1].f_index and |
|
865 |
self.branch_id_sort_key(parent_c_rev.rev.branch_id) |
|
866 |
< branch_id_sort_key): |
|
867 |
# This line goes before the branch line
|
|
868 |
dest = branch_rev_visible_parents_pre |
|
869 |
else: |
|
870 |
# This line goes after
|
|
871 |
dest = branch_rev_visible_parents_post |
|
872 |
||
873 |
line_len = parent_c_rev.f_index - c_rev.f_index |
|
874 |
if line_len == 1: |
|
875 |
group_key = None |
|
876 |
else: |
|
877 |
group_key = parent_c_rev.rev.branch_id |
|
878 |
||
879 |
dest.append(([parent_info], |
|
880 |
c_rev.f_index, |
|
881 |
parent_c_rev.f_index, |
|
882 |
group_key)) |
|
883 |
||
884 |
# This may be a sprout. Add line to first visible child
|
|
885 |
if c_rev.rev.merged_by is not None: |
|
886 |
merged_by = self.revisions[c_rev.rev.merged_by] |
|
887 |
if c_revisions[merged_by.index] is None and\ |
|
888 |
branch_revs[0].f_index == c_rev.f_index: |
|
889 |
# The revision that merges this revision is not
|
|
890 |
# visible, and it is the first visible revision in
|
|
891 |
# the branch line. This is a sprout.
|
|
892 |
#
|
|
893 |
# XXX What if multiple merges with --force,
|
|
894 |
# aka octopus merge?
|
|
895 |
#
|
|
896 |
# Search until we find a descendant that is visible.
|
|
897 |
||
898 |
while merged_by is not None and \ |
|
899 |
c_revisions[merged_by.index] is None: |
|
900 |
if merged_by.merged_by is not None: |
|
901 |
merged_by = self.revisions[merged_by.merged_by] |
|
902 |
else: |
|
903 |
merged_by = None |
|
904 |
||
905 |
if merged_by is not None: |
|
906 |
# Ensure only one line to a descendant.
|
|
907 |
if (merged_by.index not in sprout_with_lines): |
|
908 |
sprout_with_lines[merged_by.index] = True |
|
909 |
parent = c_revisions[merged_by.index] |
|
910 |
if parent is not None: |
|
911 |
if c_rev.f_index - parent.f_index == 1: |
|
912 |
col_index = None |
|
913 |
else: |
|
914 |
col_search_order = line_col_search_order( |
|
915 |
parent.col_index, c_rev.col_index) |
|
916 |
col_index = find_free_column( |
|
917 |
col_search_order, |
|
918 |
parent.f_index, c_rev.f_index) |
|
919 |
append_line(parent, c_rev, False, |
|
920 |
col_index) |
|
921 |
||
922 |
# Find a column for this branch.
|
|
923 |
#
|
|
924 |
# Find the col_index for the direct parent branch. This will
|
|
925 |
# be the starting point when looking for a free column.
|
|
926 |
||
927 |
append_branch_parent_lines(branch_rev_visible_parents_pre) |
|
928 |
||
929 |
if branch_id == (): |
|
930 |
start_col_index = 0 |
|
931 |
else: |
|
932 |
start_col_index = 1 |
|
933 |
||
934 |
if last_parent and last_parent[0].col_index is not None: |
|
935 |
parent_col_index = last_parent[1].col_index |
|
936 |
start_col_index = max(start_col_index, parent_col_index) |
|
937 |
||
938 |
start_col_index = max(start_col_index, merged_by_max_col_index) |
|
939 |
||
940 |
col_search_order = branch_line_col_search_order(start_col_index) |
|
941 |
||
942 |
if last_parent: |
|
943 |
col_index = find_free_column(col_search_order, |
|
944 |
branch_revs[0].f_index, |
|
945 |
last_parent[1].f_index) |
|
946 |
else: |
|
947 |
col_index = find_free_column(col_search_order, |
|
948 |
branch_revs[0].f_index, |
|
949 |
branch_revs[-1].f_index) |
|
950 |
||
951 |
# Free column for this branch found. Set node for all
|
|
952 |
# revision in this branch.
|
|
953 |
for rev in branch_revs: |
|
954 |
rev.col_index = col_index |
|
955 |
||
956 |
append_line(branch_revs[0], branch_revs[-1], True, col_index) |
|
957 |
if last_parent: |
|
958 |
append_line(last_parent[0], last_parent[1], |
|
959 |
last_parent[2], col_index) |
|
960 |
||
961 |
branch_rev_visible_parents_post.reverse() |
|
962 |
append_branch_parent_lines(branch_rev_visible_parents_post) |
|
963 |
||
964 |
# It has now been calculated which column a line must go into. Now
|
|
965 |
# copy the lines in to computed_revisions.
|
|
966 |
for (child, parent, line_col_index, direct) in lines: |
|
967 |
||
968 |
parent_color = parent.rev.color |
|
969 |
||
970 |
line_length = parent.f_index - child.f_index |
|
971 |
if line_length == 0: |
|
972 |
# Nothing to do
|
|
973 |
pass
|
|
974 |
elif line_length == 1: |
|
975 |
child.lines.append( |
|
976 |
(child.col_index, |
|
977 |
parent.col_index, |
|
978 |
parent_color, |
|
979 |
direct)) |
|
980 |
else: |
|
981 |
# line from the child's column to the lines column
|
|
982 |
child.lines.append( |
|
983 |
(child.col_index, |
|
984 |
line_col_index, |
|
985 |
parent_color, |
|
986 |
direct)) |
|
987 |
# lines down the line's column
|
|
988 |
for line_part_f_index in range(child.f_index + 1, |
|
989 |
parent.f_index - 1): |
|
990 |
computed.filtered_revs[line_part_f_index].lines.append( |
|
991 |
(line_col_index, |
|
992 |
line_col_index, |
|
993 |
parent_color, |
|
994 |
direct)) |
|
995 |
# line from the line's column to the parent's column
|
|
996 |
computed.filtered_revs[parent.f_index - 1].lines.append( |
|
997 |
(line_col_index, |
|
998 |
parent.col_index, |
|
999 |
parent_color, |
|
1000 |
direct)) |
|
1001 |
||
1002 |
return computed |
|
1003 |
||
1004 |
def get_revid_branch_info(self, revid): |
|
1005 |
"""This returns a branch info whos branch contains the revision.
|
|
1006 |
|
|
1007 |
If the revision exists more than one branch, it will only return the
|
|
1008 |
first branch info. """
|
|
1009 |
||
1010 |
if revid in self.ghosts: |
|
1011 |
raise GhostRevisionError(revid) |
|
1012 |
||
1013 |
if len(self.branches) == 1 or revid not in self.revid_branch_info: |
|
1014 |
return self.branches[0] |
|
1015 |
return self.revid_branch_info[revid] |
|
1016 |
||
1017 |
def get_revid_branch(self, revid): |
|
1018 |
return self.get_revid_branch_info(revid).branch |
|
1019 |
||
1020 |
def get_revid_repo(self, revid): |
|
1021 |
return self.get_revid_branch_info(revid).branch.repository |
|
1022 |
||
1023 |
def get_repo_revids(self, revids): |
|
1024 |
"""Returns list of tuple of (repo, revids)"""
|
|
1025 |
repo_revids = {} |
|
1026 |
for repo in self.repos: |
|
1027 |
repo_revids[repo.base] = [] |
|
1028 |
||
1029 |
for local_repo_copy in self.local_repo_copies: |
|
1030 |
for revid in self.repos[local_repo_copy].has_revisions(revids): |
|
1031 |
revids.remove(revid) |
|
1032 |
repo_revids[local_repo_copy].append(revid) |
|
1033 |
||
1034 |
for revid in revids: |
|
1035 |
try: |
|
1036 |
repo = self.get_revid_repo(revid) |
|
1037 |
except GhostRevisionError: |
|
1038 |
pass
|
|
1039 |
else: |
|
1040 |
repo_revids[repo.base].append(revid) |
|
1041 |
||
1042 |
return [(repo, repo_revids[repo.base]) |
|
1043 |
for repo in self.repos] |
|
1044 |
||
1045 |
def load_revisions(self, revids): |
|
1046 |
return_revisions = {} |
|
1047 |
for repo, revids in self.get_repo_revids(revids): |
|
1048 |
if revids: |
|
1049 |
repo.lock_read() |
|
1050 |
try: |
|
1051 |
self.update_ui() |
|
1052 |
for rev in repo.get_revisions(revids): |
|
1053 |
return_revisions[rev.revision_id] = rev |
|
1054 |
finally: |
|
1055 |
repo.unlock() |
|
1056 |
return return_revisions |
|
1057 |
||
1058 |
||
1059 |
def repo_is_local(repo): |
|
1060 |
return isinstance(repo.bzrdir.transport, LocalTransport) |
|
1061 |
||
1062 |
def group_overlapping(groups): |
|
1063 |
""" Groups items with overlapping ranges together.
|
|
1064 |
|
|
1065 |
:param groups: List of uncollapsed groups.
|
|
1066 |
:param group: (start of range, end of range, items in group)
|
|
1067 |
:return: List of collapsed groups.
|
|
1068 |
|
|
1069 |
"""
|
|
1070 |
||
1071 |
has_change = True |
|
1072 |
while has_change: |
|
1073 |
has_change = False |
|
1074 |
a = 0 |
|
1075 |
while a < len(groups): |
|
1076 |
inner_has_change = False |
|
1077 |
items_a, start_a, end_a, group_key_a = groups[a] |
|
1078 |
if group_key_a is not None: |
|
1079 |
b = a + 1 |
|
1080 |
while b < len(groups): |
|
1081 |
items_b, start_b, end_b, group_key_b = groups[b] |
|
1082 |
if (group_key_a == group_key_b and |
|
1083 |
range_overlaps(start_a, end_a, start_b, end_b)): |
|
1084 |
# overlaps. Merge b into a
|
|
1085 |
items_a.extend(items_b) |
|
1086 |
start_a = min(start_a, start_b) |
|
1087 |
end_a = max(end_a, end_b) |
|
1088 |
del groups[b] |
|
1089 |
has_change = True |
|
1090 |
inner_has_change = True |
|
1091 |
else: |
|
1092 |
b += 1 |
|
1093 |
if inner_has_change: |
|
1094 |
groups[a] = (items_a, start_a, end_a, group_key_a) |
|
1095 |
a += 1 |
|
1096 |
||
1097 |
return groups |
|
1098 |
||
1099 |
def range_overlaps (start_a, end_a, start_b, end_b): |
|
1100 |
"""Tests if two ranges overlap."""
|
|
1101 |
return (start_b < start_a < end_b or |
|
1102 |
start_b < end_a < end_b or |
|
1103 |
(start_a <= start_b and end_a >= end_b)) |
|
1104 |
||
1105 |
||
1106 |
class PendingMergesGraphVizLoader(GraphVizLoader): |
|
1107 |
"""GraphVizLoader that only loads pending merges.
|
|
1108 |
|
|
1109 |
As only the pending merges are passed to merge_sort, the revno
|
|
1110 |
are incorrect, and should be ignored.
|
|
1111 |
|
|
1112 |
Only works on a single branch.
|
|
1113 |
|
|
1114 |
"""
|
|
1115 |
||
1116 |
def load_graph_parents(self): |
|
1117 |
if not len(self.branches) == 1 or not len(self.repos) == 1: |
|
1118 |
AssertionError("load_graph_pending_merges should only be called \ |
|
1119 |
when 1 branch and repo has been opened.") |
|
1120 |
||
1121 |
bi = self.branches[0] |
|
1122 |
if bi.tree is None: |
|
1123 |
AssertionError("PendingMergesGraphVizLoader must have a working " |
|
1124 |
"tree.") |
|
1125 |
||
1126 |
self.graph = bi.branch.repository.get_graph() |
|
1127 |
tree_heads = bi.tree.get_parent_ids() |
|
1128 |
other_revisions = [tree_heads[0], ] |
|
1129 |
self.update_ui() |
|
1130 |
||
1131 |
self.append_head_info('root:', bi, None) |
|
1132 |
pending_merges = [] |
|
1133 |
for head in tree_heads[1:]: |
|
1134 |
self.append_head_info(head, bi, None) |
|
1135 |
pending_merges.extend( |
|
1136 |
self.graph.find_unique_ancestors(head, other_revisions)) |
|
1137 |
other_revisions.append(head) |
|
1138 |
self.update_ui() |
|
1139 |
||
1140 |
graph_parents = self.graph.get_parent_map(pending_merges) |
|
1141 |
graph_parents["root:"] = () |
|
1142 |
self.update_ui() |
|
1143 |
||
1144 |
for (revid, parents) in graph_parents.items(): |
|
1145 |
new_parents = [] |
|
1146 |
for index, parent in enumerate(parents): |
|
1147 |
if parent in graph_parents: |
|
1148 |
new_parents.append(parent) |
|
1149 |
elif index == 0: |
|
1150 |
new_parents.append("root:") |
|
1151 |
graph_parents[revid] = tuple(new_parents) |
|
1152 |
||
1153 |
return ["root:", ] + tree_heads[1:], graph_parents.items() |
|
1154 |
||
1155 |
||
1156 |
class WithWorkingTreeGraphVizLoader(GraphVizLoader): |
|
1157 |
"""
|
|
1158 |
GraphVizLoader that shows uncommitted working tree changes as a node
|
|
1159 |
in the graph, as if it was already committed.
|
|
1160 |
"""
|
|
1161 |
||
1162 |
def tree_revid(self, tree): |
|
1163 |
return CURRENT_REVISION + tree.basedir.encode('unicode-escape') |
|
1164 |
||
1165 |
def load(self): |
|
1166 |
self.working_trees = {} |
|
1167 |
for bi in self.branches: |
|
1168 |
if not bi.tree is None: |
|
1169 |
self.working_trees[self.tree_revid(bi.tree)] = bi.tree |
|
1170 |
||
1171 |
super(WithWorkingTreeGraphVizLoader, self).load() |
|
1172 |
||
1173 |
def load_branch_heads(self, bi): |
|
1174 |
# returns load_heads, sort_heads and also calls append_head_info.
|
|
1175 |
#
|
|
1176 |
# == For branch with tree ==
|
|
1177 |
# Graph | load_heads | sort_heads | append_head_info
|
|
1178 |
# wt | No | Yes | Yes
|
|
1179 |
# | \ | | |
|
|
1180 |
# | 1.1.2 pending merge | Yes | No | Yes
|
|
1181 |
# 2 | basis rev | Yes | No | Yes
|
|
1182 |
#
|
|
1183 |
# == For branch with tree not up to date ==
|
|
1184 |
# Graph | load_heads | sort_heads | append_head_info
|
|
1185 |
# wt | No | Yes | Yes
|
|
1186 |
# | \ | | |
|
|
1187 |
# | 1.1.2 pending merge | Yes | No | Yes
|
|
1188 |
# 3/ | branch tip | Yes | Yes | Yes
|
|
1189 |
# 2 | basis rev | Yes | No | No
|
|
1190 |
#
|
|
1191 |
# == For branch without tree ==
|
|
1192 |
# branch tip | Yes | head | yes
|
|
1193 |
||
1194 |
load_heads = [] |
|
1195 |
sort_heads = [] |
|
1196 |
extra_parents = [] |
|
1197 |
||
1198 |
if len(self.branches) > 0: |
|
1199 |
label = bi.label |
|
1200 |
else: |
|
1201 |
label = None |
|
1202 |
||
1203 |
branch_last_revision = bi.branch.last_revision() |
|
1204 |
self.append_head_info(branch_last_revision, bi, bi.label) |
|
1205 |
load_heads.append(branch_last_revision) |
|
1206 |
self.update_ui() |
|
1207 |
||
1208 |
if bi.tree: |
|
1209 |
wt_revid = self.tree_revid(bi.tree) |
|
1210 |
if label: |
|
1211 |
wt_label = "%s - Working Tree" % label |
|
1212 |
else: |
|
1213 |
wt_label = "Working Tree" |
|
1214 |
self.append_head_info(wt_revid, bi, wt_label) |
|
1215 |
parent_ids = bi.tree.get_parent_ids() |
|
1216 |
||
1217 |
extra_parents.append((wt_revid, parent_ids)) |
|
1218 |
load_heads.extend(parent_ids) |
|
1219 |
||
1220 |
if parent_ids: |
|
1221 |
# first parent is last revision of the tree
|
|
1222 |
if parent_ids[0] != branch_last_revision: |
|
1223 |
# tree is not up to date.
|
|
1224 |
sort_heads.append(branch_last_revision) |
|
1225 |
||
1226 |
# other parents are pending merges
|
|
1227 |
for revid in parent_ids[1:]: |
|
1228 |
if label: |
|
1229 |
pm_label = "%s - Pending Merge" % label |
|
1230 |
else: |
|
1231 |
pm_label = "Pending Merge" |
|
1232 |
self.append_head_info(revid, bi, pm_label) |
|
1233 |
||
1234 |
sort_heads.append(wt_revid) |
|
1235 |
self.update_ui() |
|
1236 |
else: |
|
1237 |
sort_heads.append(branch_last_revision) |
|
1238 |
||
1239 |
return load_heads, sort_heads, extra_parents |
|
1240 |
||
1241 |
||
1242 |
class GraphVizFilterState(object): |
|
1243 |
"""
|
|
1244 |
Records the state of which branch lines are expanded, and what filters
|
|
1245 |
are applied.
|
|
1246 |
"""
|
|
1247 |
||
1248 |
def __init__(self, graph_viz, filter_changed_callback=None): |
|
1249 |
self.graph_viz = graph_viz |
|
1250 |
self.filter_changed_callback = filter_changed_callback |
|
1251 |
||
1252 |
self.branch_line_state = {} |
|
1253 |
"If a branch_id is in this dict, it is visible. The value of the dict "
|
|
1254 |
"indicates which branches expanded this branch."
|
|
1255 |
||
1256 |
for revid in self.graph_viz.revid_head_info: |
|
1257 |
rev = self.graph_viz.revid_rev[revid] |
|
1258 |
self.branch_line_state[rev.branch_id] = None |
|
1259 |
||
1260 |
self.filters = [] |
|
1261 |
||
1262 |
# This keeps a cache of the filter state so that when one of the
|
|
1263 |
# filters notifies us of a change, we can check if anything did change.
|
|
1264 |
||
1265 |
self.filter_cache = [None for rev in self.graph_viz.revisions] |
|
1266 |
||
1267 |
def get_filtered_revisions(self): |
|
1268 |
if self.graph_viz.no_graph: |
|
1269 |
rev_whos_branch_is_visible = self.graph_viz.revisions |
|
1270 |
else: |
|
1271 |
rev_whos_branch_is_visible = [] |
|
1272 |
for branch_id in self.branch_line_state.iterkeys(): |
|
1273 |
try: |
|
1274 |
branch_line = self.graph_viz.branch_lines[branch_id] |
|
1275 |
except KeyError: |
|
1276 |
continue
|
|
1277 |
rev_whos_branch_is_visible.extend(branch_line.revs) |
|
1278 |
rev_whos_branch_is_visible.sort(key=lambda rev: rev.index) |
|
1279 |
||
1280 |
visible = self.get_revision_visible_if_branch_visible |
|
1281 |
return (rev for rev in rev_whos_branch_is_visible if visible(rev)) |
|
1282 |
||
1283 |
def get_revision_visible_if_branch_visible(self, rev): |
|
1284 |
rev_filter_cache = self.filter_cache[rev.index] |
|
1285 |
if rev_filter_cache is None: |
|
1286 |
rev_filter_cache = \ |
|
1287 |
self._get_revision_visible_if_branch_visible(rev) |
|
1288 |
self.filter_cache[rev.index] = rev_filter_cache |
|
1289 |
return rev_filter_cache |
|
1290 |
||
1291 |
def _get_revision_visible_if_branch_visible(self, rev): |
|
1292 |
filters_value = True |
|
1293 |
for filter in self.filters: |
|
1294 |
if not filter.get_revision_visible(rev): |
|
1295 |
filters_value = False |
|
1296 |
break
|
|
1297 |
if filters_value: |
|
1298 |
return True |
|
1299 |
||
1300 |
if not self.graph_viz.no_graph: |
|
1301 |
for merged_index in rev.merges: |
|
1302 |
merged_rev = self.graph_viz.revisions[merged_index] |
|
1303 |
if self.get_revision_visible_if_branch_visible(merged_rev): |
|
1304 |
return True |
|
1305 |
||
1306 |
return False |
|
1307 |
||
1308 |
def filter_changed(self, revs=None, last_call=True): |
|
1309 |
if revs is None: |
|
1310 |
self.filter_cache = [None for rev in self.graph_viz.revisions] |
|
1311 |
if self.filter_changed_callback: |
|
1312 |
self.filter_changed_callback() |
|
1313 |
else: |
|
1314 |
pending_revs = revs |
|
1315 |
processed_revs = set() |
|
1316 |
prev_cached_revs = [] |
|
1317 |
while pending_revs: |
|
1318 |
rev = pending_revs.pop(0) |
|
1319 |
if rev in processed_revs: |
|
1320 |
continue
|
|
1321 |
processed_revs.add(rev) |
|
1322 |
||
1323 |
rev_filter_cache = self.filter_cache[rev.index] |
|
1324 |
||
1325 |
if rev_filter_cache is not None: |
|
1326 |
prev_cached_revs.append((rev, rev_filter_cache)) |
|
1327 |
self.filter_cache[rev.index] = None |
|
1328 |
||
1329 |
if not self.graph_viz.no_graph: |
|
1330 |
if rev.merged_by is not None: |
|
1331 |
pending_revs.append( |
|
1332 |
self.graph_viz.revisions[rev.merged_by]) |
|
1333 |
||
1334 |
# Check if any visibilities have changes. If they have, call
|
|
1335 |
# filter_changed_callback
|
|
1336 |
for rev, prev_visible in prev_cached_revs: |
|
1337 |
visible = self.get_revision_visible_if_branch_visible(rev) |
|
1338 |
if visible != prev_visible: |
|
1339 |
if self.filter_changed_callback: |
|
1340 |
self.filter_changed_callback() |
|
1341 |
break
|
|
1342 |
||
1343 |
def ensure_rev_visible(self, rev): |
|
1344 |
if self.graph_viz.no_graph: |
|
1345 |
return False |
|
1346 |
||
1347 |
branch_id = rev.branch_id |
|
1348 |
if branch_id not in self.branch_line_state: |
|
1349 |
self.branch_line_state[branch_id] = None |
|
1350 |
if self.filter_changed_callback: |
|
1351 |
self.filter_changed_callback() |
|
1352 |
return True |
|
1353 |
return False |
|
1354 |
||
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1355 |
def collapse_expand_rev(self, c_rev, in_place_mod=True): |
1356 |
if in_place_mod: |
|
1357 |
branch_line_state = self.branch_line_state |
|
1358 |
else: |
|
1359 |
# create a copy of the state dict
|
|
1360 |
branch_line_state = dict(self.branch_line_state) |
|
1361 |
||
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1362 |
if c_rev is None: |
1363 |
return False |
|
1364 |
visible = not c_rev.twisty_state |
|
1365 |
branch_ids = zip( |
|
1366 |
c_rev.twisty_expands_branch_ids, |
|
1367 |
[c_rev.rev.branch_id] * len(c_rev.twisty_expands_branch_ids)) |
|
1368 |
||
1369 |
seen_branch_ids = set(branch_id |
|
1370 |
for branch_id, expanded_by in branch_ids) |
|
1371 |
has_change = False |
|
1372 |
while branch_ids: |
|
1373 |
branch_id, expanded_by = branch_ids.pop() |
|
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1374 |
if (branch_id in branch_line_state) != visible: |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1375 |
has_change = True |
1376 |
if not visible: |
|
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1377 |
del branch_line_state[branch_id] |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1378 |
parents = self.graph_viz.branch_lines[branch_id].merges |
1379 |
for parent_branch_id in parents: |
|
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1380 |
parent_visible = parent_branch_id in branch_line_state |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1381 |
if (not parent_visible or |
1382 |
parent_branch_id in seen_branch_ids): |
|
1383 |
continue
|
|
1384 |
||
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1385 |
if branch_line_state[parent_branch_id] == branch_id: |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1386 |
# This branch expanded the parent branch, so we must
|
1387 |
# collapse it.
|
|
1388 |
branch_ids.append((parent_branch_id, branch_id)) |
|
1389 |
seen_branch_ids.add(parent_branch_id) |
|
1390 |
else: |
|
1391 |
# Check if this parent has any other visible branches
|
|
1392 |
# that merge it.
|
|
1393 |
has_visible = False |
|
1394 |
parent = self.graph_viz.branch_lines[parent_branch_id] |
|
1395 |
for merged_by_branch_id in parent.merged_by: |
|
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1396 |
if merged_by_branch_id in branch_line_state: |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1397 |
has_visible = True |
1398 |
break
|
|
1399 |
if not has_visible: |
|
1400 |
branch_ids.append((parent_branch_id, branch_id)) |
|
1401 |
seen_branch_ids.add(parent_branch_id) |
|
1402 |
else: |
|
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1403 |
branch_line_state[branch_id] = expanded_by |
1404 |
if has_change and self.filter_changed_callback and in_place_mod: |
|
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1405 |
self.filter_changed_callback() |
|
5538
by Gary van der Merwe
Make it possible to call GraphVizFilterState.collapse_expand_rev without changing the state, but rather return a new branch_line_state dict. |
1406 |
return branch_line_state |
|
5535
by Gary van der Merwe
Copy loggraphviz.py form qbzr rev 1336 revid:garyvdm@gmail.com-20101108200914-ldbs9qwjvctgtxd0 |
1407 |
|
1408 |
def expand_all_branch_lines(self): |
|
1409 |
for branch_id in self.graph_viz.branch_lines.keys(): |
|
1410 |
if branch_id not in self.branch_line_state: |
|
1411 |
self.branch_line_state[branch_id] = None |
|
1412 |
||
1413 |
||
1414 |
class FileIdFilter (object): |
|
1415 |
"""
|
|
1416 |
Filter that only shows revisions that modify one of the specified files.
|
|
1417 |
"""
|
|
1418 |
||
1419 |
def __init__(self, graph_viz, filter_changed_callback, file_ids): |
|
1420 |
self.graph_viz = graph_viz |
|
1421 |
self.filter_changed_callback = filter_changed_callback |
|
1422 |
self.file_ids = file_ids |
|
1423 |
self.has_dir = False |
|
1424 |
self.filter_file_id = [False for rev in self.graph_viz.revisions] |
|
1425 |
||
1426 |
# don't filter working tree nodes
|
|
1427 |
if isinstance(self.graph_viz, WithWorkingTreeGraphVizLoader): |
|
1428 |
for wt_revid in self.graph_viz.working_trees.iterkeys(): |
|
1429 |
try: |
|
1430 |
rev_index = self.graph_viz.revid_rev[wt_revid].index |
|
1431 |
self.filter_file_id[rev_index] = True |
|
1432 |
except KeyError: |
|
1433 |
pass
|
|
1434 |
||
1435 |
||
1436 |
def uses_inventory(self): |
|
1437 |
return self.has_dir |
|
1438 |
||
1439 |
def load(self, revids=None): |
|
1440 |
"""Load which revisions affect the file_ids"""
|
|
1441 |
if self.file_ids: |
|
1442 |
self.graph_viz.throbber_show() |
|
1443 |
||
1444 |
for bi in self.graph_viz.branches: |
|
1445 |
tree = bi.tree |
|
1446 |
if tree is None: |
|
1447 |
tree = bi.branch.basis_tree() |
|
1448 |
||
1449 |
tree.lock_read() |
|
1450 |
try: |
|
1451 |
for file_id in self.file_ids: |
|
1452 |
if tree.kind(file_id) in ('directory', |
|
1453 |
'tree-reference'): |
|
1454 |
self.has_dir = True |
|
1455 |
break
|
|
1456 |
if self.has_dir: |
|
1457 |
break
|
|
1458 |
finally: |
|
1459 |
tree.unlock() |
|
1460 |
||
1461 |
if revids is None: |
|
1462 |
revids = [rev.revid for rev in self.graph_viz.revisions] |
|
1463 |
||
1464 |
for repo, revids in self.graph_viz.get_repo_revids(revids): |
|
1465 |
if self.uses_inventory(): |
|
1466 |
chunk_size = 200 |
|
1467 |
else: |
|
1468 |
chunk_size = 500 |
|
1469 |
||
1470 |
for start in xrange(0, len(revids), chunk_size): |
|
1471 |
self.load_filter_file_id_chunk(repo, |
|
1472 |
revids[start:start + chunk_size]) |
|
1473 |
||
1474 |
self.load_filter_file_id_chunk_finished() |
|
1475 |
||
1476 |
def load_filter_file_id_chunk(self, repo, revids): |
|
1477 |
def check_text_keys(text_keys): |
|
1478 |
changed_revs = [] |
|
1479 |
for file_id, revid in repo.texts.get_parent_map(text_keys): |
|
1480 |
rev = self.graph_viz.revid_rev[revid] |
|
1481 |
self.filter_file_id[rev.index] = True |
|
1482 |
changed_revs.append(rev) |
|
1483 |
||
1484 |
self.graph_viz.update_ui() |
|
1485 |
self.filter_changed_callback(changed_revs, False) |
|
1486 |
self.graph_viz.update_ui() |
|
1487 |
||
1488 |
repo.lock_read() |
|
1489 |
try: |
|
1490 |
if not self.uses_inventory(): |
|
1491 |
text_keys = [(file_id, revid) |
|
1492 |
for revid in revids |
|
1493 |
for file_id in self.file_ids] |
|
1494 |
check_text_keys(text_keys) |
|
1495 |
else: |
|
1496 |
text_keys = [] |
|
1497 |
# We have to load the inventory for each revisions, to find
|
|
1498 |
# the children of any directories.
|
|
1499 |
for inv, revid in izip(repo.iter_inventories(revids), revids): |
|
1500 |
entries = inv.iter_entries_by_dir( |
|
1501 |
specific_file_ids=self.file_ids) |
|
1502 |
for path, entry in entries: |
|
1503 |
text_keys.append((entry.file_id, revid)) |
|
1504 |
if entry.kind == "directory": |
|
1505 |
sub_entries = inv.iter_entries(from_dir=entry) |
|
1506 |
for rc_path, rc_entry in sub_entries: |
|
1507 |
text_keys.append((rc_entry.file_id, revid)) |
|
1508 |
||
1509 |
self.graph_viz.update_ui() |
|
1510 |
||
1511 |
check_text_keys(text_keys) |
|
1512 |
finally: |
|
1513 |
repo.unlock() |
|
1514 |
||
1515 |
def load_filter_file_id_chunk_finished(self): |
|
1516 |
self.filter_changed_callback([], True) |
|
1517 |
self.graph_viz.throbber_hide() |
|
1518 |
||
1519 |
def get_revision_visible(self, rev): |
|
1520 |
return self.filter_file_id[rev.index] |
|
1521 |
||
1522 |
||
1523 |
class WorkingTreeHasChangeFilter(object): |
|
1524 |
"""
|
|
1525 |
Filter out working trees that don't have any changes.
|
|
1526 |
"""
|
|
1527 |
||
1528 |
def __init__(self, graph_viz, filter_changed_callback, file_ids): |
|
1529 |
self.graph_viz = graph_viz |
|
1530 |
self.file_ids = file_ids |
|
1531 |
if not isinstance(graph_viz, WithWorkingTreeGraphVizLoader): |
|
1532 |
raise TypeError('graph_viz expected to be a ' |
|
1533 |
'WithWorkingTreeGraphVizLoader') |
|
1534 |
self.filter_changed_callback = filter_changed_callback |
|
1535 |
self.tree_revids_with_changes = set() |
|
1536 |
||
1537 |
def load(self): |
|
1538 |
"""Load if the working trees have changes."""
|
|
1539 |
self.tree_revids_with_changes = set() |
|
1540 |
self.graph_viz.throbber_show() |
|
1541 |
try: |
|
1542 |
for wt_revid, tree in self.graph_viz.working_trees.iteritems(): |
|
1543 |
if self.has_changes(tree): |
|
1544 |
self.tree_revids_with_changes.add(wt_revid) |
|
1545 |
rev = self.graph_viz.revid_rev[wt_revid] |
|
1546 |
self.filter_changed_callback([rev], False) |
|
1547 |
self.filter_changed_callback([], True) |
|
1548 |
finally: |
|
1549 |
self.graph_viz.throbber_hide() |
|
1550 |
||
1551 |
def has_changes(self, tree): |
|
1552 |
"""Quickly check that the tree contains at least one commitable change.
|
|
1553 |
||
1554 |
:param _from_tree: tree to compare against to find changes (default to
|
|
1555 |
the basis tree and is intended to be used by tests).
|
|
1556 |
||
1557 |
:return: True if a change is found. False otherwise
|
|
1558 |
"""
|
|
1559 |
tree.lock_read() |
|
1560 |
try: |
|
1561 |
# Copied from mutabletree, cause we need file_ids too.
|
|
1562 |
# Check pending merges
|
|
1563 |
if len(tree.get_parent_ids()) > 1: |
|
1564 |
return True |
|
1565 |
from_tree = tree.basis_tree() |
|
1566 |
||
1567 |
specific_files = None |
|
1568 |
if self.file_ids: |
|
1569 |
specific_files = [tree.id2path(file_id) |
|
1570 |
for file_id in self.file_ids] |
|
1571 |
||
1572 |
changes = tree.iter_changes(from_tree, |
|
1573 |
specific_files=specific_files) |
|
1574 |
try: |
|
1575 |
change = changes.next() |
|
1576 |
# Exclude root (talk about black magic... --vila 20090629)
|
|
1577 |
if change[4] == (None, None): |
|
1578 |
change = changes.next() |
|
1579 |
return True |
|
1580 |
except StopIteration: |
|
1581 |
# No changes
|
|
1582 |
return False |
|
1583 |
finally: |
|
1584 |
tree.unlock() |
|
1585 |
||
1586 |
def get_revision_visible(self, rev): |
|
1587 |
if rev.revid.startswith(CURRENT_REVISION): |
|
1588 |
return rev.revid in self.tree_revids_with_changes |
|
1589 |
else: |
|
1590 |
return True |
|
1591 |
||
1592 |
||
1593 |
class ComputedRevisionData(object): |
|
1594 |
"""Container for computed layout data for a revision.
|
|
1595 |
|
|
1596 |
:ivar rev: Reference to RevisionData. Use to get revno, revid, color and
|
|
1597 |
others.
|
|
1598 |
:ivar f_index: Index in `ComputedGraphViz.filtered_revs`.
|
|
1599 |
:ivar col_index: Column index to place node for revision in.
|
|
1600 |
:ivar lines: Lines that need to be drawn from from this revision's line to
|
|
1601 |
the next revision's line. Note that not all these lines relate to this
|
|
1602 |
revision, but be a part of a longer line that is passing this revision.
|
|
1603 |
|
|
1604 |
Each line is a tuple of `(end col_index, start col_index, color,
|
|
1605 |
direct)`.
|
|
1606 |
|
|
1607 |
If direct is False, it indicates that this line represents an
|
|
1608 |
ancestry, with revisions that are filtered. This should be shown as
|
|
1609 |
a dotted line.
|
|
1610 |
|
|
1611 |
:ivar branch_labels: Labels for branch tips.
|
|
1612 |
:ivar twisty_state: State of the revision:
|
|
1613 |
|
|
1614 |
* None: No twisty.
|
|
1615 |
* True: There are branch lines that this revision merges that can
|
|
1616 |
expanded. Show a '+'.
|
|
1617 |
* False: All branches that this revision merges are already expanded.
|
|
1618 |
Show a '-'.
|
|
1619 |
:ivar twisty_expands_branch_ids: Branch lines that will be expanded if the
|
|
1620 |
twisty is clicked.
|
|
1621 |
"""
|
|
1622 |
||
1623 |
# Instance of this object are typically named "c_rev".
|
|
1624 |
__slots__ = ['rev', 'f_index', 'lines', 'col_index', 'branch_labels', |
|
1625 |
'twisty_state', 'twisty_expands_branch_ids'] |
|
1626 |
||
1627 |
def __init__(self, rev): |
|
1628 |
self.rev = rev |
|
1629 |
self.lines = [] |
|
1630 |
self.col_index = None |
|
1631 |
self.twisty_state = None |
|
1632 |
self.twisty_expands_branch_ids = [] |
|
1633 |
self.branch_labels = [] |
|
1634 |
||
1635 |
||
1636 |
class ComputedGraphViz(object): |
|
1637 |
"""Computed layout data for a graph.
|
|
1638 |
|
|
1639 |
:ivar graph_viz: Reference to parent `GraphVizLoader`.
|
|
1640 |
:ivar filtered_revs: List `ComputedRevisionData`. Only visible revisions
|
|
1641 |
are included.
|
|
1642 |
:ivar revisions: List `ComputedRevisionData`. Revision that are not
|
|
1643 |
visible are None.
|
|
1644 |
"""
|
|
1645 |
def __init__(self, graph_viz): |
|
1646 |
self.graph_viz = graph_viz |
|
1647 |
self.filtered_revs = [] |
|
1648 |
self.revisions = [None for i in xrange(len(graph_viz.revisions))] |