# Copyright (C) 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Tests for repository write groups."""
import sys

from bzrlib import (
    bzrdir,
    errors,
    memorytree,
    remote,
    versionedfile,
    )
from bzrlib.branch import BzrBranchFormat7
from bzrlib.transport import local, memory
from bzrlib.tests import TestNotApplicable
from bzrlib.tests.per_repository import TestCaseWithRepository
class TestWriteGroup(TestCaseWithRepository):
    """Tests for the basic write group lifecycle.

    A write group bounds a batch of modifications to a repository: it is
    opened with start_write_group() (which requires a write lock) and is
    closed with either commit_write_group() or abort_write_group().
    """

    def test_start_write_group_unlocked_needs_write_lock(self):
        # An unlocked repository refuses to start a write group.
        repo = self.make_repository('.')
        self.assertRaises(errors.NotWriteLocked, repo.start_write_group)

    def test_start_write_group_read_locked_needs_write_lock(self):
        # A read lock is not sufficient; a write lock is required.
        repo = self.make_repository('.')
        repo.lock_read()
        self.addCleanup(repo.unlock)
        self.assertRaises(errors.NotWriteLocked, repo.start_write_group)

    def test_start_write_group_write_locked_gets_None(self):
        # With a write lock held, start_write_group succeeds, returning None.
        repo = self.make_repository('.')
        repo.lock_write()
        self.assertEqual(None, repo.start_write_group())
        repo.commit_write_group()
        repo.unlock()

    def test_start_write_group_twice_errors(self):
        # Starting a second write group while one is open is an error.
        repo = self.make_repository('.')
        repo.lock_write()
        repo.start_write_group()
        try:
            # don't need a specific exception for now - this is
            # really to be sure it's used right, not for signalling
            # semantic information.
            self.assertRaises(errors.BzrError, repo.start_write_group)
        finally:
            repo.commit_write_group()
            repo.unlock()

    def test_commit_write_group_does_not_error(self):
        repo = self.make_repository('.')
        repo.lock_write()
        repo.start_write_group()
        # commit_write_group can either return None (for repositories without
        # isolated transactions) or a hint for pack(). So we only check it
        # works in this interface test, because all repositories are exercised.
        repo.commit_write_group()
        repo.unlock()

    def test_unlock_in_write_group(self):
        # Unlocking with an open write group is a usage error.
        repo = self.make_repository('.')
        repo.lock_write()
        repo.start_write_group()
        # don't need a specific exception for now - this is
        # really to be sure it's used right, not for signalling
        # semantic information.
        self.assertLogsError(errors.BzrError, repo.unlock)
        # after this error occurs, the repository is unlocked, and the write
        # group is gone.  you've had your chance, and you blew it. ;-)
        self.assertFalse(repo.is_locked())
        self.assertRaises(errors.BzrError, repo.commit_write_group)
        self.assertRaises(errors.BzrError, repo.unlock)

    def test_is_in_write_group(self):
        # is_in_write_group tracks start/commit/abort transitions.
        repo = self.make_repository('.')
        self.assertFalse(repo.is_in_write_group())
        repo.lock_write()
        repo.start_write_group()
        self.assertTrue(repo.is_in_write_group())
        repo.commit_write_group()
        self.assertFalse(repo.is_in_write_group())
        # abort also removes the in_write_group status.
        repo.start_write_group()
        self.assertTrue(repo.is_in_write_group())
        repo.abort_write_group()
        self.assertFalse(repo.is_in_write_group())
        repo.unlock()

    def test_abort_write_group_gets_None(self):
        repo = self.make_repository('.')
        repo.lock_write()
        repo.start_write_group()
        self.assertEqual(None, repo.abort_write_group())
        repo.unlock()

    def test_abort_write_group_does_not_raise_when_suppressed(self):
        if self.transport_server is local.LocalURLServer:
            # Damaging the repo below requires a transport we can rename
            # things under, so fall back to a memory transport.
            self.transport_server = None
            self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        # Damage the repository on the filesystem
        t = self.get_transport('')
        t.rename('repo', 'foo')
        self.addCleanup(t.rename, 'foo', 'repo')
        # abort_write_group will not raise an error, because either an
        # exception was not generated, or the exception was caught and
        # suppressed.  See also test_pack_repository's test of the same name.
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
class TestGetMissingParentInventories(TestCaseWithRepository):
    """Tests for Repository.get_missing_parent_inventories.

    Stacked repositories must hold enough data (texts and parent
    inventories) to be self-consistent; get_missing_parent_inventories
    reports what is still required before a write group may be committed.
    """

    def test_empty_get_missing_parent_inventories(self):
        """A new write group has no missing parent inventories."""
        repo = self.make_repository('.')
        repo.lock_write()
        repo.start_write_group()
        try:
            self.assertEqual(set(), set(repo.get_missing_parent_inventories()))
        finally:
            repo.commit_write_group()
            repo.unlock()

    def branch_trunk_and_make_tree(self, trunk_repo, relpath):
        """Make a memory tree branched from trunk_repo's 'rev-1'."""
        tree = self.make_branch_and_memory_tree('branch')
        trunk_repo.lock_read()
        self.addCleanup(trunk_repo.unlock)
        tree.branch.repository.fetch(trunk_repo, revision_id='rev-1')
        tree.set_parent_ids(['rev-1'])
        return tree

    def make_first_commit(self, repo):
        """Commit 'rev-0' and 'rev-1' (a dir and a file) into repo."""
        trunk = repo.bzrdir.create_branch()
        tree = memorytree.MemoryTree.create_on_branch(trunk)
        tree.lock_write()
        tree.add([''], ['TREE_ROOT'], ['directory'])
        tree.add(['dir'], ['dir-id'], ['directory'])
        tree.add(['filename'], ['file-id'], ['file'])
        tree.put_file_bytes_non_atomic('file-id', 'content\n')
        tree.commit('Trunk commit', rev_id='rev-0')
        tree.commit('Trunk commit', rev_id='rev-1')
        tree.unlock()

    def make_new_commit_in_new_repo(self, trunk_repo, parents=None):
        """Branch trunk_repo, commit 'rev-2' with the given parents.

        :return: the (read-locked) repository of the new branch.
        """
        tree = self.branch_trunk_and_make_tree(trunk_repo, 'branch')
        tree.set_parent_ids(parents)
        tree.commit('Branch commit', rev_id='rev-2')
        branch_repo = tree.branch.repository
        branch_repo.lock_read()
        self.addCleanup(branch_repo.unlock)
        return branch_repo

    def make_stackable_repo(self, relpath='trunk'):
        """Make a repository that supports stacking, or skip the test."""
        if isinstance(self.repository_format, remote.RemoteRepositoryFormat):
            # RemoteRepository by default builds a default format real
            # repository, but the default format is unstackble.  So explicitly
            # make a stackable real repository and use that.
            repo = self.make_repository(relpath, format='1.9')
            repo = bzrdir.BzrDir.open(self.get_url(relpath)).open_repository()
        else:
            repo = self.make_repository(relpath)
        if not repo._format.supports_external_lookups:
            raise TestNotApplicable('format not stackable')
        repo.bzrdir._format.set_branch_format(BzrBranchFormat7())
        return repo

    def reopen_repo_and_resume_write_group(self, repo):
        """Suspend repo's write group, reopen the repo, and resume the group.

        Skips the test if the repository cannot suspend write groups.
        """
        try:
            resume_tokens = repo.suspend_write_group()
        except errors.UnsuspendableWriteGroup:
            # If we got this far, and this repo does not support resuming write
            # groups, then get_missing_parent_inventories works in all
            # cases this repo supports.
            repo.abort_write_group()
            raise TestNotApplicable('Cannot suspend write group.')
        repo.unlock()
        reopened_repo = repo.bzrdir.open_repository()
        reopened_repo.lock_write()
        self.addCleanup(reopened_repo.unlock)
        reopened_repo.resume_write_group(resume_tokens)
        return reopened_repo

    def test_ghost_revision(self):
        """A parent inventory may be absent if all the needed texts are present.

        i.e., a ghost revision isn't (necessarily) considered to be a missing
        parent inventory.
        """
        # Make a trunk with one commit.
        trunk_repo = self.make_stackable_repo()
        self.make_first_commit(trunk_repo)
        trunk_repo.lock_read()
        self.addCleanup(trunk_repo.unlock)
        # Branch the trunk, add a new commit.
        branch_repo = self.make_new_commit_in_new_repo(
            trunk_repo, parents=['rev-1', 'ghost-rev'])
        inv = branch_repo.get_inventory('rev-2')
        # Make a new repo stacked on trunk, and then copy into it:
        #  - all texts in rev-2
        #  - the new inventory (rev-2)
        #  - the new revision (rev-2)
        repo = self.make_stackable_repo('stacked')
        repo.lock_write()
        repo.start_write_group()
        # Add all texts from in rev-2 inventory.  Note that this has to exclude
        # the root if the repo format does not support rich roots.
        rich_root = branch_repo._format.rich_root_data
        all_texts = [
            (ie.file_id, ie.revision) for ie in inv.iter_just_entries()
            if rich_root or inv.id2path(ie.file_id) != '']
        repo.texts.insert_record_stream(
            branch_repo.texts.get_record_stream(all_texts, 'unordered', False))
        # Add inventory and revision for rev-2.
        repo.add_inventory('rev-2', inv, ['rev-1', 'ghost-rev'])
        repo.revisions.insert_record_stream(
            branch_repo.revisions.get_record_stream(
                [('rev-2',)], 'unordered', False))
        # Now, no inventories are reported as missing, even though there is a
        # ghost.
        self.assertEqual(set(), repo.get_missing_parent_inventories())
        # Resuming the write group does not affect
        # get_missing_parent_inventories.
        reopened_repo = self.reopen_repo_and_resume_write_group(repo)
        self.assertEqual(set(), reopened_repo.get_missing_parent_inventories())
        reopened_repo.abort_write_group()

    def test_get_missing_parent_inventories(self):
        """A stacked repo with a single revision and inventory (no parent
        inventory) in it must have all the texts in its inventory (even if not
        changed w.r.t. to the absent parent), otherwise it will report missing
        texts/parent inventory.

        The core of this test is that a file was changed in rev-1, but in a
        stacked repo that only has rev-2.
        """
        # Make a trunk with one commit.
        trunk_repo = self.make_stackable_repo()
        self.make_first_commit(trunk_repo)
        trunk_repo.lock_read()
        self.addCleanup(trunk_repo.unlock)
        # Branch the trunk, add a new commit.
        branch_repo = self.make_new_commit_in_new_repo(
            trunk_repo, parents=['rev-1'])
        inv = branch_repo.get_inventory('rev-2')
        # Make a new repo stacked on trunk, and copy the new commit's revision
        # and inventory records to it.
        repo = self.make_stackable_repo('stacked')
        repo.lock_write()
        repo.start_write_group()
        # Insert a single fulltext inv (using add_inventory because it's
        # simpler than insert_record_stream)
        repo.add_inventory('rev-2', inv, ['rev-1'])
        repo.revisions.insert_record_stream(
            branch_repo.revisions.get_record_stream(
                [('rev-2',)], 'unordered', False))
        # There should be no missing compression parents
        self.assertEqual(set(),
            repo.inventories.get_missing_compression_parent_keys())
        self.assertEqual(
            set([('inventories', 'rev-1')]),
            repo.get_missing_parent_inventories())
        # Resuming the write group does not affect
        # get_missing_parent_inventories.
        reopened_repo = self.reopen_repo_and_resume_write_group(repo)
        self.assertEqual(
            set([('inventories', 'rev-1')]),
            reopened_repo.get_missing_parent_inventories())
        # Adding the parent inventory satisfies get_missing_parent_inventories.
        reopened_repo.inventories.insert_record_stream(
            branch_repo.inventories.get_record_stream(
                [('rev-1',)], 'unordered', False))
        self.assertEqual(
            set(), reopened_repo.get_missing_parent_inventories())
        reopened_repo.abort_write_group()

    def test_get_missing_parent_inventories_check(self):
        # check_for_missing_texts=False reports a ghost parent inventory;
        # with the (default) text check it is not considered missing.
        builder = self.make_branch_builder('test')
        builder.build_snapshot('A-id', ['ghost-parent-id'], [
            ('add', ('', 'root-id', 'directory', None)),
            ('add', ('file', 'file-id', 'file', 'content\n'))],
            allow_leftmost_as_ghost=True)
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo = self.make_repository('test-repo')
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        self.addCleanup(repo.abort_write_group)
        # Now, add the objects manually
        text_keys = [('file-id', 'A-id')]
        if repo.supports_rich_root():
            text_keys.append(('root-id', 'A-id'))
        # Directly add the texts, inventory, and revision object for 'A-id'
        repo.texts.insert_record_stream(b.repository.texts.get_record_stream(
            text_keys, 'unordered', True))
        repo.add_revision('A-id', b.repository.get_revision('A-id'),
                          b.repository.get_inventory('A-id'))
        get_missing = repo.get_missing_parent_inventories
        if repo._format.supports_external_lookups:
            self.assertEqual(set([('inventories', 'ghost-parent-id')]),
                get_missing(check_for_missing_texts=False))
            self.assertEqual(set(), get_missing(check_for_missing_texts=True))
            self.assertEqual(set(), get_missing())
        else:
            # If we don't support external lookups, we always return empty
            self.assertEqual(set(), get_missing(check_for_missing_texts=False))
            self.assertEqual(set(), get_missing(check_for_missing_texts=True))
            self.assertEqual(set(), get_missing())

    def test_insert_stream_passes_resume_info(self):
        repo = self.make_repository('test-repo')
        if (not repo._format.supports_external_lookups or
            isinstance(repo, remote.RemoteRepository)):
            raise TestNotApplicable(
                'only valid for direct connections to resumable repos')
        # log calls to get_missing_parent_inventories, so that we can assert it
        # is called with the correct parameters
        call_log = []
        orig = repo.get_missing_parent_inventories
        def get_missing(check_for_missing_texts=True):
            call_log.append(check_for_missing_texts)
            return orig(check_for_missing_texts=check_for_missing_texts)
        repo.get_missing_parent_inventories = get_missing
        repo.lock_write()
        self.addCleanup(repo.unlock)
        sink = repo._get_sink()
        # A fresh (non-resumed) stream only needs the cheap check.
        sink.insert_stream((), repo._format, [])
        self.assertEqual([False], call_log)
        del call_log[:]
        repo.start_write_group()
        # We need to insert something, or suspend_write_group won't actually
        # create a token.
        repo.texts.insert_record_stream([versionedfile.FulltextContentFactory(
            ('file-id', 'rev-id'), (), None, 'lines\n')])
        tokens = repo.suspend_write_group()
        self.assertNotEqual([], tokens)
        # A resumed stream must perform the full missing-texts check.
        sink.insert_stream((), repo._format, tokens)
        self.assertEqual([True], call_log)
class TestResumeableWriteGroup(TestCaseWithRepository):
366
def make_write_locked_repo(self, relpath='repo'):
367
repo = self.make_repository(relpath)
369
self.addCleanup(repo.unlock)
372
def reopen_repo(self, repo):
373
same_repo = repo.bzrdir.open_repository()
374
same_repo.lock_write()
375
self.addCleanup(same_repo.unlock)
378
def require_suspendable_write_groups(self, reason):
379
repo = self.make_repository('__suspend_test')
381
self.addCleanup(repo.unlock)
382
repo.start_write_group()
384
wg_tokens = repo.suspend_write_group()
385
except errors.UnsuspendableWriteGroup:
386
repo.abort_write_group()
387
raise TestNotApplicable(reason)
389
def test_suspend_write_group(self):
390
repo = self.make_write_locked_repo()
391
repo.start_write_group()
392
# Add some content so this isn't an empty write group (which may return
394
repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
396
wg_tokens = repo.suspend_write_group()
397
except errors.UnsuspendableWriteGroup:
398
# The contract for repos that don't support suspending write groups
399
# is that suspend_write_group raises UnsuspendableWriteGroup, but
400
# is otherwise a no-op. So we can still e.g. abort the write group
402
self.assertTrue(repo.is_in_write_group())
403
repo.abort_write_group()
405
# After suspending a write group we are no longer in a write group
406
self.assertFalse(repo.is_in_write_group())
407
# suspend_write_group returns a list of tokens, which are strs. If
408
# no other write groups were resumed, there will only be one token.
409
self.assertEqual(1, len(wg_tokens))
410
self.assertIsInstance(wg_tokens[0], str)
411
# See also test_pack_repository's test of the same name.
413
def test_resume_write_group_then_abort(self):
414
repo = self.make_write_locked_repo()
415
repo.start_write_group()
416
# Add some content so this isn't an empty write group (which may return
418
text_key = ('file-id', 'revid')
419
repo.texts.add_lines(text_key, (), ['lines'])
421
wg_tokens = repo.suspend_write_group()
422
except errors.UnsuspendableWriteGroup:
423
# If the repo does not support suspending write groups, it doesn't
424
# support resuming them either.
425
repo.abort_write_group()
427
errors.UnsuspendableWriteGroup, repo.resume_write_group, [])
429
#self.assertEqual([], list(repo.texts.keys()))
430
same_repo = self.reopen_repo(repo)
431
same_repo.resume_write_group(wg_tokens)
432
self.assertEqual([text_key], list(same_repo.texts.keys()))
433
self.assertTrue(same_repo.is_in_write_group())
434
same_repo.abort_write_group()
435
self.assertEqual([], list(repo.texts.keys()))
436
# See also test_pack_repository's test of the same name.
438
def test_multiple_resume_write_group(self):
439
self.require_suspendable_write_groups(
440
'Cannot test resume on repo that does not support suspending')
441
repo = self.make_write_locked_repo()
442
repo.start_write_group()
443
# Add some content so this isn't an empty write group (which may return
445
first_key = ('file-id', 'revid')
446
repo.texts.add_lines(first_key, (), ['lines'])
447
wg_tokens = repo.suspend_write_group()
448
same_repo = self.reopen_repo(repo)
449
same_repo.resume_write_group(wg_tokens)
450
self.assertTrue(same_repo.is_in_write_group())
451
second_key = ('file-id', 'second-revid')
452
same_repo.texts.add_lines(second_key, (first_key,), ['more lines'])
454
new_wg_tokens = same_repo.suspend_write_group()
457
same_repo.abort_write_group(suppress_errors=True)
458
raise e[0], e[1], e[2]
459
self.assertEqual(2, len(new_wg_tokens))
460
self.assertSubset(wg_tokens, new_wg_tokens)
461
same_repo = self.reopen_repo(repo)
462
same_repo.resume_write_group(new_wg_tokens)
463
both_keys = set([first_key, second_key])
464
self.assertEqual(both_keys, same_repo.texts.keys())
465
same_repo.abort_write_group()
467
def test_no_op_suspend_resume(self):
468
self.require_suspendable_write_groups(
469
'Cannot test resume on repo that does not support suspending')
470
repo = self.make_write_locked_repo()
471
repo.start_write_group()
472
# Add some content so this isn't an empty write group (which may return
474
text_key = ('file-id', 'revid')
475
repo.texts.add_lines(text_key, (), ['lines'])
476
wg_tokens = repo.suspend_write_group()
477
same_repo = self.reopen_repo(repo)
478
same_repo.resume_write_group(wg_tokens)
479
new_wg_tokens = same_repo.suspend_write_group()
480
self.assertEqual(wg_tokens, new_wg_tokens)
481
same_repo = self.reopen_repo(repo)
482
same_repo.resume_write_group(wg_tokens)
483
self.assertEqual([text_key], list(same_repo.texts.keys()))
484
same_repo.abort_write_group()
486
def test_read_after_suspend_fails(self):
487
self.require_suspendable_write_groups(
488
'Cannot test suspend on repo that does not support suspending')
489
repo = self.make_write_locked_repo()
490
repo.start_write_group()
491
# Add some content so this isn't an empty write group (which may return
493
text_key = ('file-id', 'revid')
494
repo.texts.add_lines(text_key, (), ['lines'])
495
wg_tokens = repo.suspend_write_group()
496
self.assertEqual([], list(repo.texts.keys()))
498
def test_read_after_second_suspend_fails(self):
499
self.require_suspendable_write_groups(
500
'Cannot test suspend on repo that does not support suspending')
501
repo = self.make_write_locked_repo()
502
repo.start_write_group()
503
# Add some content so this isn't an empty write group (which may return
505
text_key = ('file-id', 'revid')
506
repo.texts.add_lines(text_key, (), ['lines'])
507
wg_tokens = repo.suspend_write_group()
508
same_repo = self.reopen_repo(repo)
509
same_repo.resume_write_group(wg_tokens)
510
same_repo.suspend_write_group()
511
self.assertEqual([], list(same_repo.texts.keys()))
513
def test_read_after_resume_abort_fails(self):
514
self.require_suspendable_write_groups(
515
'Cannot test suspend on repo that does not support suspending')
516
repo = self.make_write_locked_repo()
517
repo.start_write_group()
518
# Add some content so this isn't an empty write group (which may return
520
text_key = ('file-id', 'revid')
521
repo.texts.add_lines(text_key, (), ['lines'])
522
wg_tokens = repo.suspend_write_group()
523
same_repo = self.reopen_repo(repo)
524
same_repo.resume_write_group(wg_tokens)
525
same_repo.abort_write_group()
526
self.assertEqual([], list(same_repo.texts.keys()))
528
def test_cannot_resume_aborted_write_group(self):
529
self.require_suspendable_write_groups(
530
'Cannot test resume on repo that does not support suspending')
531
repo = self.make_write_locked_repo()
532
repo.start_write_group()
533
# Add some content so this isn't an empty write group (which may return
535
text_key = ('file-id', 'revid')
536
repo.texts.add_lines(text_key, (), ['lines'])
537
wg_tokens = repo.suspend_write_group()
538
same_repo = self.reopen_repo(repo)
539
same_repo.resume_write_group(wg_tokens)
540
same_repo.abort_write_group()
541
same_repo = self.reopen_repo(repo)
543
errors.UnresumableWriteGroup, same_repo.resume_write_group,
546
def test_commit_resumed_write_group_no_new_data(self):
547
self.require_suspendable_write_groups(
548
'Cannot test resume on repo that does not support suspending')
549
repo = self.make_write_locked_repo()
550
repo.start_write_group()
551
# Add some content so this isn't an empty write group (which may return
553
text_key = ('file-id', 'revid')
554
repo.texts.add_lines(text_key, (), ['lines'])
555
wg_tokens = repo.suspend_write_group()
556
same_repo = self.reopen_repo(repo)
557
same_repo.resume_write_group(wg_tokens)
558
same_repo.commit_write_group()
559
self.assertEqual([text_key], list(same_repo.texts.keys()))
561
'lines', same_repo.texts.get_record_stream([text_key],
562
'unordered', True).next().get_bytes_as('fulltext'))
564
errors.UnresumableWriteGroup, same_repo.resume_write_group,
567
def test_commit_resumed_write_group_plus_new_data(self):
568
self.require_suspendable_write_groups(
569
'Cannot test resume on repo that does not support suspending')
570
repo = self.make_write_locked_repo()
571
repo.start_write_group()
572
# Add some content so this isn't an empty write group (which may return
574
first_key = ('file-id', 'revid')
575
repo.texts.add_lines(first_key, (), ['lines'])
576
wg_tokens = repo.suspend_write_group()
577
same_repo = self.reopen_repo(repo)
578
same_repo.resume_write_group(wg_tokens)
579
second_key = ('file-id', 'second-revid')
580
same_repo.texts.add_lines(second_key, (first_key,), ['more lines'])
581
same_repo.commit_write_group()
583
set([first_key, second_key]), set(same_repo.texts.keys()))
585
'lines', same_repo.texts.get_record_stream([first_key],
586
'unordered', True).next().get_bytes_as('fulltext'))
588
'more lines', same_repo.texts.get_record_stream([second_key],
589
'unordered', True).next().get_bytes_as('fulltext'))
591
def make_source_with_delta_record(self):
592
# Make a source repository with a delta record in it.
593
source_repo = self.make_write_locked_repo('source')
594
source_repo.start_write_group()
595
key_base = ('file-id', 'base')
596
key_delta = ('file-id', 'delta')
598
yield versionedfile.FulltextContentFactory(
599
key_base, (), None, 'lines\n')
600
yield versionedfile.FulltextContentFactory(
601
key_delta, (key_base,), None, 'more\nlines\n')
602
source_repo.texts.insert_record_stream(text_stream())
603
source_repo.commit_write_group()
606
def test_commit_resumed_write_group_with_missing_parents(self):
607
self.require_suspendable_write_groups(
608
'Cannot test resume on repo that does not support suspending')
609
source_repo = self.make_source_with_delta_record()
610
key_base = ('file-id', 'base')
611
key_delta = ('file-id', 'delta')
612
# Start a write group, insert just a delta.
613
repo = self.make_write_locked_repo()
614
repo.start_write_group()
615
stream = source_repo.texts.get_record_stream(
616
[key_delta], 'unordered', False)
617
repo.texts.insert_record_stream(stream)
618
# It's either not commitable due to the missing compression parent, or
619
# the stacked location has already filled in the fulltext.
621
repo.commit_write_group()
622
except errors.BzrCheckError:
623
# It refused to commit because we have a missing parent
626
same_repo = self.reopen_repo(repo)
627
same_repo.lock_read()
628
record = same_repo.texts.get_record_stream([key_delta],
629
'unordered', True).next()
630
self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext'))
632
# Merely suspending and resuming doesn't make it commitable either.
633
wg_tokens = repo.suspend_write_group()
634
same_repo = self.reopen_repo(repo)
635
same_repo.resume_write_group(wg_tokens)
637
errors.BzrCheckError, same_repo.commit_write_group)
638
same_repo.abort_write_group()
640
def test_commit_resumed_write_group_adding_missing_parents(self):
641
self.require_suspendable_write_groups(
642
'Cannot test resume on repo that does not support suspending')
643
source_repo = self.make_source_with_delta_record()
644
key_base = ('file-id', 'base')
645
key_delta = ('file-id', 'delta')
646
# Start a write group.
647
repo = self.make_write_locked_repo()
648
repo.start_write_group()
649
# Add some content so this isn't an empty write group (which may return
651
text_key = ('file-id', 'revid')
652
repo.texts.add_lines(text_key, (), ['lines'])
653
# Suspend it, then resume it.
654
wg_tokens = repo.suspend_write_group()
655
same_repo = self.reopen_repo(repo)
656
same_repo.resume_write_group(wg_tokens)
657
# Add a record with a missing compression parent
658
stream = source_repo.texts.get_record_stream(
659
[key_delta], 'unordered', False)
660
same_repo.texts.insert_record_stream(stream)
661
# Just like if we'd added that record without a suspend/resume cycle,
662
# commit_write_group fails.
664
same_repo.commit_write_group()
665
except errors.BzrCheckError:
668
# If the commit_write_group didn't fail, that is because the
669
# insert_record_stream already gave it a fulltext.
670
same_repo = self.reopen_repo(repo)
671
same_repo.lock_read()
672
record = same_repo.texts.get_record_stream([key_delta],
673
'unordered', True).next()
674
self.assertEqual('more\nlines\n', record.get_bytes_as('fulltext'))
676
same_repo.abort_write_group()
678
def test_add_missing_parent_after_resume(self):
679
self.require_suspendable_write_groups(
680
'Cannot test resume on repo that does not support suspending')
681
source_repo = self.make_source_with_delta_record()
682
key_base = ('file-id', 'base')
683
key_delta = ('file-id', 'delta')
684
# Start a write group, insert just a delta.
685
repo = self.make_write_locked_repo()
686
repo.start_write_group()
687
stream = source_repo.texts.get_record_stream(
688
[key_delta], 'unordered', False)
689
repo.texts.insert_record_stream(stream)
690
# Suspend it, then resume it.
691
wg_tokens = repo.suspend_write_group()
692
same_repo = self.reopen_repo(repo)
693
same_repo.resume_write_group(wg_tokens)
694
# Fill in the missing compression parent.
695
stream = source_repo.texts.get_record_stream(
696
[key_base], 'unordered', False)
697
same_repo.texts.insert_record_stream(stream)
698
same_repo.commit_write_group()
700
def test_suspend_empty_initial_write_group(self):
701
"""Suspending a write group with no writes returns an empty token
704
self.require_suspendable_write_groups(
705
'Cannot test suspend on repo that does not support suspending')
706
repo = self.make_write_locked_repo()
707
repo.start_write_group()
708
wg_tokens = repo.suspend_write_group()
709
self.assertEqual([], wg_tokens)
711
def test_suspend_empty_initial_write_group(self):
712
"""Resuming an empty token list is equivalent to start_write_group."""
713
self.require_suspendable_write_groups(
714
'Cannot test resume on repo that does not support suspending')
715
repo = self.make_write_locked_repo()
716
repo.resume_write_group([])
717
repo.abort_write_group()