# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
"""Tests for pack repositories.
19
These tests are repeated for all pack-based repository formats.
22
from cStringIO import StringIO
23
from stat import S_ISDIR
25
from bzrlib.btree_index import BTreeGraphIndex
26
from bzrlib.index import GraphIndex
34
revision as _mod_revision,
41
from bzrlib.smart import (
45
from bzrlib.tests import (
47
TestCaseWithTransport,
51
from bzrlib.transport import (
56
from bzrlib.tests.per_repository import TestCaseWithRepository
59
class TestPackRepository(TestCaseWithTransport):
60
"""Tests to be repeated across all pack-based formats.
62
The following are populated from the test scenario:
64
:ivar format_name: Registered name fo the format to test.
65
:ivar format_string: On-disk format marker.
66
:ivar format_supports_external_lookups: Boolean.
70
return bzrdir.format_registry.make_bzrdir(self.format_name)
72
def test_attribute__fetch_order(self):
    """Packs do not need ordered data retrieval.

    Pack formats advertise 'unordered' as their _fetch_order, meaning
    fetch may stream records in any order.
    """
    format = self.get_format()
    repo = self.make_repository('.', format=format)
    self.assertEqual('unordered', repo._format._fetch_order)
78
def test_attribute__fetch_uses_deltas(self):
    """Packs reuse deltas.

    Pack formats set _fetch_uses_deltas so that fetch can transmit
    delta-compressed records instead of full texts.
    """
    format = self.get_format()
    repo = self.make_repository('.', format=format)
    self.assertEqual(True, repo._format._fetch_uses_deltas)
84
def test_disk_layout(self):
85
format = self.get_format()
86
repo = self.make_repository('.', format=format)
87
# in case of side effects of locking.
90
t = repo.bzrdir.get_repository_transport(None)
92
# XXX: no locks left when unlocked at the moment
93
# self.assertEqualDiff('', t.get('lock').read())
94
self.check_databases(t)
96
def check_format(self, t):
98
self.format_string, # from scenario
99
t.get('format').read())
101
def assertHasNoKndx(self, t, knit_name):
    """Assert that knit_name has no .kndx index file on transport t."""
    self.assertFalse(t.has(knit_name + '.kndx'))
105
def assertHasNoKnit(self, t, knit_name):
    """Assert that knit_name has no .knit data file on transport t."""
    # The previous docstring claimed the knit "exists", but the
    # assertion below checks for its *absence* — corrected.
    self.assertFalse(t.has(knit_name + '.knit'))
110
def check_databases(self, t):
111
"""check knit content for a repository."""
112
# check conversion worked
113
self.assertHasNoKndx(t, 'inventory')
114
self.assertHasNoKnit(t, 'inventory')
115
self.assertHasNoKndx(t, 'revisions')
116
self.assertHasNoKnit(t, 'revisions')
117
self.assertHasNoKndx(t, 'signatures')
118
self.assertHasNoKnit(t, 'signatures')
119
self.assertFalse(t.has('knits'))
120
# revision-indexes file-container directory
122
list(self.index_class(t, 'pack-names', None).iter_all_entries()))
123
self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
124
self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
125
self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
126
self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))
128
def test_shared_disk_layout(self):
129
format = self.get_format()
130
repo = self.make_repository('.', shared=True, format=format)
132
t = repo.bzrdir.get_repository_transport(None)
134
# XXX: no locks left when unlocked at the moment
135
# self.assertEqualDiff('', t.get('lock').read())
136
# We should have a 'shared-storage' marker file.
137
self.assertEqualDiff('', t.get('shared-storage').read())
138
self.check_databases(t)
140
def test_shared_no_tree_disk_layout(self):
141
format = self.get_format()
142
repo = self.make_repository('.', shared=True, format=format)
143
repo.set_make_working_trees(False)
145
t = repo.bzrdir.get_repository_transport(None)
147
# XXX: no locks left when unlocked at the moment
148
# self.assertEqualDiff('', t.get('lock').read())
149
# We should have a 'shared-storage' marker file.
150
self.assertEqualDiff('', t.get('shared-storage').read())
151
# We should have a marker for the no-working-trees flag.
152
self.assertEqualDiff('', t.get('no-working-trees').read())
153
# The marker should go when we toggle the setting.
154
repo.set_make_working_trees(True)
155
self.assertFalse(t.has('no-working-trees'))
156
self.check_databases(t)
158
def test_adding_revision_creates_pack_indices(self):
159
format = self.get_format()
160
tree = self.make_branch_and_tree('.', format=format)
161
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
163
list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
164
tree.commit('foobarbaz')
165
index = self.index_class(trans, 'pack-names', None)
166
index_nodes = list(index.iter_all_entries())
167
self.assertEqual(1, len(index_nodes))
168
node = index_nodes[0]
170
# the pack sizes should be listed in the index
172
sizes = [int(digits) for digits in pack_value.split(' ')]
173
for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
174
stat = trans.stat('indices/%s%s' % (name, suffix))
175
self.assertEqual(size, stat.st_size)
177
def test_pulling_nothing_leads_to_no_new_names(self):
178
format = self.get_format()
179
tree1 = self.make_branch_and_tree('1', format=format)
180
tree2 = self.make_branch_and_tree('2', format=format)
181
tree1.branch.repository.fetch(tree2.branch.repository)
182
trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
184
list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
186
def test_commit_across_pack_shape_boundary_autopacks(self):
187
format = self.get_format()
188
tree = self.make_branch_and_tree('.', format=format)
189
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
190
# This test could be a little cheaper by replacing the packs
191
# attribute on the repository to allow a different pack distribution
192
# and max packs policy - so we are checking the policy is honoured
193
# in the test. But for now 11 commits is not a big deal in a single
196
tree.commit('commit %s' % x)
197
# there should be 9 packs:
198
index = self.index_class(trans, 'pack-names', None)
199
self.assertEqual(9, len(list(index.iter_all_entries())))
200
# insert some files in obsolete_packs which should be removed by pack.
201
trans.put_bytes('obsolete_packs/foo', '123')
202
trans.put_bytes('obsolete_packs/bar', '321')
203
# committing one more should coalesce to 1 of 10.
204
tree.commit('commit triggering pack')
205
index = self.index_class(trans, 'pack-names', None)
206
self.assertEqual(1, len(list(index.iter_all_entries())))
207
# packing should not damage data
208
tree = tree.bzrdir.open_workingtree()
209
check_result = tree.branch.repository.check(
210
[tree.branch.last_revision()])
211
# We should have 50 (10x5) files in the obsolete_packs directory.
212
obsolete_files = list(trans.list_dir('obsolete_packs'))
213
self.assertFalse('foo' in obsolete_files)
214
self.assertFalse('bar' in obsolete_files)
215
self.assertEqual(50, len(obsolete_files))
216
# XXX: Todo check packs obsoleted correctly - old packs and indices
217
# in the obsolete_packs directory.
218
large_pack_name = list(index.iter_all_entries())[0][1][0]
219
# finally, committing again should not touch the large pack.
220
tree.commit('commit not triggering pack')
221
index = self.index_class(trans, 'pack-names', None)
222
self.assertEqual(2, len(list(index.iter_all_entries())))
223
pack_names = [node[1][0] for node in index.iter_all_entries()]
224
self.assertTrue(large_pack_name in pack_names)
226
def test_fail_obsolete_deletion(self):
227
# failing to delete obsolete packs is not fatal
228
format = self.get_format()
229
server = fakenfs.FakeNFSServer()
231
self.addCleanup(server.tearDown)
232
transport = get_transport(server.get_url())
233
bzrdir = self.get_format().initialize_on_transport(transport)
234
repo = bzrdir.create_repository()
235
repo_transport = bzrdir.get_repository_transport(None)
236
self.assertTrue(repo_transport.has('obsolete_packs'))
237
# these files are in use by another client and typically can't be deleted
238
repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
239
repo._pack_collection._clear_obsolete_packs()
240
self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
242
def test_pack_after_two_commits_packs_everything(self):
243
format = self.get_format()
244
tree = self.make_branch_and_tree('.', format=format)
245
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
247
tree.commit('more work')
248
tree.branch.repository.pack()
249
# there should be 1 pack:
250
index = self.index_class(trans, 'pack-names', None)
251
self.assertEqual(1, len(list(index.iter_all_entries())))
252
self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
254
def test_pack_layout(self):
255
format = self.get_format()
256
tree = self.make_branch_and_tree('.', format=format)
257
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
258
tree.commit('start', rev_id='1')
259
tree.commit('more work', rev_id='2')
260
tree.branch.repository.pack()
262
self.addCleanup(tree.unlock)
263
pack = tree.branch.repository._pack_collection.get_pack_by_name(
264
tree.branch.repository._pack_collection.names()[0])
265
# revision access tends to be tip->ancestor, so ordering that way on
266
# disk is a good idea.
267
for _1, key, val, refs in pack.revision_index.iter_all_entries():
269
pos_1 = int(val[1:].split()[0])
271
pos_2 = int(val[1:].split()[0])
272
self.assertTrue(pos_2 < pos_1)
274
def test_pack_repositories_support_multiple_write_locks(self):
275
format = self.get_format()
276
self.make_repository('.', shared=True, format=format)
277
r1 = repository.Repository.open('.')
278
r2 = repository.Repository.open('.')
280
self.addCleanup(r1.unlock)
284
def _add_text(self, repo, fileid):
    """Add a text to the repository within an already-open write group.

    The text is keyed (fileid, 'samplerev+' + fileid) with no parents
    and empty content — just enough to create a new pack on commit.
    """
    repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
288
def test_concurrent_writers_merge_new_packs(self):
289
format = self.get_format()
290
self.make_repository('.', shared=True, format=format)
291
r1 = repository.Repository.open('.')
292
r2 = repository.Repository.open('.')
295
# access enough data to load the names list
296
list(r1.all_revision_ids())
299
# access enough data to load the names list
300
list(r2.all_revision_ids())
301
r1.start_write_group()
303
r2.start_write_group()
305
self._add_text(r1, 'fileidr1')
306
self._add_text(r2, 'fileidr2')
308
r2.abort_write_group()
311
r1.abort_write_group()
313
# both r1 and r2 have open write groups with data in them
314
# created while the other's write group was open.
315
# Commit both which requires a merge to the pack-names.
317
r1.commit_write_group()
319
r1.abort_write_group()
320
r2.abort_write_group()
322
r2.commit_write_group()
323
# tell r1 to reload from disk
324
r1._pack_collection.reset()
325
# Now both repositories should know about both names
326
r1._pack_collection.ensure_loaded()
327
r2._pack_collection.ensure_loaded()
328
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
329
self.assertEqual(2, len(r1._pack_collection.names()))
335
def test_concurrent_writer_second_preserves_dropping_a_pack(self):
336
format = self.get_format()
337
self.make_repository('.', shared=True, format=format)
338
r1 = repository.Repository.open('.')
339
r2 = repository.Repository.open('.')
343
r1.start_write_group()
345
self._add_text(r1, 'fileidr1')
347
r1.abort_write_group()
350
r1.commit_write_group()
351
r1._pack_collection.ensure_loaded()
352
name_to_drop = r1._pack_collection.all_packs()[0].name
357
# access enough data to load the names list
358
list(r1.all_revision_ids())
361
# access enough data to load the names list
362
list(r2.all_revision_ids())
363
r1._pack_collection.ensure_loaded()
365
r2.start_write_group()
367
# in r1, drop the pack
368
r1._pack_collection._remove_pack_from_memory(
369
r1._pack_collection.get_pack_by_name(name_to_drop))
371
self._add_text(r2, 'fileidr2')
373
r2.abort_write_group()
376
r1._pack_collection.reset()
378
# r1 has a changed names list, and r2 an open write groups with
380
# save r1, and then commit the r2 write group, which requires a
381
# merge to the pack-names, which should not reinstate
384
r1._pack_collection._save_pack_names()
385
r1._pack_collection.reset()
387
r2.abort_write_group()
390
r2.commit_write_group()
392
r2.abort_write_group()
394
# Now both repositories should now about just one name.
395
r1._pack_collection.ensure_loaded()
396
r2._pack_collection.ensure_loaded()
397
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
398
self.assertEqual(1, len(r1._pack_collection.names()))
399
self.assertFalse(name_to_drop in r1._pack_collection.names())
405
def test_concurrent_pack_triggers_reload(self):
406
# create 2 packs, which we will then collapse
407
tree = self.make_branch_and_tree('tree')
410
rev1 = tree.commit('one')
411
rev2 = tree.commit('two')
412
r2 = repository.Repository.open('tree')
415
# Now r2 has read the pack-names file, but will need to reload
416
# it after r1 has repacked
417
tree.branch.repository.pack()
418
self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
424
def test_concurrent_pack_during_get_record_reloads(self):
425
tree = self.make_branch_and_tree('tree')
428
rev1 = tree.commit('one')
429
rev2 = tree.commit('two')
430
keys = [(rev1,), (rev2,)]
431
r2 = repository.Repository.open('tree')
434
# At this point, we will start grabbing a record stream, and
435
# trigger a repack mid-way
438
record_stream = r2.revisions.get_record_stream(keys,
440
for record in record_stream:
441
result[record.key] = record
443
tree.branch.repository.pack()
445
# The first record will be found in the original location, but
446
# after the pack, we have to reload to find the next record
447
self.assertEqual(sorted(keys), sorted(result.keys()))
453
def test_lock_write_does_not_physically_lock(self):
454
repo = self.make_repository('.', format=self.get_format())
456
self.addCleanup(repo.unlock)
457
self.assertFalse(repo.get_physical_lock_status())
459
def prepare_for_break_lock(self):
    """Prime the global UI factory for a break-lock confirmation.

    Point ui.ui_factory at a SilentUIFactory whose stdin supplies "y\n"
    so a subsequent break_lock call finds usable input, and restore the
    previous factory on test cleanup.
    """
    old_factory = ui.ui_factory
    def restoreFactory():
        ui.ui_factory = old_factory
    self.addCleanup(restoreFactory)
    ui.ui_factory = ui.SilentUIFactory()
    ui.ui_factory.stdin = StringIO("y\n")
469
def test_break_lock_breaks_physical_lock(self):
470
repo = self.make_repository('.', format=self.get_format())
471
repo._pack_collection.lock_names()
472
repo.control_files.leave_in_place()
474
repo2 = repository.Repository.open('.')
475
self.assertTrue(repo.get_physical_lock_status())
476
self.prepare_for_break_lock()
478
self.assertFalse(repo.get_physical_lock_status())
480
def test_broken_physical_locks_error_on__unlock_names_lock(self):
481
repo = self.make_repository('.', format=self.get_format())
482
repo._pack_collection.lock_names()
483
self.assertTrue(repo.get_physical_lock_status())
484
repo2 = repository.Repository.open('.')
485
self.prepare_for_break_lock()
487
self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)
489
def test_fetch_without_find_ghosts_ignores_ghosts(self):
490
# we want two repositories at this point:
491
# one with a revision that is a ghost in the other
493
# 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
494
# 'references' is present in both repositories, and 'tip' is present
496
# has_ghost missing_ghost
497
#------------------------------
499
# 'references' 'references'
501
# In this test we fetch 'tip' which should not fetch 'ghost'
502
has_ghost = self.make_repository('has_ghost', format=self.get_format())
503
missing_ghost = self.make_repository('missing_ghost',
504
format=self.get_format())
506
def add_commit(repo, revision_id, parent_ids):
508
repo.start_write_group()
509
inv = inventory.Inventory(revision_id=revision_id)
510
inv.root.revision = revision_id
511
root_id = inv.root.file_id
512
sha1 = repo.add_inventory(revision_id, inv, [])
513
repo.texts.add_lines((root_id, revision_id), [], [])
514
rev = _mod_revision.Revision(timestamp=0,
516
committer="Foo Bar <foo@example.com>",
519
revision_id=revision_id)
520
rev.parent_ids = parent_ids
521
repo.add_revision(revision_id, rev)
522
repo.commit_write_group()
524
add_commit(has_ghost, 'ghost', [])
525
add_commit(has_ghost, 'references', ['ghost'])
526
add_commit(missing_ghost, 'references', ['ghost'])
527
add_commit(has_ghost, 'tip', ['references'])
528
missing_ghost.fetch(has_ghost, 'tip')
529
# missing ghost now has tip and not ghost.
530
rev = missing_ghost.get_revision('tip')
531
inv = missing_ghost.get_inventory('tip')
532
self.assertRaises(errors.NoSuchRevision,
533
missing_ghost.get_revision, 'ghost')
534
self.assertRaises(errors.NoSuchRevision,
535
missing_ghost.get_inventory, 'ghost')
537
def make_write_ready_repo(self):
538
repo = self.make_repository('.', format=self.get_format())
540
repo.start_write_group()
543
def test_missing_inventories_compression_parent_prevents_commit(self):
544
repo = self.make_write_ready_repo()
546
repo.inventories._index._missing_compression_parents.add(key)
547
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
548
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
549
repo.abort_write_group()
552
def test_missing_revisions_compression_parent_prevents_commit(self):
553
repo = self.make_write_ready_repo()
555
repo.revisions._index._missing_compression_parents.add(key)
556
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
557
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
558
repo.abort_write_group()
561
def test_missing_signatures_compression_parent_prevents_commit(self):
562
repo = self.make_write_ready_repo()
564
repo.signatures._index._missing_compression_parents.add(key)
565
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
566
self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
567
repo.abort_write_group()
570
def test_missing_text_compression_parent_prevents_commit(self):
    """Missing text compression parents make commit_write_group fail."""
    repo = self.make_write_ready_repo()
    key = ('some', 'junk')
    repo.texts._index._missing_compression_parents.add(key)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    # A second commit attempt must fail the same way, not succeed.
    # NOTE(review): 'e' looks unused here; follow-up assertions on the
    # exception may exist beyond this extract — confirm before removing.
    e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    repo.abort_write_group()
579
def test_supports_external_lookups(self):
    """The format's stacking support flag matches the test scenario."""
    repo = self.make_repository('.', format=self.get_format())
    self.assertEqual(self.format_supports_external_lookups,
        repo._format.supports_external_lookups)
584
def test_abort_write_group_does_not_raise_when_suppressed(self):
585
"""Similar to per_repository.test_write_group's test of the same name.
587
Also requires that the exception is logged.
589
self.vfs_transport_factory = memory.MemoryServer
590
repo = self.make_repository('repo')
591
token = repo.lock_write()
592
self.addCleanup(repo.unlock)
593
repo.start_write_group()
594
# Damage the repository on the filesystem
595
self.get_transport('').rename('repo', 'foo')
596
# abort_write_group will not raise an error
597
self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
598
# But it does log an error
599
log_file = self._get_log(keep_log_file=True)
600
self.assertContainsRe(log_file, 'abort_write_group failed')
601
self.assertContainsRe(log_file, r'INFO bzr: ERROR \(ignored\):')
602
if token is not None:
603
repo.leave_lock_in_place()
605
def test_abort_write_group_does_raise_when_not_suppressed(self):
    """abort_write_group propagates errors when suppression is off."""
    self.vfs_transport_factory = memory.MemoryServer
    repo = self.make_repository('repo')
    token = repo.lock_write()
    self.addCleanup(repo.unlock)
    repo.start_write_group()
    # Damage the repository on the filesystem so the abort cannot
    # clean up its upload files.
    self.get_transport('').rename('repo', 'foo')
    # Without suppress_errors, abort_write_group must raise.
    # (The old comment saying it "will not raise" was a copy-paste
    # leftover from the suppressed-errors sibling test.)
    self.assertRaises(Exception, repo.abort_write_group)
    if token is not None:
        repo.leave_lock_in_place()
618
def test_suspend_write_group(self):
    """Suspending a write group parks an md5-named .pack in upload/."""
    self.vfs_transport_factory = memory.MemoryServer
    repo = self.make_repository('repo')
    token = repo.lock_write()
    self.addCleanup(repo.unlock)
    repo.start_write_group()
    repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
    wg_tokens = repo.suspend_write_group()
    # The suspend token is the md5 of the pack's bytes (verified by the
    # final assertion); the pack is left in the upload ("limbo")
    # directory as <token>.pack.
    expected_pack_name = wg_tokens[0] + '.pack'
    upload_transport = repo._pack_collection._upload_transport
    limbo_files = upload_transport.list_dir('')
    self.assertTrue(expected_pack_name in limbo_files, limbo_files)
    md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
    self.assertEqual(wg_tokens[0], md5.hexdigest())
633
def test_resume_write_group_then_abort(self):
634
# Create a repo, start a write group, insert some data, suspend.
635
self.vfs_transport_factory = memory.MemoryServer
636
repo = self.make_repository('repo')
637
token = repo.lock_write()
638
self.addCleanup(repo.unlock)
639
repo.start_write_group()
640
text_key = ('file-id', 'revid')
641
repo.texts.add_lines(text_key, (), ['lines'])
642
wg_tokens = repo.suspend_write_group()
643
# Get a fresh repository object for the repo on the filesystem.
644
same_repo = repo.bzrdir.open_repository()
646
same_repo.lock_write()
647
self.addCleanup(same_repo.unlock)
648
same_repo.resume_write_group(wg_tokens)
649
same_repo.abort_write_group()
651
[], same_repo._pack_collection._upload_transport.list_dir(''))
653
[], same_repo._pack_collection._pack_transport.list_dir(''))
655
def test_resume_malformed_token(self):
656
self.vfs_transport_factory = memory.MemoryServer
657
# Make a repository with a suspended write group
658
repo = self.make_repository('repo')
659
token = repo.lock_write()
660
self.addCleanup(repo.unlock)
661
repo.start_write_group()
662
text_key = ('file-id', 'revid')
663
repo.texts.add_lines(text_key, (), ['lines'])
664
wg_tokens = repo.suspend_write_group()
665
# Make a new repository
666
new_repo = self.make_repository('new_repo')
667
token = new_repo.lock_write()
668
self.addCleanup(new_repo.unlock)
670
'../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
672
errors.UnresumableWriteGroup,
673
new_repo.resume_write_group, [hacked_wg_token])
676
class TestPackRepositoryStacking(TestCaseWithTransport):
678
"""Tests for stacking pack repositories"""
681
if not self.format_supports_external_lookups:
682
raise TestNotApplicable("%r doesn't support stacking"
683
% (self.format_name,))
684
super(TestPackRepositoryStacking, self).setUp()
686
def get_format(self):
    """Return the bzrdir format named by the test scenario."""
    return bzrdir.format_registry.make_bzrdir(self.format_name)
689
def test_stack_checks_rich_root_compatibility(self):
690
# early versions of the packing code relied on pack internals to
691
# stack, but the current version should be able to stack on any
694
# TODO: Possibly this should be run per-repository-format and raise
695
# TestNotApplicable on formats that don't support stacking. -- mbp
697
repo = self.make_repository('repo', format=self.get_format())
698
if repo.supports_rich_root():
699
# can only stack on repositories that have compatible internal
701
if getattr(repo._format, 'supports_tree_reference', False):
702
matching_format_name = 'pack-0.92-subtree'
704
matching_format_name = 'rich-root-pack'
705
mismatching_format_name = 'pack-0.92'
707
matching_format_name = 'pack-0.92'
708
mismatching_format_name = 'pack-0.92-subtree'
709
base = self.make_repository('base', format=matching_format_name)
710
repo.add_fallback_repository(base)
711
# you can't stack on something with incompatible data
712
bad_repo = self.make_repository('mismatch',
713
format=mismatching_format_name)
714
e = self.assertRaises(errors.IncompatibleRepositories,
715
repo.add_fallback_repository, bad_repo)
716
self.assertContainsRe(str(e),
717
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
718
r'KnitPackRepository.*/repo/.*\n'
719
r'different rich-root support')
721
def test_stack_checks_serializers_compatibility(self):
722
repo = self.make_repository('repo', format=self.get_format())
723
if getattr(repo._format, 'supports_tree_reference', False):
724
# can only stack on repositories that have compatible internal
726
matching_format_name = 'pack-0.92-subtree'
727
mismatching_format_name = 'rich-root-pack'
729
if repo.supports_rich_root():
730
matching_format_name = 'rich-root-pack'
731
mismatching_format_name = 'pack-0.92-subtree'
733
raise TestNotApplicable('No formats use non-v5 serializer'
734
' without having rich-root also set')
735
base = self.make_repository('base', format=matching_format_name)
736
repo.add_fallback_repository(base)
737
# you can't stack on something with incompatible data
738
bad_repo = self.make_repository('mismatch',
739
format=mismatching_format_name)
740
e = self.assertRaises(errors.IncompatibleRepositories,
741
repo.add_fallback_repository, bad_repo)
742
self.assertContainsRe(str(e),
743
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
744
r'KnitPackRepository.*/repo/.*\n'
745
r'different serializers')
747
def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
748
base = self.make_branch_and_tree('base', format=self.get_format())
750
referencing = self.make_branch_and_tree('repo', format=self.get_format())
751
referencing.branch.repository.add_fallback_repository(base.branch.repository)
752
referencing.commit('bar')
753
new_instance = referencing.bzrdir.open_repository()
754
new_instance.lock_read()
755
self.addCleanup(new_instance.unlock)
756
new_instance._pack_collection.ensure_loaded()
757
self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
759
def test_autopack_only_considers_main_repo_packs(self):
760
base = self.make_branch_and_tree('base', format=self.get_format())
762
tree = self.make_branch_and_tree('repo', format=self.get_format())
763
tree.branch.repository.add_fallback_repository(base.branch.repository)
764
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
765
# This test could be a little cheaper by replacing the packs
766
# attribute on the repository to allow a different pack distribution
767
# and max packs policy - so we are checking the policy is honoured
768
# in the test. But for now 11 commits is not a big deal in a single
771
tree.commit('commit %s' % x)
772
# there should be 9 packs:
773
index = self.index_class(trans, 'pack-names', None)
774
self.assertEqual(9, len(list(index.iter_all_entries())))
775
# committing one more should coalesce to 1 of 10.
776
tree.commit('commit triggering pack')
777
index = self.index_class(trans, 'pack-names', None)
778
self.assertEqual(1, len(list(index.iter_all_entries())))
779
# packing should not damage data
780
tree = tree.bzrdir.open_workingtree()
781
check_result = tree.branch.repository.check(
782
[tree.branch.last_revision()])
783
# We should have 50 (10x5) files in the obsolete_packs directory.
784
obsolete_files = list(trans.list_dir('obsolete_packs'))
785
self.assertFalse('foo' in obsolete_files)
786
self.assertFalse('bar' in obsolete_files)
787
self.assertEqual(50, len(obsolete_files))
788
# XXX: Todo check packs obsoleted correctly - old packs and indices
789
# in the obsolete_packs directory.
790
large_pack_name = list(index.iter_all_entries())[0][1][0]
791
# finally, committing again should not touch the large pack.
792
tree.commit('commit not triggering pack')
793
index = self.index_class(trans, 'pack-names', None)
794
self.assertEqual(2, len(list(index.iter_all_entries())))
795
pack_names = [node[1][0] for node in index.iter_all_entries()]
796
self.assertTrue(large_pack_name in pack_names)
799
class TestSmartServerAutopack(TestCaseWithTransport):
802
super(TestSmartServerAutopack, self).setUp()
803
# Create a smart server that publishes whatever the backing VFS server
805
self.smart_server = server.SmartTCPServer_for_testing()
806
self.smart_server.setUp(self.get_server())
807
self.addCleanup(self.smart_server.tearDown)
808
# Log all HPSS calls into self.hpss_calls.
809
client._SmartClient.hooks.install_named_hook(
810
'call', self.capture_hpss_call, None)
813
def capture_hpss_call(self, params):
    """Hook callback recording the method name of each smart (HPSS) call.

    Installed on client._SmartClient.hooks' 'call' hook in setUp;
    appends to self.hpss_calls for later inspection by the tests.
    """
    self.hpss_calls.append(params.method)
816
def get_format(self):
    """Return the bzrdir format named by the test scenario."""
    return bzrdir.format_registry.make_bzrdir(self.format_name)
819
def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
820
# Make local and remote repos
821
tree = self.make_branch_and_tree('local', format=self.get_format())
822
self.make_branch_and_tree('remote', format=self.get_format())
823
remote_branch_url = self.smart_server.get_url() + 'remote'
824
remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
825
# Make 9 local revisions, and push them one at a time to the remote
826
# repo to produce 9 pack files.
828
tree.commit('commit %s' % x)
829
tree.branch.push(remote_branch)
830
# Make one more push to trigger an autopack
832
tree.commit('commit triggering pack')
833
tree.branch.push(remote_branch)
834
autopack_calls = len([call for call in self.hpss_calls if call ==
835
'PackRepository.autopack'])
836
streaming_calls = len([call for call in self.hpss_calls if call ==
837
'Repository.insert_stream'])
839
# Non streaming server
840
self.assertEqual(1, autopack_calls)
841
self.assertEqual(0, streaming_calls)
843
# Streaming was used, which autopacks on the remote end.
844
self.assertEqual(0, autopack_calls)
845
# NB: The 2 calls are because of the sanity check that the server
846
# supports the verb (see remote.py:RemoteSink.insert_stream for
848
self.assertEqual(2, streaming_calls)
851
def load_tests(basic_tests, module, loader):
852
# these give the bzrdir canned format name, and the repository on-disk
855
dict(format_name='pack-0.92',
856
format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
857
format_supports_external_lookups=False,
858
index_class=GraphIndex),
859
dict(format_name='pack-0.92-subtree',
860
format_string="Bazaar pack repository format 1 "
861
"with subtree support (needs bzr 0.92)\n",
862
format_supports_external_lookups=False,
863
index_class=GraphIndex),
864
dict(format_name='1.6',
865
format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
866
format_supports_external_lookups=True,
867
index_class=GraphIndex),
868
dict(format_name='1.6.1-rich-root',
869
format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
871
format_supports_external_lookups=True,
872
index_class=GraphIndex),
873
dict(format_name='1.9',
874
format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
875
format_supports_external_lookups=True,
876
index_class=BTreeGraphIndex),
877
dict(format_name='1.9-rich-root',
878
format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
880
format_supports_external_lookups=True,
881
index_class=BTreeGraphIndex),
882
dict(format_name='development2',
883
format_string="Bazaar development format 2 "
884
"(needs bzr.dev from before 1.8)\n",
885
format_supports_external_lookups=True,
886
index_class=BTreeGraphIndex),
887
dict(format_name='development2-subtree',
888
format_string="Bazaar development format 2 "
889
"with subtree support (needs bzr.dev from before 1.8)\n",
890
format_supports_external_lookups=True,
891
index_class=BTreeGraphIndex),
893
# name of the scenario is the format name
894
scenarios = [(s['format_name'], s) for s in scenarios_params]
895
return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())