~lifeless/ubuntu/lucid/bzr/2.1.2-sru

Viewing changes to bzrlib/repository.py

  • Committer: Bazaar Package Importer
  • Author(s): Jelmer Vernooij
  • Date: 2010-02-17 17:47:40 UTC
  • mfrom: (1.5.2 upstream)
  • mto: This revision was merged to the branch mainline in revision 13.
  • Revision ID: james.westby@ubuntu.com-20100217174740-p1x5dd5uwvxtg9y6
  • Tags: upstream-2.1.0

Import upstream version 2.1.0

--- bzrlib/repository.py (old)
+++ bzrlib/repository.py (new)
@@ -24,8 +24,10 @@
     bzrdir,
     check,
     chk_map,
+    config,
     debug,
     errors,
+    fetch as _mod_fetch,
     fifo_cache,
     generate_ids,
     gpg,
@@ -50,7 +52,7 @@
 from bzrlib.testament import Testament
 """)
 
-from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
 from bzrlib.inter import InterObject
 from bzrlib.inventory import (
     Inventory,
@@ -58,6 +60,7 @@
     ROOT_ID,
     entry_factory,
     )
+from bzrlib.lock import _RelockDebugMixin
 from bzrlib import registry
 from bzrlib.trace import (
     log_exception_quietly, note, mutter, mutter_callsite, warning)
@@ -206,7 +209,10 @@
             # an inventory delta was accumulated without creating a new
             # inventory.
             basis_id = self.basis_delta_revision
-            self.inv_sha1 = self.repository.add_inventory_by_delta(
+            # We ignore the 'inventory' returned by add_inventory_by_delta
+            # because self.new_inventory is used to hint to the rest of the
+            # system what code path was taken
+            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                 basis_id, self._basis_delta, self._new_revision_id,
                 self.parents)
         else:
@@ -857,7 +863,7 @@
 # Repositories
 
 
-class Repository(object):
+class Repository(_RelockDebugMixin):
     """Repository holding history for one or more branches.
 
     The repository holds and retrieves historical information including
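
The new base class comes from the bzrlib.lock import added above. A minimal sketch of the idea (an illustration, not the actual bzrlib implementation): a relock-debugging mixin only needs to remember the previous lock mode and report when the object is locked again.

    class _RelockDebugMixin(object):
        """Sketch: track lock modes so repeated locking can be reported."""

        _prev_lock = None

        def _note_lock(self, lock_type):
            # lock_type is 'r' or 'w', matching the _note_lock('r') /
            # _note_lock('w') calls added to lock_read/lock_write below.
            if self._prev_lock == lock_type:
                print('%r was %s locked again' % (
                    self, 'read' if lock_type == 'r' else 'write'))
            self._prev_lock = lock_type
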
@@ -1300,16 +1306,14 @@
         self._reconcile_does_inventory_gc = True
         self._reconcile_fixes_text_parents = False
         self._reconcile_backsup_inventory = True
-        # not right yet - should be more semantically clear ?
-        #
-        # TODO: make sure to construct the right store classes, etc, depending
-        # on whether escaping is required.
-        self._warn_if_deprecated()
         self._write_group = None
         # Additional places to query for data.
         self._fallback_repositories = []
         # An InventoryEntry cache, used during deserialization
         self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
+        # Is it safe to return inventory entries directly from the entry cache,
+        # rather copying them?
+        self._safe_to_return_from_cache = False
 
     def __repr__(self):
         if self._fallback_repositories:
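
The FIFOCache holds deserialized InventoryEntry objects. Handing a cached entry straight back is only safe while the caller promises not to mutate it, and `_safe_to_return_from_cache` records that promise (the fetch code later in this diff sets it around a batch). A hedged sketch of the copy-on-return pattern the flag controls, with hypothetical names; the real check lives in the serializer's deserialization code:

    def entry_from_cache(cache, key, safe_to_return_from_cache=False):
        # Hypothetical helper illustrating the flag's effect.
        entry = cache[key]
        if safe_to_return_from_cache:
            return entry        # caller promises not to mutate the entry
        return entry.copy()     # defensive copy for everyone else
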
@@ -1382,6 +1386,8 @@
         locked = self.is_locked()
         result = self.control_files.lock_write(token=token)
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('w')
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
@@ -1392,6 +1398,8 @@
         locked = self.is_locked()
         self.control_files.lock_read()
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('r')
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
@@ -1721,6 +1729,7 @@
         self.start_write_group()
         return result
 
+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if (self.control_files._lock_count == 1 and
             self.control_files._lock_mode == 'w'):
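
only_raises comes from the bzrlib.decorators import changed above; it keeps unlock() from raising anything other than the named lock errors, so a failure during cleanup cannot mask an exception already in flight. A minimal sketch of a decorator with that contract (not the bzrlib implementation itself):

    import functools
    import logging

    def only_raises(*errors):
        """Sketch: let only the listed exception types propagate."""
        def decorate(unbound):
            @functools.wraps(unbound)
            def wrapped(*args, **kwargs):
                try:
                    return unbound(*args, **kwargs)
                except errors:
                    raise  # part of the declared contract
                except Exception:
                    # Log and swallow anything else, so e.g. an unlock()
                    # running while another exception is being handled
                    # stays quiet.
                    logging.exception('error suppressed by only_raises')
            return wrapped
        return decorate
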
@@ -2330,7 +2339,7 @@
         num_file_ids = len(file_ids)
         for file_id, altered_versions in file_ids.iteritems():
             if pb is not None:
-                pb.update("fetch texts", count, num_file_ids)
+                pb.update("Fetch texts", count, num_file_ids)
             count += 1
             yield ("file", file_id, altered_versions)
 
@@ -2424,7 +2433,8 @@
         :param xml: A serialised inventory.
         """
         result = self._serializer.read_inventory_from_string(xml, revision_id,
-                    entry_cache=self._inventory_entry_cache)
+                    entry_cache=self._inventory_entry_cache,
+                    return_from_cache=self._safe_to_return_from_cache)
         if result.revision_id != revision_id:
             raise AssertionError('revision id mismatch %s != %s' % (
                 result.revision_id, revision_id))
@@ -2662,8 +2672,8 @@
         for ((revision_id,), parent_keys) in \
                 self.revisions.get_parent_map(query_keys).iteritems():
             if parent_keys:
-                result[revision_id] = tuple(parent_revid
-                    for (parent_revid,) in parent_keys)
+                result[revision_id] = tuple([parent_revid
+                    for (parent_revid,) in parent_keys])
             else:
                 result[revision_id] = (_mod_revision.NULL_REVISION,)
         return result
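
The only change in this hunk is building the tuple from a list comprehension instead of a generator expression. Under CPython 2, tuple() over a pre-built list is measurably faster than consuming a generator, which matters in this per-revision loop; the two forms are otherwise equivalent:

    parent_keys = [('rev-a',), ('rev-b',)]
    # new spelling: materialize a list first, then convert
    fast = tuple([parent_revid for (parent_revid,) in parent_keys])
    # old spelling: feed tuple() a generator
    slow = tuple(parent_revid for (parent_revid,) in parent_keys)
    assert fast == slow == ('rev-a', 'rev-b')
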
@@ -2771,13 +2781,22 @@
         result.check(callback_refs)
         return result
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         global _deprecation_warning_done
         if _deprecation_warning_done:
             return
-        _deprecation_warning_done = True
-        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
-                % (self._format, self.bzrdir.transport.base))
+        try:
+            if branch is None:
+                conf = config.GlobalConfig()
+            else:
+                conf = branch.get_config()
+            if conf.suppress_warning('format_deprecation'):
+                return
+            warning("Format %s for %s is deprecated -"
+                    " please use 'bzr upgrade' to get better performance"
+                    % (self._format, self.bzrdir.transport.base))
+        finally:
+            _deprecation_warning_done = True
 
     def supports_rich_root(self):
         return self._format.rich_root_data
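
The rewrite consults the config module (imported at the top of this diff) so users can opt out of the warning, preferring the branch configuration when a branch is supplied. A small sketch of the lookup the new code performs, assuming a default bzrlib installation:

    from bzrlib import config

    # With this section in bazaar.conf the warning is skipped:
    #
    #   [DEFAULT]
    #   suppress_warnings = format_deprecation
    #
    conf = config.GlobalConfig()
    if not conf.suppress_warning('format_deprecation'):
        print('a deprecated-format warning would be emitted here')
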
@@ -3087,7 +3106,7 @@
         """
         try:
             transport = a_bzrdir.get_repository_transport(None)
-            format_string = transport.get("format").read()
+            format_string = transport.get_bytes("format")
             return format_registry.get(format_string)
         except errors.NoSuchFile:
             raise errors.NoRepositoryPresent(a_bzrdir)
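
transport.get() returns a file-like object that the old code read but never closed; get_bytes() fetches the content as a byte string in a single call. The equivalence, wrapped as a standalone helper (hypothetical function name):

    def read_format_string(transport):
        # Old spelling, leaves the returned file object for the GC to close:
        #   return transport.get("format").read()
        # New spelling, one call and no dangling handle:
        return transport.get_bytes("format")
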
@@ -3406,8 +3425,7 @@
                    provided a default one will be created.
         :return: None.
         """
-        from bzrlib.fetch import RepoFetcher
-        f = RepoFetcher(to_repository=self.target,
+        f = _mod_fetch.RepoFetcher(to_repository=self.target,
                                from_repository=self.source,
                                last_revision=revision_id,
                                fetch_spec=fetch_spec,
@@ -3586,7 +3604,7 @@
                 self.target.texts.insert_record_stream(
                     self.source.texts.get_record_stream(
                         self.source.texts.keys(), 'topological', False))
-                pb.update('copying inventory', 0, 1)
+                pb.update('Copying inventory', 0, 1)
                 self.target.inventories.insert_record_stream(
                     self.source.inventories.get_record_stream(
                         self.source.inventories.keys(), 'topological', False))
@@ -3813,13 +3831,15 @@
                 basis_id, delta, current_revision_id, parents_parents)
             cache[current_revision_id] = parent_tree
 
-    def _fetch_batch(self, revision_ids, basis_id, cache):
+    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
         """Fetch across a few revisions.
 
         :param revision_ids: The revisions to copy
         :param basis_id: The revision_id of a tree that must be in cache, used
             as a basis for delta when no other base is available
         :param cache: A cache of RevisionTrees that we can use.
+        :param a_graph: A Graph object to determine the heads() of the
+            rich-root data stream.
         :return: The revision_id of the last converted tree. The RevisionTree
             for it will be in cache
         """
@@ -3832,6 +3852,7 @@
         pending_revisions = []
         parent_map = self.source.get_parent_map(revision_ids)
         self._fetch_parent_invs_for_stacking(parent_map, cache)
+        self.source._safe_to_return_from_cache = True
         for tree in self.source.revision_trees(revision_ids):
             # Find a inventory delta for this revision.
             # Find text entries that need to be copied, too.
@@ -3885,14 +3906,14 @@
             pending_revisions.append(revision)
             cache[current_revision_id] = tree
             basis_id = current_revision_id
+        self.source._safe_to_return_from_cache = False
         # Copy file texts
         from_texts = self.source.texts
         to_texts = self.target.texts
         if root_keys_to_create:
-            from bzrlib.fetch import _new_root_data_stream
-            root_stream = _new_root_data_stream(
+            root_stream = _mod_fetch._new_root_data_stream(
                 root_keys_to_create, self._revision_id_to_root_id, parent_map,
-                self.source)
+                self.source, graph=a_graph)
             to_texts.insert_record_stream(root_stream)
         to_texts.insert_record_stream(from_texts.get_record_stream(
             text_keys, self.target._format._fetch_order,
@@ -3955,14 +3976,22 @@
         cache[basis_id] = basis_tree
         del basis_tree # We don't want to hang on to it here
         hints = []
+        if self._converting_to_rich_root and len(revision_ids) > 100:
+            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
+                                                            revision_ids)
+        else:
+            a_graph = None
+
         for offset in range(0, len(revision_ids), batch_size):
             self.target.start_write_group()
             try:
                 pb.update('Transferring revisions', offset,
                           len(revision_ids))
                 batch = revision_ids[offset:offset+batch_size]
-                basis_id = self._fetch_batch(batch, basis_id, cache)
+                basis_id = self._fetch_batch(batch, basis_id, cache,
+                                             a_graph=a_graph)
             except:
+                self.source._safe_to_return_from_cache = False
                 self.target.abort_write_group()
                 raise
             else:
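
Both changes here follow one theme: expensive setup is hoisted out of the per-batch work (the heads graph is computed once, only when converting more than 100 revisions to rich roots), and the cache-safety flag is reset on any failure. The surrounding write-group pattern, reduced to a standalone sketch with hypothetical stand-ins for the repository and batch function:

    def fetch_in_batches(target, revision_ids, batch_size, fetch_batch):
        # Each batch runs inside its own write group, aborted on any error
        # and committed otherwise, so a failure never leaves a partial group.
        for offset in range(0, len(revision_ids), batch_size):
            batch = revision_ids[offset:offset + batch_size]
            target.start_write_group()
            try:
                fetch_batch(batch)
            except:
                target.abort_write_group()
                raise
            else:
                target.commit_write_group()
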
@@ -4085,13 +4114,13 @@
                                                   self.source_repo.is_shared())
         converted.lock_write()
         try:
-            self.step('Copying content into repository.')
+            self.step('Copying content')
             self.source_repo.copy_content_into(converted)
         finally:
             converted.unlock()
-        self.step('Deleting old repository content.')
+        self.step('Deleting old repository content')
         self.repo_dir.transport.delete_tree('repository.backup')
-        self.pb.note('repository converted')
+        ui.ui_factory.note('repository converted')
 
     def step(self, message):
         """Update the pb by a step."""
@@ -4324,6 +4353,13 @@
                 ):
                 if versioned_file is None:
                     continue
+                # TODO: key is often going to be a StaticTuple object
+                #       I don't believe we can define a method by which
+                #       (prefix,) + StaticTuple will work, though we could
+                #       define a StaticTuple.sq_concat that would allow you to
+                #       pass in either a tuple or a StaticTuple as the second
+                #       object, so instead we could have:
+                #       StaticTuple(prefix) + key here...
                 missing_keys.update((prefix,) + key for key in
                     versioned_file.get_missing_compression_parent_keys())
         except NotImplementedError:
@@ -4441,8 +4477,7 @@
         fetching the inventory weave.
         """
         if self._rich_root_upgrade():
-            import bzrlib.fetch
-            return bzrlib.fetch.Inter1and2Helper(
+            return _mod_fetch.Inter1and2Helper(
                 self.from_repository).generate_root_texts(revs)
         else:
             return []