~duplicity-team/duplicity/0.8-series

« back to all changes in this revision

Viewing changes to duplicity/dup_main.py

* Merged in lp:~kenneth-loafman/duplicity/duplicity-pylint
  - Enable additional pylint warnings. Make 1st pass at correction.
      unused-argument,
      unused-wildcard-import,
      redefined-builtin,
      bad-indentation,
      mixed-indentation,
      unreachable
  - Renamed globals to config to fix conflict with __builtin__.globals()
  - Resolved conflict between duplicity.config and testing.manual.config
  - Normalized emacs mode line to have encoding:utf8 on all *.py files

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
 
1
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4; encoding:utf8 -*-
2
2
#
3
3
# duplicity -- Encrypted bandwidth efficient backup
4
4
#
28
28
from __future__ import print_function
29
29
from future import standard_library
30
30
standard_library.install_aliases()
31
 
from builtins import filter
32
31
from builtins import map
33
32
from builtins import next
34
33
from builtins import object
36
35
 
37
36
import copy
38
37
import fasteners
39
 
import gzip
40
38
import os
41
39
import platform
42
 
import re
43
40
import resource
44
41
import sys
45
42
import time
46
 
import traceback
47
 
import types
48
 
 
49
 
from datetime import datetime
50
 
from os import statvfs
51
43
 
52
44
from duplicity import __version__
53
45
from duplicity import asyncscheduler
56
48
from duplicity import dup_collections
57
49
from duplicity import dup_temp
58
50
from duplicity import dup_time
59
 
from duplicity import errors
60
51
from duplicity import file_naming
61
 
from duplicity import globals
 
52
from duplicity import config
62
53
from duplicity import gpg
63
54
from duplicity import log
64
55
from duplicity import manifest
65
56
from duplicity import patchdir
66
57
from duplicity import path
67
58
from duplicity import progress
68
 
from duplicity import robust
69
59
from duplicity import tempdir
70
60
from duplicity import util
71
61
 
115
105
    # check if we can reuse an already set (signing_)passphrase
116
106
    # if signing key is also an encryption key assume that the passphrase is identical
117
107
    if (for_signing and
118
 
            (globals.gpg_profile.sign_key in globals.gpg_profile.recipients or
119
 
             globals.gpg_profile.sign_key in globals.gpg_profile.hidden_recipients) and
 
108
            (config.gpg_profile.sign_key in config.gpg_profile.recipients or
 
109
             config.gpg_profile.sign_key in config.gpg_profile.hidden_recipients) and
120
110
             u'PASSPHRASE' in os.environ):  # noqa
121
111
        log.Notice(_(u"Reuse configured PASSPHRASE as SIGN_PASSPHRASE"))
122
112
        return os.environ[u'PASSPHRASE']
123
113
    # if one encryption key is also the signing key assume that the passphrase is identical
124
114
    if (not for_signing and
125
 
            (globals.gpg_profile.sign_key in globals.gpg_profile.recipients or
126
 
             globals.gpg_profile.sign_key in globals.gpg_profile.hidden_recipients) and
 
115
            (config.gpg_profile.sign_key in config.gpg_profile.recipients or
 
116
             config.gpg_profile.sign_key in config.gpg_profile.hidden_recipients) and
127
117
             u'SIGN_PASSPHRASE' in os.environ):  # noqa
128
118
        log.Notice(_(u"Reuse configured SIGN_PASSPHRASE as PASSPHRASE"))
129
119
        return os.environ[u'SIGN_PASSPHRASE']
136
126
    #   - gpg-agent supplies all, no user interaction
137
127
 
138
128
    # no passphrase if --no-encryption or --use-agent
139
 
    if not globals.encryption or globals.use_agent:
 
129
    if not config.encryption or config.use_agent:
140
130
        return u""
141
131
 
142
132
    # these commands don't need a password
151
141
    # for a full backup, we don't need a password if
152
142
    # there is no sign_key and there are recipients
153
143
    elif (action == u"full" and
154
 
          (globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients) and not
155
 
          globals.gpg_profile.sign_key):
 
144
          (config.gpg_profile.recipients or config.gpg_profile.hidden_recipients) and not
 
145
          config.gpg_profile.sign_key):
156
146
        return u""
157
147
 
158
148
    # for an inc backup, we don't need a password if
159
149
    # there is no sign_key and there are recipients
160
150
    elif (action == u"inc" and
161
 
          (globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients) and not
162
 
          globals.gpg_profile.sign_key):
 
151
          (config.gpg_profile.recipients or config.gpg_profile.hidden_recipients) and not
 
152
          config.gpg_profile.sign_key):
163
153
        return u""
164
154
 
165
155
    # Finally, ask the user for the passphrase
171
161
            # if the user made a typo in the first passphrase
172
162
            if use_cache and n == 2:
173
163
                if for_signing:
174
 
                    pass1 = globals.gpg_profile.signing_passphrase
 
164
                    pass1 = config.gpg_profile.signing_passphrase
175
165
                else:
176
 
                    pass1 = globals.gpg_profile.passphrase
 
166
                    pass1 = config.gpg_profile.passphrase
177
167
            else:
178
168
                if for_signing:
179
 
                    if use_cache and globals.gpg_profile.signing_passphrase:
180
 
                        pass1 = globals.gpg_profile.signing_passphrase
 
169
                    if use_cache and config.gpg_profile.signing_passphrase:
 
170
                        pass1 = config.gpg_profile.signing_passphrase
181
171
                    else:
182
172
                        pass1 = getpass_safe(_(u"GnuPG passphrase for signing key:") + u" ")
183
173
                else:
184
 
                    if use_cache and globals.gpg_profile.passphrase:
185
 
                        pass1 = globals.gpg_profile.passphrase
 
174
                    if use_cache and config.gpg_profile.passphrase:
 
175
                        pass1 = config.gpg_profile.passphrase
186
176
                    else:
187
177
                        pass1 = getpass_safe(_(u"GnuPG passphrase:") + u" ")
188
178
 
199
189
                use_cache = False
200
190
                continue
201
191
 
202
 
            if not pass1 and not (globals.gpg_profile.recipients or
203
 
                                  globals.gpg_profile.hidden_recipients) and not for_signing:
 
192
            if not pass1 and not (config.gpg_profile.recipients or
 
193
                                  config.gpg_profile.hidden_recipients) and not for_signing:
204
194
                log.Log(_(u"Cannot use empty passphrase with symmetric encryption!  Please try again."),
205
195
                        log.WARNING, force_print=True)
206
196
                use_cache = False
242
232
    @rtype: int
243
233
    @return: constant 0 (zero)
244
234
    """
245
 
    last_index = globals.restart.last_index
246
 
    last_block = globals.restart.last_block
 
235
    last_index = config.restart.last_index
 
236
    last_block = config.restart.last_block
247
237
    try:
248
238
        # Just spin our wheels
249
239
        iter_result = next(tarblock_iter)
326
316
        instead of copying.
327
317
        """
328
318
        putsize = tdp.getsize()
329
 
        if globals.skip_volume != vol_num:  # for testing purposes only
 
319
        if config.skip_volume != vol_num:  # for testing purposes only
330
320
            backend.put(tdp, dest_filename)
331
321
        validate_block(putsize, dest_filename)
332
322
        if tdp.stat:
345
335
        from encrypted to non in the middle of a backup chain), so we check
346
336
        that the vol1 filename on the server matches the settings of this run.
347
337
        """
348
 
        if ((globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients) and
349
 
                not globals.gpg_profile.sign_key):
 
338
        if ((config.gpg_profile.recipients or config.gpg_profile.hidden_recipients) and
 
339
                not config.gpg_profile.sign_key):
350
340
            # When using gpg encryption without a signing key, we skip this validation
351
341
            # step to ensure that we can still backup without needing the secret key
352
342
            # on the machine.
353
343
            return
354
344
 
355
345
        vol1_filename = file_naming.get(backup_type, 1,
356
 
                                        encrypted=globals.encryption,
357
 
                                        gzipped=globals.compression)
 
346
                                        encrypted=config.encryption,
 
347
                                        gzipped=config.compression)
358
348
        if vol1_filename != backup_set.volume_name_dict[1]:
359
349
            log.FatalError(_(u"Restarting backup, but current encryption "
360
350
                             u"settings do not match original settings"),
361
351
                           log.ErrorCode.enryption_mismatch)
362
352
 
363
353
        # Settings are same, let's check passphrase itself if we are encrypted
364
 
        if globals.encryption:
365
 
            fileobj = restore_get_enc_fileobj(globals.backend, vol1_filename,
 
354
        if config.encryption:
 
355
            fileobj = restore_get_enc_fileobj(config.backend, vol1_filename,
366
356
                                              manifest.volume_info_dict[1])
367
357
            fileobj.close()
368
358
 
369
 
    if not globals.restart:
 
359
    if not config.restart:
370
360
        # normal backup start
371
361
        vol_num = 0
372
362
        mf = manifest.Manifest(fh=man_outfp)
373
363
        mf.set_dirinfo()
374
364
    else:
375
365
        # restart from last known position
376
 
        mf = globals.restart.last_backup.get_local_manifest()
377
 
        globals.restart.checkManifest(mf)
378
 
        globals.restart.setLastSaved(mf)
379
 
        validate_encryption_settings(globals.restart.last_backup, mf)
 
366
        mf = config.restart.last_backup.get_local_manifest()
 
367
        config.restart.checkManifest(mf)
 
368
        config.restart.setLastSaved(mf)
 
369
        validate_encryption_settings(config.restart.last_backup, mf)
380
370
        mf.fh = man_outfp
381
 
        last_block = globals.restart.last_block
 
371
        last_block = config.restart.last_block
382
372
        log.Notice(_(u"Restarting after volume %s, file %s, block %s") %
383
 
                   (globals.restart.start_vol,
384
 
                    util.uindex(globals.restart.last_index),
385
 
                    globals.restart.last_block))
386
 
        vol_num = globals.restart.start_vol
 
373
                   (config.restart.start_vol,
 
374
                    util.uindex(config.restart.last_index),
 
375
                    config.restart.last_block))
 
376
        vol_num = config.restart.start_vol
387
377
        restart_position_iterator(tarblock_iter)
388
378
 
389
379
    at_end = 0
391
381
 
392
382
    # If --progress option is given, initiate a background thread that will
393
383
    # periodically report progress to the Log.
394
 
    if globals.progress:
 
384
    if config.progress:
395
385
        progress.tracker.set_start_volume(vol_num + 1)
396
386
        progress.progress_thread.start()
397
387
 
403
393
    # is an assert put in place to avoid someone accidentally
404
394
    # enabling concurrency above 1, before adequate work has been
405
395
    # done on the backends to make them support concurrency.
406
 
    assert globals.async_concurrency <= 1
 
396
    assert config.async_concurrency <= 1
407
397
 
408
 
    io_scheduler = asyncscheduler.AsyncScheduler(globals.async_concurrency)
 
398
    io_scheduler = asyncscheduler.AsyncScheduler(config.async_concurrency)
409
399
    async_waiters = []
410
400
 
411
401
    while not at_end:
415
405
        # Create volume
416
406
        vol_num += 1
417
407
        dest_filename = file_naming.get(backup_type, vol_num,
418
 
                                        encrypted=globals.encryption,
419
 
                                        gzipped=globals.compression)
 
408
                                        encrypted=config.encryption,
 
409
                                        gzipped=config.compression)
420
410
        tdp = dup_temp.new_tempduppath(file_naming.parse(dest_filename))
421
411
 
422
412
        # write volume
423
 
        if globals.encryption:
424
 
            at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, globals.gpg_profile,
425
 
                                      globals.volsize)
426
 
        elif globals.compression:
427
 
            at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize)
 
413
        if config.encryption:
 
414
            at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, config.gpg_profile,
 
415
                                      config.volsize)
 
416
        elif config.compression:
 
417
            at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, config.volsize)
428
418
        else:
429
 
            at_end = gpg.PlainWriteFile(tarblock_iter, tdp.name, globals.volsize)
 
419
            at_end = gpg.PlainWriteFile(tarblock_iter, tdp.name, config.volsize)
430
420
        tdp.setdata()
431
421
 
432
422
        # Add volume information to manifest
452
442
        log.Progress(_(u'Processed volume %d') % vol_num, diffdir.stats.SourceFileSize)
453
443
        # Snapshot (serialize) progress now as a Volume has been completed.
454
444
        # This is always the last restore point when it comes to restart a failed backup
455
 
        if globals.progress:
 
445
        if config.progress:
456
446
            progress.tracker.snapshot_progress(vol_num)
457
447
 
458
448
        # for testing purposes only - assert on inc or full
459
 
        assert globals.fail_on_volume != vol_num, u"Forced assertion for testing at volume %d" % vol_num
 
449
        assert config.fail_on_volume != vol_num, u"Forced assertion for testing at volume %d" % vol_num
460
450
 
461
451
    # Collect byte count from all asynchronous jobs; also implicitly waits
462
452
    # for them all to complete.
474
464
    u"""
475
465
    Return a fileobj opened for writing, save results as manifest
476
466
 
477
 
    Save manifest in globals.archive_dir_path gzipped.
 
467
    Save manifest in config.archive_dir_path gzipped.
478
468
    Save them on the backend encrypted as needed.
479
469
 
480
470
    @type man_type: string
492
482
                                        manifest=True)
493
483
    remote_man_filename = file_naming.get(backup_type,
494
484
                                          manifest=True,
495
 
                                          encrypted=globals.encryption)
 
485
                                          encrypted=config.encryption)
496
486
 
497
 
    fh = dup_temp.get_fileobj_duppath(globals.archive_dir_path,
 
487
    fh = dup_temp.get_fileobj_duppath(config.archive_dir_path,
498
488
                                      part_man_filename,
499
489
                                      perm_man_filename,
500
490
                                      remote_man_filename)
505
495
    u"""
506
496
    Return a fileobj opened for writing, save results as signature
507
497
 
508
 
    Save signatures in globals.archive_dir gzipped.
 
498
    Save signatures in config.archive_dir gzipped.
509
499
    Save them on the backend encrypted as needed.
510
500
 
511
501
    @type sig_type: string
521
511
                                        partial=True)
522
512
    perm_sig_filename = file_naming.get(sig_type,
523
513
                                        gzipped=True)
524
 
    remote_sig_filename = file_naming.get(sig_type, encrypted=globals.encryption,
525
 
                                          gzipped=globals.compression)
 
514
    remote_sig_filename = file_naming.get(sig_type, encrypted=config.encryption,
 
515
                                          gzipped=config.compression)
526
516
 
527
 
    fh = dup_temp.get_fileobj_duppath(globals.archive_dir_path,
 
517
    fh = dup_temp.get_fileobj_duppath(config.archive_dir_path,
528
518
                                      part_sig_filename,
529
519
                                      perm_sig_filename,
530
520
                                      remote_sig_filename,
542
532
    @rtype: void
543
533
    @return: void
544
534
    """
545
 
    if globals.progress:
 
535
    if config.progress:
546
536
        progress.tracker = progress.ProgressTracker()
547
537
        # Fake a backup to compute total of moving bytes
548
 
        tarblock_iter = diffdir.DirFull(globals.select)
 
538
        tarblock_iter = diffdir.DirFull(config.select)
549
539
        dummy_backup(tarblock_iter)
550
540
        # Store computed stats to compute progress later
551
541
        progress.tracker.set_evidence(diffdir.stats, True)
552
 
        # Reinit the globals.select iterator, so
 
542
        # Reinit the config.select iterator, so
553
543
        # the core of duplicity can rescan the paths
554
544
        commandline.set_selection()
555
545
        progress.progress_thread = progress.LogProgressThread()
556
546
 
557
 
    if globals.dry_run:
558
 
        tarblock_iter = diffdir.DirFull(globals.select)
 
547
    if config.dry_run:
 
548
        tarblock_iter = diffdir.DirFull(config.select)
559
549
        bytes_written = dummy_backup(tarblock_iter)
560
550
        col_stats.set_values(sig_chain_warning=None)
561
551
    else:
562
552
        sig_outfp = get_sig_fileobj(u"full-sig")
563
553
        man_outfp = get_man_fileobj(u"full")
564
 
        tarblock_iter = diffdir.DirFull_WriteSig(globals.select,
 
554
        tarblock_iter = diffdir.DirFull_WriteSig(config.select,
565
555
                                                 sig_outfp)
566
556
        bytes_written = write_multivol(u"full", tarblock_iter,
567
557
                                       man_outfp, sig_outfp,
568
 
                                       globals.backend)
 
558
                                       config.backend)
569
559
 
570
560
        # close sig file, send to remote, and rename to final
571
561
        sig_outfp.close()
577
567
        man_outfp.to_remote()
578
568
        man_outfp.to_final()
579
569
 
580
 
        if globals.progress:
 
570
        if config.progress:
581
571
            # Terminate the background thread now, if any
582
572
            progress.progress_thread.finished = True
583
573
            progress.progress_thread.join()
598
588
    @param col_stats: collection status
599
589
    """
600
590
    if not col_stats.matched_chain_pair:
601
 
        if globals.incremental:
 
591
        if config.incremental:
602
592
            log.FatalError(_(u"Fatal Error: Unable to start incremental backup.  "
603
593
                             u"Old signatures not found and incremental specified"),
604
594
                           log.ErrorCode.inc_without_sigs)
608
598
    return col_stats.matched_chain_pair[0]
609
599
 
610
600
 
611
 
def print_statistics(stats, bytes_written):
 
601
def print_statistics(stats, bytes_written):  # pylint: disable=unused-argument
612
602
    u"""
613
 
    If globals.print_statistics, print stats after adding bytes_written
 
603
    If config.print_statistics, print stats after adding bytes_written
614
604
 
615
605
    @rtype: void
616
606
    @return: void
617
607
    """
618
 
    if globals.print_statistics:
 
608
    if config.print_statistics:
619
609
        diffdir.stats.TotalDestinationSizeChange = bytes_written
620
610
        logstring = diffdir.stats.get_stats_logstring(_(u"Backup Statistics"))
621
611
        log.Log(logstring, log.NOTICE, force_print=True)
628
618
    @rtype: void
629
619
    @return: void
630
620
    """
631
 
    if not globals.restart:
 
621
    if not config.restart:
632
622
        dup_time.setprevtime(sig_chain.end_time)
633
623
        if dup_time.curtime == dup_time.prevtime:
634
624
            time.sleep(2)
636
626
            assert dup_time.curtime != dup_time.prevtime, \
637
627
                u"time not moving forward at appropriate pace - system clock issues?"
638
628
 
639
 
    if globals.progress:
 
629
    if config.progress:
640
630
        progress.tracker = progress.ProgressTracker()
641
631
        # Fake a backup to compute total of moving bytes
642
 
        tarblock_iter = diffdir.DirDelta(globals.select,
 
632
        tarblock_iter = diffdir.DirDelta(config.select,
643
633
                                         sig_chain.get_fileobjs())
644
634
        dummy_backup(tarblock_iter)
645
635
        # Store computed stats to compute progress later
646
636
        progress.tracker.set_evidence(diffdir.stats, False)
647
 
        # Reinit the globals.select iterator, so
 
637
        # Reinit the config.select iterator, so
648
638
        # the core of duplicity can rescan the paths
649
639
        commandline.set_selection()
650
640
        progress.progress_thread = progress.LogProgressThread()
651
641
 
652
 
    if globals.dry_run:
653
 
        tarblock_iter = diffdir.DirDelta(globals.select,
 
642
    if config.dry_run:
 
643
        tarblock_iter = diffdir.DirDelta(config.select,
654
644
                                         sig_chain.get_fileobjs())
655
645
        bytes_written = dummy_backup(tarblock_iter)
656
646
    else:
657
647
        new_sig_outfp = get_sig_fileobj(u"new-sig")
658
648
        new_man_outfp = get_man_fileobj(u"inc")
659
 
        tarblock_iter = diffdir.DirDelta_WriteSig(globals.select,
 
649
        tarblock_iter = diffdir.DirDelta_WriteSig(config.select,
660
650
                                                  sig_chain.get_fileobjs(),
661
651
                                                  new_sig_outfp)
662
652
        bytes_written = write_multivol(u"inc", tarblock_iter,
663
653
                                       new_man_outfp, new_sig_outfp,
664
 
                                       globals.backend)
 
654
                                       config.backend)
665
655
 
666
656
        # close sig file and rename to final
667
657
        new_sig_outfp.close()
673
663
        new_man_outfp.to_remote()
674
664
        new_man_outfp.to_final()
675
665
 
676
 
        if globals.progress:
 
666
        if config.progress:
677
667
            # Terminate the background thread now, if any
678
668
            progress.progress_thread.finished = True
679
669
            progress.progress_thread.join()
694
684
    @rtype: void
695
685
    @return: void
696
686
    """
697
 
    time = globals.restore_time or dup_time.curtime
 
687
    time = config.restore_time or dup_time.curtime
698
688
    sig_chain = col_stats.get_signature_chain_at_time(time)
699
689
    path_iter = diffdir.get_combined_path_iter(sig_chain.get_fileobjs(time))
700
690
    for path in path_iter:
710
700
 
711
701
def restore(col_stats):
712
702
    u"""
713
 
    Restore archive in globals.backend to globals.local_path
 
703
    Restore archive in config.backend to config.local_path
714
704
 
715
705
    @type col_stats: CollectionStatus object
716
706
    @param col_stats: collection status
718
708
    @rtype: void
719
709
    @return: void
720
710
    """
721
 
    if globals.dry_run:
 
711
    if config.dry_run:
722
712
        return
723
 
    if not patchdir.Write_ROPaths(globals.local_path,
 
713
    if not patchdir.Write_ROPaths(config.local_path,
724
714
                                  restore_get_patched_rop_iter(col_stats)):
725
 
        if globals.restore_dir:
 
715
        if config.restore_dir:
726
716
            log.FatalError(_(u"%s not found in archive - no files restored.")
727
 
                           % (util.fsdecode(globals.restore_dir)),
 
717
                           % (util.fsdecode(config.restore_dir)),
728
718
                           log.ErrorCode.restore_dir_not_found)
729
719
        else:
730
720
            log.FatalError(_(u"No files found in archive - nothing restored."),
738
728
    @type col_stats: CollectionStatus object
739
729
    @param col_stats: collection status
740
730
    """
741
 
    if globals.restore_dir:
742
 
        index = tuple(globals.restore_dir.split(b"/"))
 
731
    if config.restore_dir:
 
732
        index = tuple(config.restore_dir.split(b"/"))
743
733
    else:
744
734
        index = ()
745
 
    time = globals.restore_time or dup_time.curtime
 
735
    time = config.restore_time or dup_time.curtime
746
736
    backup_chain = col_stats.get_backup_chain_at_time(time)
747
737
    assert backup_chain, col_stats.all_backup_chains
748
738
    backup_setlist = backup_chain.get_sets_at_time(time)
763
753
            log.Progress(_(u'Processed volume %d of %d') % (cur_vol[0], num_vols),
764
754
                         cur_vol[0], num_vols)
765
755
 
766
 
    if hasattr(globals.backend, u'pre_process_download'):
 
756
    if hasattr(config.backend, u'pre_process_download'):
767
757
        file_names = []
768
758
        for backup_set in backup_setlist:
769
759
            manifest = backup_set.get_manifest()
770
760
            volumes = manifest.get_containing_volumes(index)
771
761
            for vol_num in volumes:
772
762
                file_names.append(backup_set.volume_name_dict[vol_num])
773
 
        globals.backend.pre_process_download(file_names)
 
763
        config.backend.pre_process_download(file_names)
774
764
 
775
765
    fileobj_iters = list(map(get_fileobj_iter, backup_setlist))
776
766
    tarfiles = list(map(patchdir.TarFile_FromFileobjs, fileobj_iters))
782
772
    Return plaintext fileobj from encrypted filename on backend
783
773
 
784
774
    If volume_info is set, the hash of the file will be checked,
785
 
    assuming some hash is available.  Also, if globals.sign_key is
 
775
    assuming some hash is available.  Also, if config.sign_key is
786
776
    set, a fatal error will be raised if file not signed by sign_key.
787
777
 
788
778
    """
802
792
                       log.ErrorCode.mismatched_hash)
803
793
 
804
794
    fileobj = tdp.filtered_open_with_delete(u"rb")
805
 
    if parseresults.encrypted and globals.gpg_profile.sign_key:
 
795
    if parseresults.encrypted and config.gpg_profile.sign_key:
806
796
        restore_add_sig_check(fileobj)
807
797
    return fileobj
808
798
 
837
827
        u"""Thunk run when closing volume file"""
838
828
        actual_sig = fileobj.fileobj.get_signature()
839
829
        actual_sig = u"None" if actual_sig is None else actual_sig
840
 
        sign_key = globals.gpg_profile.sign_key
 
830
        sign_key = config.gpg_profile.sign_key
841
831
        sign_key = u"None" if sign_key is None else sign_key
842
832
        ofs = -min(len(actual_sig), len(sign_key))
843
833
        if actual_sig[ofs:] != sign_key[ofs:]:
860
850
    """
861
851
    global exit_val
862
852
    collated = diffdir.collate2iters(restore_get_patched_rop_iter(col_stats),
863
 
                                     globals.select)
 
853
                                     config.select)
864
854
    diff_count = 0
865
855
    total_count = 0
866
856
    for backup_ropath, current_path in collated:
868
858
            backup_ropath = path.ROPath(current_path.index)
869
859
        if not current_path:
870
860
            current_path = path.ROPath(backup_ropath.index)
871
 
        if not backup_ropath.compare_verbose(current_path, globals.compare_data):
 
861
        if not backup_ropath.compare_verbose(current_path, config.compare_data):
872
862
            diff_count += 1
873
863
        total_count += 1
874
864
    # Unfortunately, ngettext doesn't handle multiple number variables, so we
899
889
        return
900
890
 
901
891
    filestr = u"\n".join(map(util.fsdecode, extraneous))
902
 
    if globals.force:
 
892
    if config.force:
903
893
        log.Notice(ngettext(u"Deleting this file from backend:",
904
894
                            u"Deleting these files from backend:",
905
895
                            len(extraneous)) + u"\n" + filestr)
906
 
        if not globals.dry_run:
 
896
        if not config.dry_run:
907
897
            col_stats.backend.delete(ext_remote)
908
898
            for fn in ext_local:
909
899
                try:
910
 
                    globals.archive_dir_path.append(fn).delete()
 
900
                    config.archive_dir_path.append(fn).delete()
911
901
                except Exception:
912
902
                    pass
913
903
    else:
927
917
    @rtype: void
928
918
    @return: void
929
919
    """
930
 
    assert globals.keep_chains is not None
 
920
    assert config.keep_chains is not None
931
921
 
932
 
    globals.remove_time = col_stats.get_nth_last_full_backup_time(globals.keep_chains)
 
922
    config.remove_time = col_stats.get_nth_last_full_backup_time(config.keep_chains)
933
923
 
934
924
    remove_old(col_stats)
935
925
 
936
926
 
937
927
def remove_old(col_stats):
938
928
    u"""
939
 
    Remove backup files older than globals.remove_time from backend
 
929
    Remove backup files older than config.remove_time from backend
940
930
 
941
931
    @type col_stats: CollectionStatus object
942
932
    @param col_stats: collection status
944
934
    @rtype: void
945
935
    @return: void
946
936
    """
947
 
    assert globals.remove_time is not None
 
937
    assert config.remove_time is not None
948
938
 
949
939
    def set_times_str(setlist):
950
940
        u"""Return string listing times of sets in setlist"""
954
944
        u"""Return string listing times of chains in chainlist"""
955
945
        return u"\n".join([dup_time.timetopretty(s.end_time) for s in chainlist])
956
946
 
957
 
    req_list = col_stats.get_older_than_required(globals.remove_time)
 
947
    req_list = col_stats.get_older_than_required(config.remove_time)
958
948
    if req_list:
959
949
        log.Warn(u"%s\n%s\n%s" %
960
950
                 (_(u"There are backup set(s) at time(s):"),
962
952
                  _(u"Which can't be deleted because newer sets depend on them.")))
963
953
 
964
954
    if (col_stats.matched_chain_pair and
965
 
            col_stats.matched_chain_pair[1].end_time < globals.remove_time):
 
955
            col_stats.matched_chain_pair[1].end_time < config.remove_time):
966
956
        log.Warn(_(u"Current active backup chain is older than specified time.  "
967
957
                   u"However, it will not be deleted.  To remove all your backups, "
968
958
                   u"manually purge the repository."))
969
959
 
970
 
    chainlist = col_stats.get_chains_older_than(globals.remove_time)
 
960
    chainlist = col_stats.get_chains_older_than(config.remove_time)
971
961
 
972
 
    if globals.remove_all_inc_of_but_n_full_mode:
 
962
    if config.remove_all_inc_of_but_n_full_mode:
973
963
        # ignore chains without incremental backups:
974
964
        chainlist = list(x for x in chainlist if
975
965
                         (isinstance(x, dup_collections.SignatureChain) and x.inclist) or
978
968
    if not chainlist:
979
969
        log.Notice(_(u"No old backup sets found, nothing deleted."))
980
970
        return
981
 
    if globals.force:
 
971
    if config.force:
982
972
        log.Notice(ngettext(u"Deleting backup chain at time:",
983
973
                            u"Deleting backup chains at times:",
984
974
                            len(chainlist)) +
985
975
                   u"\n" + chain_times_str(chainlist))
986
976
        # Add signature files too, since they won't be needed anymore
987
 
        chainlist += col_stats.get_signature_chains_older_than(globals.remove_time)
 
977
        chainlist += col_stats.get_signature_chains_older_than(config.remove_time)
988
978
        chainlist.reverse()  # save oldest for last
989
979
        for chain in chainlist:
990
980
            # if remove_all_inc_of_but_n_full_mode mode, remove only
991
981
            # incrementals one and not full
992
 
            if globals.remove_all_inc_of_but_n_full_mode:
 
982
            if config.remove_all_inc_of_but_n_full_mode:
993
983
                if isinstance(chain, dup_collections.SignatureChain):
994
984
                    chain_desc = _(u"Deleting any incremental signature chain rooted at %s")
995
985
                else:
1000
990
                else:
1001
991
                    chain_desc = _(u"Deleting complete backup chain %s")
1002
992
            log.Notice(chain_desc % dup_time.timetopretty(chain.end_time))
1003
 
            if not globals.dry_run:
1004
 
                chain.delete(keep_full=globals.remove_all_inc_of_but_n_full_mode)
 
993
            if not config.dry_run:
 
994
                chain.delete(keep_full=config.remove_all_inc_of_but_n_full_mode)
1005
995
        col_stats.set_values(sig_chain_warning=None)
1006
996
    else:
1007
997
        log.Notice(ngettext(u"Found old backup chain at the following time:",
1019
1009
    @return: void
1020
1010
    """
1021
1011
    action = u"replicate"
1022
 
    time = globals.restore_time or dup_time.curtime
1023
 
    src_stats = dup_collections.CollectionsStatus(globals.src_backend, None, action).set_values(sig_chain_warning=None)
1024
 
    tgt_stats = dup_collections.CollectionsStatus(globals.backend, None, action).set_values(sig_chain_warning=None)
 
1012
    time = config.restore_time or dup_time.curtime
 
1013
    src_stats = dup_collections.CollectionsStatus(config.src_backend, None, action).set_values(sig_chain_warning=None)
 
1014
    tgt_stats = dup_collections.CollectionsStatus(config.backend, None, action).set_values(sig_chain_warning=None)
1025
1015
 
1026
 
    src_list = globals.src_backend.list()
1027
 
    tgt_list = globals.backend.list()
 
1016
    src_list = config.src_backend.list()
 
1017
    tgt_list = config.backend.list()
1028
1018
 
1029
1019
    src_chainlist = src_stats.get_signature_chains(local=False, filelist=src_list)[0]
1030
1020
    tgt_chainlist = tgt_stats.get_signature_chains(local=False, filelist=tgt_list)[0]
1054
1044
                dup_time.setprevtime(src_sig.start_time)
1055
1045
            dup_time.setcurtime(src_sig.time or src_sig.end_time)
1056
1046
            log.Notice(_(u"Replicating %s.") % (src_sig_filename,))
1057
 
            fileobj = globals.src_backend.get_fileobj_read(src_sig_filename)
1058
 
            filename = file_naming.get(src_sig.type, encrypted=globals.encryption, gzipped=globals.compression)
 
1047
            fileobj = config.src_backend.get_fileobj_read(src_sig_filename)
 
1048
            filename = file_naming.get(src_sig.type, encrypted=config.encryption, gzipped=config.compression)
1059
1049
            tdp = dup_temp.new_tempduppath(file_naming.parse(filename))
1060
1050
            tmpobj = tdp.filtered_open(mode=u'wb')
1061
1051
            util.copyfileobj(fileobj, tmpobj)  # decrypt, compress, (re)-encrypt
1062
1052
            fileobj.close()
1063
1053
            tmpobj.close()
1064
 
            globals.backend.put(tdp, filename)
 
1054
            config.backend.put(tdp, filename)
1065
1055
            tdp.delete()
1066
1056
 
1067
1057
    src_chainlist = src_stats.get_backup_chains(filename_list=src_list)[0]
1093
1083
            mf = manifest.Manifest(fh=mf_tdp.filtered_open(mode=u'wb'))
1094
1084
            for i, filename in list(src_set.volume_name_dict.items()):
1095
1085
                log.Notice(_(u"Replicating %s.") % (filename,))
1096
 
                fileobj = restore_get_enc_fileobj(globals.src_backend, filename, rmf.volume_info_dict[i])
1097
 
                filename = file_naming.get(src_set.type, i, encrypted=globals.encryption, gzipped=globals.compression)
 
1086
                fileobj = restore_get_enc_fileobj(config.src_backend, filename, rmf.volume_info_dict[i])
 
1087
                filename = file_naming.get(src_set.type, i, encrypted=config.encryption, gzipped=config.compression)
1098
1088
                tdp = dup_temp.new_tempduppath(file_naming.parse(filename))
1099
1089
                tmpobj = tdp.filtered_open(mode=u'wb')
1100
1090
                util.copyfileobj(fileobj, tmpobj)  # decrypt, compress, (re)-encrypt
1101
1091
                fileobj.close()
1102
1092
                tmpobj.close()
1103
 
                globals.backend.put(tdp, filename)
 
1093
                config.backend.put(tdp, filename)
1104
1094
 
1105
1095
                vi = copy.copy(rmf.volume_info_dict[i])
1106
1096
                vi.set_hash(u"SHA1", gpg.get_hash(u"SHA1", tdp))
1113
1103
            mf_fileobj = mf_tdp.filtered_open_with_delete(mode=u'rb')
1114
1104
            mf_final_filename = file_naming.get(src_set.type,
1115
1105
                                                manifest=True,
1116
 
                                                encrypted=globals.encryption,
1117
 
                                                gzipped=globals.compression)
 
1106
                                                encrypted=config.encryption,
 
1107
                                                gzipped=config.compression)
1118
1108
            mf_final_tdp = dup_temp.new_tempduppath(file_naming.parse(mf_final_filename))
1119
1109
            mf_final_fileobj = mf_final_tdp.filtered_open(mode=u'wb')
1120
1110
            util.copyfileobj(mf_fileobj, mf_final_fileobj)  # compress, encrypt
1121
1111
            mf_fileobj.close()
1122
1112
            mf_final_fileobj.close()
1123
 
            globals.backend.put(mf_final_tdp, mf_final_filename)
 
1113
            config.backend.put(mf_final_tdp, mf_final_filename)
1124
1114
            mf_final_tdp.delete()
1125
1115
 
1126
 
    globals.src_backend.close()
1127
 
    globals.backend.close()
 
1116
    config.src_backend.close()
 
1117
    config.backend.close()
1128
1118
 
1129
1119
 
1130
1120
def sync_archive(col_stats):
1146
1136
 
1147
1137
        Otherwise, only the metadata for the target chain needs sync.
1148
1138
        """
1149
 
        if globals.metadata_sync_mode == u"full":
 
1139
        if config.metadata_sync_mode == u"full":
1150
1140
            return True
1151
 
        assert globals.metadata_sync_mode == u"partial"
 
1141
        assert config.metadata_sync_mode == u"partial"
1152
1142
        parsed = file_naming.parse(filename)
1153
1143
        try:
1154
1144
            target_chain = col_stats.get_backup_chain_at_time(
1155
 
                globals.restore_time or dup_time.curtime)
 
1145
                config.restore_time or dup_time.curtime)
1156
1146
        except dup_collections.CollectionsError:
1157
1147
            # With zero or multiple chains at this time, do a full sync
1158
1148
            return True
1226
1216
        return (pr, loc_name, fn)
1227
1217
 
1228
1218
    def remove_local(fn):
1229
 
        del_name = globals.archive_dir_path.append(fn).name
 
1219
        del_name = config.archive_dir_path.append(fn).name
1230
1220
 
1231
1221
        log.Notice(_(u"Deleting local %s (not authoritative at backend).") %
1232
1222
                   util.fsdecode(del_name))
1285
1275
 
1286
1276
        pr, loc_name, rem_name = resolve_basename(fn)
1287
1277
 
1288
 
        fileobj = globals.backend.get_fileobj_read(fn)
 
1278
        fileobj = config.backend.get_fileobj_read(fn)
1289
1279
        src_iter = SrcIter(fileobj)
1290
1280
        tdp = dup_temp.new_tempduppath(file_naming.parse(loc_name))
1291
1281
        if pr.manifest:
1293
1283
        else:
1294
1284
            gpg.GzipWriteFile(src_iter, tdp.name, size=sys.maxsize)
1295
1285
        tdp.setdata()
1296
 
        tdp.move(globals.archive_dir_path.append(loc_name))
 
1286
        tdp.move(config.archive_dir_path.append(loc_name))
1297
1287
 
1298
1288
    # get remote metafile list
1299
 
    remlist = globals.backend.list()
 
1289
    remlist = config.backend.list()
1300
1290
    remote_metafiles, ignored, rem_needpass = get_metafiles(remlist)
1301
1291
 
1302
1292
    # get local metafile list
1303
 
    loclist = globals.archive_dir_path.listdir()
 
1293
    loclist = config.archive_dir_path.listdir()
1304
1294
    local_metafiles, local_partials, loc_needpass = get_metafiles(loclist)
1305
1295
 
1306
1296
    # we have the list of metafiles on both sides. remote is always
1333
1323
    else:
1334
1324
        local_missing.sort()
1335
1325
        local_spurious.sort()
1336
 
        if not globals.dry_run:
 
1326
        if not config.dry_run:
1337
1327
            log.Notice(_(u"Synchronizing remote metadata to local cache..."))
1338
1328
            if local_missing and (rem_needpass or loc_needpass):
1339
1329
                # password for the --encrypt-key
1340
 
                globals.gpg_profile.passphrase = get_passphrase(1, u"sync")
 
1330
                config.gpg_profile.passphrase = get_passphrase(1, u"sync")
1341
1331
            for fn in local_spurious:
1342
1332
                remove_local(fn)
1343
 
            if hasattr(globals.backend, u'pre_process_download'):
1344
 
                globals.backend.pre_process_download(local_missing)
 
1333
            if hasattr(config.backend, u'pre_process_download'):
 
1334
                config.backend.pre_process_download(local_missing)
1345
1335
            for fn in local_missing:
1346
1336
                copy_to_local(fn)
1347
1337
            col_stats.set_values()
1367
1357
    assert col_stats.all_backup_chains
1368
1358
    last_backup_set = col_stats.all_backup_chains[-1].get_last()
1369
1359
    # check remote manifest only if we can decrypt it (see #1729796)
1370
 
    check_remote = not globals.encryption or globals.gpg_profile.passphrase
 
1360
    check_remote = not config.encryption or config.gpg_profile.passphrase
1371
1361
    last_backup_set.check_manifests(check_remote=check_remote)
1372
1362
 
1373
1363
 
1399
1389
        # Calculate space we need for at least 2 volumes of full or inc
1400
1390
        # plus about 30% of one volume for the signature files.
1401
1391
        freespace = stats.f_frsize * stats.f_bavail
1402
 
        needspace = (((globals.async_concurrency + 1) * globals.volsize) +
1403
 
                     int(0.30 * globals.volsize))
 
1392
        needspace = (((config.async_concurrency + 1) * config.volsize) +
 
1393
                     int(0.30 * config.volsize))
1404
1394
        if freespace < needspace:
1405
1395
            log.FatalError(_(u"Temp space has %d available, backup needs approx %d.") %
1406
1396
                           (freespace, needspace), log.ErrorCode.not_enough_freespace)
1438
1428
class Restart(object):
1439
1429
    u"""
1440
1430
    Class to aid in restart of inc or full backup.
1441
 
    Instance in globals.restart if restart in progress.
 
1431
    Instance in config.restart if restart in progress.
1442
1432
    """
1443
1433
 
1444
1434
    def __init__(self, last_backup):
1524
1514
    # determine what action we're performing and process command line
1525
1515
    action = commandline.ProcessCommandLine(sys.argv[1:])
1526
1516
 
1527
 
    globals.lockpath = os.path.join(globals.archive_dir_path.name, b"lockfile")
1528
 
    globals.lockfile = fasteners.process_lock.InterProcessLock(globals.lockpath)
1529
 
    log.Debug(_(u"Acquiring lockfile %s") % globals.lockpath)
1530
 
    if not globals.lockfile.acquire(blocking=False):
 
1517
    config.lockpath = os.path.join(config.archive_dir_path.name, b"lockfile")
 
1518
    config.lockfile = fasteners.process_lock.InterProcessLock(config.lockpath)
 
1519
    log.Debug(_(u"Acquiring lockfile %s") % config.lockpath)
 
1520
    if not config.lockfile.acquire(blocking=False):
1531
1521
        log.FatalError(
1532
1522
            u"Another duplicity instance is already running with this archive directory\n",
1533
1523
            log.ErrorCode.user_error)
1543
1533
 
1544
1534
def do_backup(action):
1545
1535
    # set the current time strings again now that we have time separator
1546
 
    if globals.current_time:
1547
 
        dup_time.setcurtime(globals.current_time)
 
1536
    if config.current_time:
 
1537
        dup_time.setcurtime(config.current_time)
1548
1538
    else:
1549
1539
        dup_time.setcurtime()
1550
1540
 
1555
1545
    check_resources(action)
1556
1546
 
1557
1547
    # get current collection status
1558
 
    col_stats = dup_collections.CollectionsStatus(globals.backend,
1559
 
                                                  globals.archive_dir_path,
 
1548
    col_stats = dup_collections.CollectionsStatus(config.backend,
 
1549
                                                  config.archive_dir_path,
1560
1550
                                                  action).set_values()
1561
1551
 
1562
1552
    # check archive synch with remote, fix if needed
1574
1564
            if last_backup.partial:
1575
1565
                if action in [u"full", u"inc"]:
1576
1566
                    # set restart parms from last_backup info
1577
 
                    globals.restart = Restart(last_backup)
 
1567
                    config.restart = Restart(last_backup)
1578
1568
                    # (possibly) reset action
1579
 
                    action = globals.restart.type
 
1569
                    action = config.restart.type
1580
1570
                    # reset the time strings
1581
1571
                    if action == u"full":
1582
 
                        dup_time.setcurtime(globals.restart.time)
 
1572
                        dup_time.setcurtime(config.restart.time)
1583
1573
                    else:
1584
 
                        dup_time.setcurtime(globals.restart.end_time)
1585
 
                        dup_time.setprevtime(globals.restart.start_time)
 
1574
                        dup_time.setcurtime(config.restart.end_time)
 
1575
                        dup_time.setprevtime(config.restart.start_time)
1586
1576
                    # log it -- main restart heavy lifting is done in write_multivol
1587
1577
                    log.Notice(_(u"Last %s backup left a partial set, restarting." % action))
1588
1578
                    break
1590
1580
                    # remove last partial backup and get new collection status
1591
1581
                    log.Notice(_(u"Cleaning up previous partial %s backup set, restarting." % action))
1592
1582
                    last_backup.delete()
1593
 
                    col_stats = dup_collections.CollectionsStatus(globals.backend,
1594
 
                                                                  globals.archive_dir_path,
 
1583
                    col_stats = dup_collections.CollectionsStatus(config.backend,
 
1584
                                                                  config.archive_dir_path,
1595
1585
                                                                  action).set_values()
1596
1586
                    continue
1597
1587
            break
1603
1593
        log.Notice(_(u"Last full backup date:") + u" " + dup_time.timetopretty(last_full_time))
1604
1594
    else:
1605
1595
        log.Notice(_(u"Last full backup date: none"))
1606
 
    if not globals.restart and action == u"inc" and globals.full_force_time is not None and \
1607
 
       last_full_time < globals.full_force_time:
 
1596
    if not config.restart and action == u"inc" and config.full_force_time is not None and \
 
1597
       last_full_time < config.full_force_time:
1608
1598
        log.Notice(_(u"Last full backup is too old, forcing full backup"))
1609
1599
        action = u"full"
1610
1600
    log.PrintCollectionStatus(col_stats)
1612
1602
    os.umask(0o77)
1613
1603
 
1614
1604
    # get the passphrase if we need to based on action/options
1615
 
    globals.gpg_profile.passphrase = get_passphrase(1, action)
 
1605
    config.gpg_profile.passphrase = get_passphrase(1, action)
1616
1606
 
1617
1607
    if action == u"restore":
1618
1608
        restore(col_stats)
1621
1611
    elif action == u"list-current":
1622
1612
        list_current(col_stats)
1623
1613
    elif action == u"collection-status":
1624
 
        if not globals.file_changed:
 
1614
        if not config.file_changed:
1625
1615
            log.PrintCollectionStatus(col_stats, True)
1626
1616
        else:
1627
 
            log.PrintCollectionFileChangedStatus(col_stats, globals.file_changed, True)
 
1617
            log.PrintCollectionFileChangedStatus(col_stats, config.file_changed, True)
1628
1618
    elif action == u"cleanup":
1629
1619
        cleanup(col_stats)
1630
1620
    elif action == u"remove-old":
1640
1630
        # the passphrase for full and inc is used by --sign-key
1641
1631
        # the sign key can have a different passphrase than the encrypt
1642
1632
        # key, therefore request a passphrase
1643
 
        if globals.gpg_profile.sign_key:
1644
 
            globals.gpg_profile.signing_passphrase = get_passphrase(1, action, True)
 
1633
        if config.gpg_profile.sign_key:
 
1634
            config.gpg_profile.signing_passphrase = get_passphrase(1, action, True)
1645
1635
 
1646
1636
        # if there are no recipients (no --encrypt-key), it must be a
1647
1637
        # symmetric key. Therefore, confirm the passphrase
1648
 
        if not (globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients):
1649
 
            globals.gpg_profile.passphrase = get_passphrase(2, action)
 
1638
        if not (config.gpg_profile.recipients or config.gpg_profile.hidden_recipients):
 
1639
            config.gpg_profile.passphrase = get_passphrase(2, action)
1650
1640
            # a limitation in the GPG implementation does not allow for
1651
1641
            # inputting different passphrases, this affects symmetric+sign.
1652
1642
            # Allow an empty passphrase for the key though to allow a non-empty
1653
1643
            # symmetric key
1654
 
            if (globals.gpg_profile.signing_passphrase and
1655
 
                    globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase):
 
1644
            if (config.gpg_profile.signing_passphrase and
 
1645
                    config.gpg_profile.passphrase != config.gpg_profile.signing_passphrase):
1656
1646
                log.FatalError(_(
1657
1647
                    u"When using symmetric encryption, the signing passphrase "
1658
1648
                    u"must equal the encryption passphrase."),
1666
1656
            if not sig_chain:
1667
1657
                full_backup(col_stats)
1668
1658
            else:
1669
 
                if not globals.restart:
 
1659
                if not config.restart:
1670
1660
                    # only ask for a passphrase if there was a previous backup
1671
1661
                    if col_stats.all_backup_chains:
1672
 
                        globals.gpg_profile.passphrase = get_passphrase(1, action)
 
1662
                        config.gpg_profile.passphrase = get_passphrase(1, action)
1673
1663
                        check_last_manifest(col_stats)  # not needed for full backups
1674
1664
                incremental_backup(sig_chain)
1675
 
    globals.backend.close()
 
1665
    config.backend.close()
1676
1666
    log.shutdown()
1677
1667
    if exit_val is not None:
1678
1668
        sys.exit(exit_val)