~ubuntu-branches/ubuntu/trusty/duplicity/trusty-proposed

Viewing changes to debian/patches/06_use_passphrase.dpatch

  • Committer: Package Import Robot
  • Author(s): Michael Terry
  • Date: 2011-09-08 09:10:57 UTC
  • Revision ID: package-import@ubuntu.com-20110908091057-0kwlagfye27pw2ku
Tags: 0.6.15-0ubuntu2
* Backport some upstream fixes
* debian/patches/06_use_passphrase.dpatch:
  - Don't prompt for passphrase if PASSPHRASE is set (LP: #836467)
* debian/patches/07_large_rackspace_list.dpatch:
  - Support listing more than 10,000 files from Rackspace servers
    (LP: #832149)
* debian/patches/08_check_volumes.dpatch:
  - Attempt to prevent a data corruption bug that currently can't be
    reproduced, by detecting it up front and stopping the backup.
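
In short, the passphrase fix makes the reuse checks in get_passphrase() look at
the environment directly ('PASSPHRASE' in os.environ) instead of at the cached
gpg_profile values, and lowers the verification level of the signing-passphrase
request from 3 to 1, so a passphrase supplied via the environment is never
re-prompted for confirmation. A minimal standalone sketch of the new reuse rule
follows (the helper name and arguments are illustrative, not duplicity's actual
API):

    import os

    def reuse_env_passphrase(for_signing, sign_key, recipients):
        # Only reuse a passphrase the user explicitly supplied via the
        # environment; a cached profile value may have come from a prompt.
        if for_signing and sign_key in recipients and 'PASSPHRASE' in os.environ:
            # PASSPHRASE doubles as SIGN_PASSPHRASE
            return os.environ['PASSPHRASE']
        if not for_signing and sign_key in recipients and 'SIGN_PASSPHRASE' in os.environ:
            # SIGN_PASSPHRASE doubles as PASSPHRASE
            return os.environ['SIGN_PASSPHRASE']
        return None  # fall through to duplicity's normal prompting logic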

1
#! /bin/sh /usr/share/dpatch/dpatch-run
 
2
## 06_use_passphrase.dpatch by Michael Terry <mterry@ubuntu.com>
 
3
##
 
4
## All lines beginning with `## DP:' are a description of the patch.
 
5
## DP: No description.
 
6
 
 
7
@DPATCH@
 
8
diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/duplicity duplicity-0.6.15/duplicity
 
9
--- duplicity-0.6.15~/duplicity 2011-09-08 09:02:47.045178540 -0400
 
10
+++ duplicity-0.6.15/duplicity  2011-09-08 09:04:01.657179330 -0400
 
11
@@ -90,15 +90,15 @@
 
12
     ## if signing key is also an encryption key assume that the passphrase is identical
 
13
     if ( for_signing
 
14
          and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
 
15
-         and globals.gpg_profile.passphrase is not None ):
 
16
-        log.Notice(_("Reuse already set PASSPHRASE as SIGNING_PASSPHRASE"))
 
17
-        return globals.gpg_profile.passphrase
 
18
+         and 'PASSPHRASE' in os.environ ):
 
19
+        log.Notice(_("Reuse configured PASSPHRASE as SIGN_PASSPHRASE"))
 
20
+        return os.environ['PASSPHRASE']
 
21
     ## if one encryption key is also the signing key assume that the passphrase is identical
 
22
     if ( not for_signing
 
23
          and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
 
24
-         and globals.gpg_profile.signing_passphrase is not None ):
 
25
-        log.Notice(_("Reuse already set SIGNING_PASSPHRASE as PASSPHRASE"))
 
26
-        return globals.gpg_profile.signing_passphrase
 
27
+         and 'SIGN_PASSPHRASE' in os.environ ):
 
28
+        log.Notice(_("Reuse configured SIGN_PASSPHRASE as PASSPHRASE"))
 
29
+        return os.environ['SIGN_PASSPHRASE']
 
30
 
 
31
     # Next, verify we need to ask the user
 
32
 
 
33
@@ -1280,7 +1280,7 @@
 
34
         # the sign key can have a different passphrase than the encrypt
 
35
         # key, therefore request a passphrase
 
36
         if globals.gpg_profile.sign_key:
 
37
-            globals.gpg_profile.signing_passphrase = get_passphrase(3, action, True)
 
38
+            globals.gpg_profile.signing_passphrase = get_passphrase(1, action, True)
 
39
 
 
40
         # if there are no recipients (no --encrypt-key), it must be a
 
41
         # symmetric key. Therefore, confirm the passphrase
 
42
diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/duplicity.orig duplicity-0.6.15/duplicity.orig
 
43
--- duplicity-0.6.15~/duplicity.orig    1969-12-31 19:00:00.000000000 -0500
 
44
+++ duplicity-0.6.15/duplicity.orig     2011-09-08 09:02:47.000000000 -0400
 
45
@@ -0,0 +1,1397 @@
 
46
+#!/usr/bin/env python
 
47
+# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
 
48
+#
 
49
+# duplicity -- Encrypted bandwidth efficient backup
 
50
+# Version 0.6.15 released August 19, 2011
 
51
+#
 
52
+# Copyright 2002 Ben Escoto <ben@emerose.org>
 
53
+# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
 
54
+#
 
55
+# This file is part of duplicity.
 
56
+#
 
57
+# Duplicity is free software; you can redistribute it and/or modify it
 
58
+# under the terms of the GNU General Public License as published by the
 
59
+# Free Software Foundation; either version 2 of the License, or (at your
 
60
+# option) any later version.
 
61
+#
 
62
+# Duplicity is distributed in the hope that it will be useful, but
 
63
+# WITHOUT ANY WARRANTY; without even the implied warranty of
 
64
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 
65
+# General Public License for more details.
 
66
+#
 
67
+# You should have received a copy of the GNU General Public License
 
68
+# along with duplicity; if not, write to the Free Software Foundation,
 
69
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
70
+#
 
71
+# See http://www.nongnu.org/duplicity for more information.
 
72
+# Please send mail to me or the mailing list if you find bugs or have
 
73
+# any suggestions.
 
74
+
 
75
+import getpass, gzip, os, sys, time, types
 
76
+import traceback, platform, statvfs, resource, re
 
77
+
 
78
+import gettext
 
79
+gettext.install('duplicity')
 
80
+
 
81
+from duplicity import log
 
82
+log.setup()
 
83
+
 
84
+import duplicity.errors
 
85
+
 
86
+from duplicity import collections
 
87
+from duplicity import commandline
 
88
+from duplicity import diffdir
 
89
+from duplicity import dup_temp
 
90
+from duplicity import dup_time
 
91
+from duplicity import file_naming
 
92
+from duplicity import globals
 
93
+from duplicity import gpg
 
94
+from duplicity import manifest
 
95
+from duplicity import patchdir
 
96
+from duplicity import path
 
97
+from duplicity import robust
 
98
+from duplicity import tempdir
 
99
+from duplicity import asyncscheduler
 
100
+from duplicity import util
 
101
+
 
102
+# If exit_val is not None, exit with given value at end.
 
103
+exit_val = None
 
104
+
 
105
+
 
106
+def get_passphrase(n, action, for_signing = False):
 
107
+    """
 
108
+    Check to make sure passphrase is indeed needed, then get
 
109
+    the passphrase from environment, from gpg-agent, or user
 
110
+
 
111
+    If n=3, a password is requested and verified. If n=2, the current
 
112
+    password is verified. If n=1, a password is requested without
 
113
+    verification for the time being.
 
114
+
 
115
+    @type  n: int
 
116
+    @param n: verification level for a passphrase being requested
 
117
+    @type  action: string
 
118
+    @param action: action to perform
 
119
+    @type  for_signing: boolean
 
120
+    @param for_signing: true if the passphrase is for a signing key, false if not
 
121
+    @rtype: string
 
122
+    @return: passphrase
 
123
+    """
 
124
+
 
125
+    # First try the environment
 
126
+    try:
 
127
+        if for_signing:
 
128
+            return os.environ['SIGN_PASSPHRASE']
 
129
+        else:
 
130
+            return os.environ['PASSPHRASE']
 
131
+    except KeyError:
 
132
+        pass
 
133
+
 
134
+    # check if we can reuse an already set (signing_)passphrase
 
135
+    ## if signing key is also an encryption key assume that the passphrase is identical
 
136
+    if ( for_signing
 
137
+         and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
 
138
+         and globals.gpg_profile.passphrase is not None ):
 
139
+        log.Notice(_("Reuse already set PASSPHRASE as SIGNING_PASSPHRASE"))
 
140
+        return globals.gpg_profile.passphrase
 
141
+    ## if one encryption key is also the signing key assume that the passphrase is identical
 
142
+    if ( not for_signing
 
143
+         and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
 
144
+         and globals.gpg_profile.signing_passphrase is not None ):
 
145
+        log.Notice(_("Reuse already set SIGNING_PASSPHRASE as PASSPHRASE"))
 
146
+        return globals.gpg_profile.signing_passphrase
 
147
+
 
148
+    # Next, verify we need to ask the user
 
149
+
 
150
+    # Assumptions:
 
151
+    #   - encrypt-key has no passphrase
 
152
+    #   - sign-key requires passphrase
 
153
+    #   - gpg-agent supplies all, no user interaction
 
154
+
 
155
+    # no passphrase if --no-encryption or --use-agent
 
156
+    if not globals.encryption or globals.use_agent:
 
157
+        return ""
 
158
+
 
159
+    # these commands don't need a password
 
160
+    elif action in ["collection-status",
 
161
+                    "list-current",
 
162
+                    "remove-all-but-n-full",
 
163
+                    "remove-all-inc-of-but-n-full",
 
164
+                    "remove-old",
 
165
+                    ]:
 
166
+        return ""
 
167
+
 
168
+    # for a full backup, we don't need a password if
 
169
+    # there is no sign_key and there are recipients
 
170
+    elif (action == "full"
 
171
+          and globals.gpg_profile.recipients
 
172
+          and not globals.gpg_profile.sign_key):
 
173
+        return ""
 
174
+
 
175
+    # for an inc backup, we don't need a password if
 
176
+    # there is no sign_key and there are recipients
 
177
+    elif (action == "inc"
 
178
+          and globals.gpg_profile.recipients
 
179
+          and not globals.gpg_profile.sign_key):
 
180
+        return ""
 
181
+
 
182
+    # Finally, ask the user for the passphrase
 
183
+    else:
 
184
+        log.Info(_("PASSPHRASE variable not set, asking user."))
 
185
+        use_cache = True
 
186
+        while 1:
 
187
+            # ask the user to enter a new passphrase to avoid an infinite loop
 
188
+            # if the user made a typo in the first passphrase
 
189
+            if use_cache and n == 2:
 
190
+                if for_signing:
 
191
+                    pass1 = globals.gpg_profile.signing_passphrase
 
192
+                else:
 
193
+                    pass1 = globals.gpg_profile.passphrase
 
194
+            else:
 
195
+                if for_signing:
 
196
+                    if use_cache and globals.gpg_profile.signing_passphrase:
 
197
+                        pass1 = globals.gpg_profile.signing_passphrase
 
198
+                    else:
 
199
+                        pass1 = getpass.getpass(_("GnuPG passphrase for signing key:")+" ")
 
200
+                else:
 
201
+                    if use_cache and globals.gpg_profile.passphrase:
 
202
+                        pass1 = globals.gpg_profile.passphrase
 
203
+                    else:
 
204
+                        pass1 = getpass.getpass(_("GnuPG passphrase:")+" ")
 
205
+
 
206
+            if n == 1:
 
207
+                pass2 = pass1
 
208
+            elif for_signing:
 
209
+                pass2 = getpass.getpass(_("Retype passphrase for signing key to confirm: "))
 
210
+            else:
 
211
+                pass2 = getpass.getpass(_("Retype passphrase to confirm: "))
 
212
+
 
213
+            if not pass1 == pass2:
 
214
+                print _("First and second passphrases do not match!  Please try again.")
 
215
+                use_cache = False
 
216
+                continue
 
217
+
 
218
+            if not pass1 and not globals.gpg_profile.recipients and not for_signing:
 
219
+                print _("Cannot use empty passphrase with symmetric encryption!  Please try again.")
 
220
+                use_cache = False
 
221
+                continue
 
222
+
 
223
+            return pass1
 
224
+
 
225
+
 
226
+def dummy_backup(tarblock_iter):
 
227
+    """
 
228
+    Fake writing to backend, but do go through all the source paths.
 
229
+
 
230
+    @type tarblock_iter: tarblock_iter
 
231
+    @param tarblock_iter: iterator for current tar block
 
232
+
 
233
+    @rtype: int
 
234
+    @return: constant 0 (zero)
 
235
+    """
 
236
+    try:
 
237
+        # Just spin our wheels
 
238
+        while tarblock_iter.next():
 
239
+            pass
 
240
+    except StopIteration:
 
241
+        pass
 
242
+    log.Progress(None, diffdir.stats.SourceFileSize)
 
243
+    return 0
 
244
+
 
245
+
 
246
+def restart_position_iterator(tarblock_iter):
 
247
+    """
 
248
+    Fake writing to backend, but do go through all the source paths.
 
249
+    Stop when we have processed the last file and block from the
 
250
+    last backup.  Normal backup will proceed at the start of the
 
251
+    next volume in the set.
 
252
+
 
253
+    @type tarblock_iter: tarblock_iter
 
254
+    @param tarblock_iter: iterator for current tar block
 
255
+
 
256
+    @rtype: int
 
257
+    @return: constant 0 (zero)
 
258
+    """
 
259
+    last_index = globals.restart.last_index
 
260
+    last_block = globals.restart.last_block
 
261
+    try:
 
262
+        # Just spin our wheels
 
263
+        while tarblock_iter.next():
 
264
+            if (tarblock_iter.previous_index == last_index):
 
265
+                if (tarblock_iter.previous_block > last_block):
 
266
+                    break
 
267
+            if tarblock_iter.previous_index > last_index:
 
268
+                log.Warn(_("File %s complete in backup set.\n"
 
269
+                           "Continuing restart on file %s.") %
 
270
+                           ("/".join(last_index), "/".join(tarblock_iter.previous_index)),
 
271
+                           log.ErrorCode.restart_file_not_found)
 
272
+                break
 
273
+    except StopIteration:
 
274
+        log.Warn(_("File %s missing in backup set.\n"
 
275
+                   "Continuing restart on file %s.") %
 
276
+                   ("/".join(last_index), "/".join(tarblock_iter.previous_index)),
 
277
+                   log.ErrorCode.restart_file_not_found)
 
278
+    return 0
 
279
+
 
280
+
 
281
+def write_multivol(backup_type, tarblock_iter, man_outfp, sig_outfp, backend):
 
282
+    """
 
283
+    Encrypt volumes of tarblock_iter and write to backend
 
284
+
 
285
+    backup_type should be "inc" or "full" and only matters here when
 
286
+    picking the filenames.  The path_prefix will determine the names
 
287
+    of the files written to backend.  Also writes manifest file.
 
288
+    Returns number of bytes written.
 
289
+
 
290
+    @type backup_type: string
 
291
+    @param backup_type: type of backup to perform, either 'inc' or 'full'
 
292
+    @type tarblock_iter: tarblock_iter
 
293
+    @param tarblock_iter: iterator for current tar block
 
294
+    @type backend: callable backend object
 
295
+    @param backend: I/O backend for selected protocol
 
296
+
 
297
+    @rtype: int
 
298
+    @return: bytes written
 
299
+    """
 
300
+
 
301
+    def get_indicies(tarblock_iter):
 
302
+        """Return start_index and end_index of previous volume"""
 
303
+        start_index, start_block = tarblock_iter.recall_index()
 
304
+        if start_index is None:
 
305
+            start_index = ()
 
306
+            start_block = None
 
307
+        if start_block:
 
308
+            start_block -= 1
 
309
+        end_index, end_block = tarblock_iter.get_previous_index()
 
310
+        if end_index is None:
 
311
+            end_index = start_index
 
312
+            end_block = start_block
 
313
+        if end_block:
 
314
+            end_block -= 1
 
315
+        return start_index, start_block, end_index, end_block
 
316
+
 
317
+    def put(tdp, dest_filename):
 
318
+        """
 
319
+        Retrieve file size *before* calling backend.put(), which may (at least
 
320
+        in case of the localbackend) rename the temporary file to the target
 
321
+        instead of copying.
 
322
+        """
 
323
+        putsize = tdp.getsize()
 
324
+        backend.put(tdp, dest_filename)
 
325
+        if tdp.stat:
 
326
+            tdp.delete()
 
327
+        return putsize
 
328
+
 
329
+    if not globals.restart:
 
330
+        # normal backup start
 
331
+        vol_num = 0
 
332
+        mf = manifest.Manifest(fh=man_outfp)
 
333
+        mf.set_dirinfo()
 
334
+    else:
 
335
+        # restart from last known position
 
336
+        mf = globals.restart.last_backup.get_local_manifest()
 
337
+        globals.restart.checkManifest(mf)
 
338
+        globals.restart.setLastSaved(mf)
 
339
+        mf.fh = man_outfp
 
340
+        last_block = globals.restart.last_block
 
341
+        log.Notice("Restarting after volume %s, file %s, block %s" %
 
342
+                   (globals.restart.start_vol,
 
343
+                    "/".join(globals.restart.last_index),
 
344
+                    globals.restart.last_block))
 
345
+        vol_num = globals.restart.start_vol
 
346
+        restart_position_iterator(tarblock_iter)
 
347
+
 
348
+    at_end = 0
 
349
+    bytes_written = 0
 
350
+
 
351
+    # This assertion must be kept until we have solved the problem
 
352
+    # of concurrency at the backend level. Concurrency 1 is fine
 
353
+    # because the actual I/O concurrency on backends is limited to
 
354
+    # 1 as usual, but we are allowed to perform local CPU
 
355
+    # intensive tasks while that single upload is happening. This
 
356
+    # is an assert put in place to avoid someone accidentally
 
357
+    # enabling concurrency above 1, before adequate work has been
 
358
+    # done on the backends to make them support concurrency.
 
359
+    assert globals.async_concurrency <= 1
 
360
+
 
361
+    io_scheduler = asyncscheduler.AsyncScheduler(globals.async_concurrency)
 
362
+    async_waiters = []
 
363
+
 
364
+    while not at_end:
 
365
+        # set up iterator
 
366
+        tarblock_iter.remember_next_index() # keep track of start index
 
367
+
 
368
+        # Create volume
 
369
+        vol_num += 1
 
370
+        dest_filename = file_naming.get(backup_type, vol_num,
 
371
+                                        encrypted=globals.encryption,
 
372
+                                        gzipped=not globals.encryption)
 
373
+        tdp = dup_temp.new_tempduppath(file_naming.parse(dest_filename))
 
374
+
 
375
+        # write volume
 
376
+        if globals.encryption:
 
377
+            at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name,
 
378
+                                      globals.gpg_profile, globals.volsize)
 
379
+        else:
 
380
+            at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize)
 
381
+        tdp.setdata()
 
382
+
 
383
+        # Add volume information to manifest
 
384
+        vi = manifest.VolumeInfo()
 
385
+        vi.set_info(vol_num, *get_indicies(tarblock_iter))
 
386
+        vi.set_hash("SHA1", gpg.get_hash("SHA1", tdp))
 
387
+        mf.add_volume_info(vi)
 
388
+
 
389
+        # Checkpoint after each volume so restart has a place to restart.
 
390
+        # Note that until after the first volume, all files are temporary.
 
391
+        if vol_num == 1:
 
392
+            sig_outfp.to_partial()
 
393
+            man_outfp.to_partial()
 
394
+        else:
 
395
+            sig_outfp.flush()
 
396
+            man_outfp.flush()
 
397
+
 
398
+        async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename: put(tdp, dest_filename),
 
399
+                                                        (tdp, dest_filename)))
 
400
+
 
401
+        # Log human-readable version as well as raw numbers for machine consumers
 
402
+        log.Progress('Processed volume %d' % vol_num, diffdir.stats.SourceFileSize)
 
403
+
 
404
+        # for testing purposes only - assert on inc or full
 
405
+        assert globals.fail_on_volume != vol_num, "Forced assertion for testing at volume %d" % vol_num
 
406
+
 
407
+    # Collect byte count from all asynchronous jobs; also implicitly waits
 
408
+    # for them all to complete.
 
409
+    for waiter in async_waiters:
 
410
+        bytes_written += waiter()
 
411
+
 
412
+    # Upload the collection summary.
 
413
+    #bytes_written += write_manifest(mf, backup_type, backend)
 
414
+
 
415
+    return bytes_written
 
416
+
 
417
+
 
418
+def get_man_fileobj(backup_type):
 
419
+    """
 
420
+    Return a fileobj opened for writing, save results as manifest
 
421
+
 
422
+    Save manifest in globals.archive_dir gzipped.
 
423
+    Save them on the backend encrypted as needed.
 
424
+
 
425
+    @type man_type: string
 
426
+    @param man_type: either "full" or "new"
 
427
+
 
428
+    @rtype: fileobj
 
429
+    @return: fileobj opened for writing
 
430
+    """
 
431
+    assert backup_type == "full" or backup_type == "inc"
 
432
+
 
433
+    part_man_filename = file_naming.get(backup_type,
 
434
+                                        manifest=True,
 
435
+                                        partial=True)
 
436
+    perm_man_filename = file_naming.get(backup_type,
 
437
+                                        manifest=True)
 
438
+    remote_man_filename = file_naming.get(backup_type,
 
439
+                                          manifest=True,
 
440
+                                          encrypted=globals.encryption)
 
441
+
 
442
+    fh = dup_temp.get_fileobj_duppath(globals.archive_dir,
 
443
+                                      part_man_filename,
 
444
+                                      perm_man_filename,
 
445
+                                      remote_man_filename)
 
446
+    return fh
 
447
+
 
448
+
 
449
+def get_sig_fileobj(sig_type):
 
450
+    """
 
451
+    Return a fileobj opened for writing, save results as signature
 
452
+
 
453
+    Save signatures in globals.archive_dir gzipped.
 
454
+    Save them on the backend encrypted as needed.
 
455
+
 
456
+    @type sig_type: string
 
457
+    @param sig_type: either "full-sig" or "new-sig"
 
458
+
 
459
+    @rtype: fileobj
 
460
+    @return: fileobj opened for writing
 
461
+    """
 
462
+    assert sig_type in ["full-sig", "new-sig"]
 
463
+
 
464
+    part_sig_filename = file_naming.get(sig_type,
 
465
+                                        gzipped=False,
 
466
+                                        partial=True)
 
467
+    perm_sig_filename = file_naming.get(sig_type,
 
468
+                                        gzipped=True)
 
469
+    remote_sig_filename = file_naming.get(sig_type, encrypted=globals.encryption,
 
470
+                                          gzipped=not globals.encryption)
 
471
+
 
472
+    fh = dup_temp.get_fileobj_duppath(globals.archive_dir,
 
473
+                                      part_sig_filename,
 
474
+                                      perm_sig_filename,
 
475
+                                      remote_sig_filename)
 
476
+    return fh
 
477
+
 
478
+
 
479
+def full_backup(col_stats):
 
480
+    """
 
481
+    Do full backup of directory to backend, using archive_dir
 
482
+
 
483
+    @type col_stats: CollectionStatus object
 
484
+    @param col_stats: collection status
 
485
+
 
486
+    @rtype: void
 
487
+    @return: void
 
488
+    """
 
489
+    if globals.dry_run:
 
490
+        tarblock_iter = diffdir.DirFull(globals.select)
 
491
+        bytes_written = dummy_backup(tarblock_iter)
 
492
+        col_stats.set_values(sig_chain_warning=None)
 
493
+    else:
 
494
+        sig_outfp = get_sig_fileobj("full-sig")
 
495
+        man_outfp = get_man_fileobj("full")
 
496
+        tarblock_iter = diffdir.DirFull_WriteSig(globals.select,
 
497
+                                                 sig_outfp)
 
498
+        bytes_written = write_multivol("full", tarblock_iter,
 
499
+                                       man_outfp, sig_outfp,
 
500
+                                       globals.backend)
 
501
+
 
502
+        # close sig file, send to remote, and rename to final
 
503
+        sig_outfp.close()
 
504
+        sig_outfp.to_remote()
 
505
+        sig_outfp.to_final()
 
506
+
 
507
+        # close manifest, send to remote, and rename to final
 
508
+        man_outfp.close()
 
509
+        man_outfp.to_remote()
 
510
+        man_outfp.to_final()
 
511
+
 
512
+        col_stats.set_values(sig_chain_warning=None)
 
513
+
 
514
+    print_statistics(diffdir.stats, bytes_written)
 
515
+
 
516
+
 
517
+def check_sig_chain(col_stats):
 
518
+    """
 
519
+    Get last signature chain for inc backup, or None if none available
 
520
+
 
521
+    @type col_stats: CollectionStatus object
 
522
+    @param col_stats: collection status
 
523
+    """
 
524
+    if not col_stats.matched_chain_pair:
 
525
+        if globals.incremental:
 
526
+            log.FatalError(_("Fatal Error: Unable to start incremental backup.  "
 
527
+                             "Old signatures not found and incremental specified"),
 
528
+                           log.ErrorCode.inc_without_sigs)
 
529
+        else:
 
530
+            log.Warn(_("No signatures found, switching to full backup."))
 
531
+        return None
 
532
+    return col_stats.matched_chain_pair[0]
 
533
+
 
534
+
 
535
+def print_statistics(stats, bytes_written):
 
536
+    """
 
537
+    If globals.print_statistics, print stats after adding bytes_written
 
538
+
 
539
+    @rtype: void
 
540
+    @return: void
 
541
+    """
 
542
+    if globals.print_statistics:
 
543
+        diffdir.stats.TotalDestinationSizeChange = bytes_written
 
544
+        print diffdir.stats.get_stats_logstring(_("Backup Statistics"))
 
545
+
 
546
+
 
547
+def incremental_backup(sig_chain):
 
548
+    """
 
549
+    Do incremental backup of directory to backend, using archive_dir
 
550
+
 
551
+    @rtype: void
 
552
+    @return: void
 
553
+    """
 
554
+    dup_time.setprevtime(sig_chain.end_time)
 
555
+    if dup_time.curtime == dup_time.prevtime:
 
556
+        time.sleep(2)
 
557
+        dup_time.setcurtime()
 
558
+        assert dup_time.curtime != dup_time.prevtime, "time not moving forward at appropriate pace - system clock issues?"
 
559
+    if globals.dry_run:
 
560
+        tarblock_iter = diffdir.DirDelta(globals.select,
 
561
+                                         sig_chain.get_fileobjs())
 
562
+        bytes_written = dummy_backup(tarblock_iter)
 
563
+    else:
 
564
+        new_sig_outfp = get_sig_fileobj("new-sig")
 
565
+        new_man_outfp = get_man_fileobj("inc")
 
566
+        tarblock_iter = diffdir.DirDelta_WriteSig(globals.select,
 
567
+                                                  sig_chain.get_fileobjs(),
 
568
+                                                  new_sig_outfp)
 
569
+        bytes_written = write_multivol("inc", tarblock_iter,
 
570
+                                       new_man_outfp, new_sig_outfp,
 
571
+                                       globals.backend)
 
572
+
 
573
+        # close sig file and rename to final
 
574
+        new_sig_outfp.close()
 
575
+        new_sig_outfp.to_remote()
 
576
+        new_sig_outfp.to_final()
 
577
+
 
578
+        # close manifest and rename to final
 
579
+        new_man_outfp.close()
 
580
+        new_man_outfp.to_remote()
 
581
+        new_man_outfp.to_final()
 
582
+
 
583
+    print_statistics(diffdir.stats, bytes_written)
 
584
+
 
585
+
 
586
+def list_current(col_stats):
 
587
+    """
 
588
+    List the files current in the archive (examining signature only)
 
589
+
 
590
+    @type col_stats: CollectionStatus object
 
591
+    @param col_stats: collection status
 
592
+
 
593
+    @rtype: void
 
594
+    @return: void
 
595
+    """
 
596
+    time = globals.restore_time or dup_time.curtime
 
597
+    sig_chain = col_stats.get_signature_chain_at_time(time)
 
598
+    path_iter = diffdir.get_combined_path_iter(sig_chain.get_fileobjs(time))
 
599
+    for path in path_iter:
 
600
+        if path.difftype != "deleted":
 
601
+            user_info = "%s %s" % (dup_time.timetopretty(path.getmtime()),
 
602
+                                   path.get_relative_path())
 
603
+            log_info = "%s %s" % (dup_time.timetostring(path.getmtime()),
 
604
+                                  util.escape(path.get_relative_path()))
 
605
+            log.Log(user_info, log.INFO, log.InfoCode.file_list,
 
606
+                    log_info, True)
 
607
+
 
608
+
 
609
+def restore(col_stats):
 
610
+    """
 
611
+    Restore archive in globals.backend to globals.local_path
 
612
+
 
613
+    @type col_stats: CollectionStatus object
 
614
+    @param col_stats: collection status
 
615
+
 
616
+    @rtype: void
 
617
+    @return: void
 
618
+    """
 
619
+    if globals.dry_run:
 
620
+        return
 
621
+    if not patchdir.Write_ROPaths(globals.local_path,
 
622
+                                  restore_get_patched_rop_iter(col_stats)):
 
623
+        if globals.restore_dir:
 
624
+            log.FatalError(_("%s not found in archive, no files restored.")
 
625
+                           % (globals.restore_dir,),
 
626
+                           log.ErrorCode.restore_dir_not_found)
 
627
+        else:
 
628
+            log.FatalError(_("No files found in archive - nothing restored."),
 
629
+                           log.ErrorCode.no_restore_files)
 
630
+
 
631
+
 
632
+def restore_get_patched_rop_iter(col_stats):
 
633
+    """
 
634
+    Return iterator of patched ROPaths of desired restore data
 
635
+
 
636
+    @type col_stats: CollectionStatus object
 
637
+    @param col_stats: collection status
 
638
+    """
 
639
+    if globals.restore_dir:
 
640
+        index = tuple(globals.restore_dir.split("/"))
 
641
+    else:
 
642
+        index = ()
 
643
+    time = globals.restore_time or dup_time.curtime
 
644
+    backup_chain = col_stats.get_backup_chain_at_time(time)
 
645
+    assert backup_chain, col_stats.all_backup_chains
 
646
+    backup_setlist = backup_chain.get_sets_at_time(time)
 
647
+    num_vols = 0
 
648
+    for s in backup_setlist:
 
649
+        num_vols += len(s)
 
650
+    cur_vol = [0]
 
651
+
 
652
+    def get_fileobj_iter(backup_set):
 
653
+        """Get file object iterator from backup_set contain given index"""
 
654
+        manifest = backup_set.get_manifest()
 
655
+        volumes = manifest.get_containing_volumes(index)
 
656
+        for vol_num in volumes:
 
657
+            yield restore_get_enc_fileobj(backup_set.backend,
 
658
+                                          backup_set.volume_name_dict[vol_num],
 
659
+                                          manifest.volume_info_dict[vol_num])
 
660
+            cur_vol[0] += 1
 
661
+            log.Progress(_('Processed volume %d of %d') % (cur_vol[0], num_vols),
 
662
+                         cur_vol[0], num_vols)
 
663
+
 
664
+    fileobj_iters = map(get_fileobj_iter, backup_setlist)
 
665
+    tarfiles = map(patchdir.TarFile_FromFileobjs, fileobj_iters)
 
666
+    return patchdir.tarfiles2rop_iter(tarfiles, index)
 
667
+
 
668
+
 
669
+def restore_get_enc_fileobj(backend, filename, volume_info):
 
670
+    """
 
671
+    Return plaintext fileobj from encrypted filename on backend
 
672
+
 
673
+    If volume_info is set, the hash of the file will be checked,
 
674
+    assuming some hash is available.  Also, if globals.sign_key is
 
675
+    set, a fatal error will be raised if file not signed by sign_key.
 
676
+
 
677
+    """
 
678
+    parseresults = file_naming.parse(filename)
 
679
+    tdp = dup_temp.new_tempduppath(parseresults)
 
680
+    backend.get(filename, tdp)
 
681
+
 
682
+    """ verify hash of the remote file """
 
683
+    verified, hash_pair, calculated_hash = restore_check_hash(volume_info, tdp)
 
684
+    if not verified:
 
685
+        log.FatalError("%s\n %s\n %s\n %s\n" %
 
686
+                           (_("Invalid data - %s hash mismatch for file:") % hash_pair[0],
 
687
+                            filename,
 
688
+                            _("Calculated hash: %s") % calculated_hash,
 
689
+                            _("Manifest hash: %s") % hash_pair[1]),
 
690
+                           log.ErrorCode.mismatched_hash)
 
691
+
 
692
+    fileobj = tdp.filtered_open_with_delete("rb")
 
693
+    if parseresults.encrypted and globals.gpg_profile.sign_key:
 
694
+        restore_add_sig_check(fileobj)
 
695
+    return fileobj
 
696
+
 
697
+
 
698
+def restore_check_hash(volume_info, vol_path):
 
699
+    """
 
700
+    Check the hash of vol_path path against data in volume_info
 
701
+
 
702
+    @rtype: boolean
 
703
+    @return: true (verified) / false (failed)
 
704
+    """
 
705
+    hash_pair = volume_info.get_best_hash()
 
706
+    if hash_pair:
 
707
+        calculated_hash = gpg.get_hash(hash_pair[0], vol_path)
 
708
+        if calculated_hash != hash_pair[1]:
 
709
+            return False, hash_pair, calculated_hash
 
710
+    """ reached here, verification passed """
 
711
+    return True, hash_pair, calculated_hash
 
712
+
 
713
+
 
714
+def restore_add_sig_check(fileobj):
 
715
+    """
 
716
+    Require signature when closing fileobj matches sig in gpg_profile
 
717
+
 
718
+    @rtype: void
 
719
+    @return: void
 
720
+    """
 
721
+    assert (isinstance(fileobj, dup_temp.FileobjHooked) and
 
722
+            isinstance(fileobj.fileobj, gpg.GPGFile)), fileobj
 
723
+    def check_signature():
 
724
+        """Thunk run when closing volume file"""
 
725
+        actual_sig = fileobj.fileobj.get_signature()
 
726
+        if actual_sig != globals.gpg_profile.sign_key:
 
727
+            log.FatalError(_("Volume was signed by key %s, not %s") %
 
728
+                           (actual_sig, globals.gpg_profile.sign_key),
 
729
+                           log.ErrorCode.unsigned_volume)
 
730
+    fileobj.addhook(check_signature)
 
731
+
 
732
+
 
733
+def verify(col_stats):
 
734
+    """
 
735
+    Verify files, logging differences
 
736
+
 
737
+    @type col_stats: CollectionStatus object
 
738
+    @param col_stats: collection status
 
739
+
 
740
+    @rtype: void
 
741
+    @return: void
 
742
+    """
 
743
+    global exit_val
 
744
+    collated = diffdir.collate2iters(restore_get_patched_rop_iter(col_stats),
 
745
+                                     globals.select)
 
746
+    diff_count = 0; total_count = 0
 
747
+    for backup_ropath, current_path in collated:
 
748
+        if not backup_ropath:
 
749
+            backup_ropath = path.ROPath(current_path.index)
 
750
+        if not current_path:
 
751
+            current_path = path.ROPath(backup_ropath.index)
 
752
+        if not backup_ropath.compare_verbose(current_path):
 
753
+            diff_count += 1
 
754
+        total_count += 1
 
755
+    # Unfortunately, ngettext doesn't handle multiple number variables, so we
 
756
+    # split up the string.
 
757
+    log.Notice(_("Verify complete: %s, %s.") %
 
758
+               (gettext.ngettext("%d file compared",
 
759
+                                 "%d files compared", total_count) % total_count,
 
760
+                gettext.ngettext("%d difference found",
 
761
+                                 "%d differences found", diff_count) % diff_count))
 
762
+    if diff_count >= 1:
 
763
+        exit_val = 1
 
764
+
 
765
+
 
766
+def cleanup(col_stats):
 
767
+    """
 
768
+    Delete the extraneous files in the current backend
 
769
+
 
770
+    @type col_stats: CollectionStatus object
 
771
+    @param col_stats: collection status
 
772
+
 
773
+    @rtype: void
 
774
+    @return: void
 
775
+    """
 
776
+    ext_local, ext_remote = col_stats.get_extraneous(globals.extra_clean)
 
777
+    extraneous = ext_local + ext_remote
 
778
+    if not extraneous:
 
779
+        log.Warn(_("No extraneous files found, nothing deleted in cleanup."))
 
780
+        return
 
781
+
 
782
+    filestr = "\n".join(extraneous)
 
783
+    if globals.force:
 
784
+        log.Notice(gettext.ngettext("Deleting this file from backend:",
 
785
+                                    "Deleting these files from backend:",
 
786
+                                    len(extraneous))
 
787
+                   + "\n" + filestr)
 
788
+        if not globals.dry_run:
 
789
+            col_stats.backend.delete(ext_remote)
 
790
+            for fn in ext_local:
 
791
+                try:
 
792
+                    globals.archive_dir.append(fn).delete()
 
793
+                except Exception:
 
794
+                    pass
 
795
+    else:
 
796
+        log.Notice(gettext.ngettext("Found the following file to delete:",
 
797
+                                     "Found the following files to delete:",
 
798
+                                    len(extraneous))
 
799
+                   + "\n" + filestr + "\n"
 
800
+                   + _("Run duplicity again with the --force option to actually delete."))
 
801
+
 
802
+
 
803
+def remove_all_but_n_full(col_stats):
 
804
+    """
 
805
+    Remove backup files older than the last n full backups.
 
806
+
 
807
+    @type col_stats: CollectionStatus object
 
808
+    @param col_stats: collection status
 
809
+
 
810
+    @rtype: void
 
811
+    @return: void
 
812
+    """
 
813
+    assert globals.keep_chains is not None
 
814
+
 
815
+    globals.remove_time = col_stats.get_nth_last_full_backup_time(globals.keep_chains)
 
816
+
 
817
+    remove_old(col_stats)
 
818
+
 
819
+
 
820
+def remove_old(col_stats):
 
821
+    """
 
822
+    Remove backup files older than globals.remove_time from backend
 
823
+
 
824
+    @type col_stats: CollectionStatus object
 
825
+    @param col_stats: collection status
 
826
+
 
827
+    @rtype: void
 
828
+    @return: void
 
829
+    """
 
830
+    assert globals.remove_time is not None
 
831
+    def set_times_str(setlist):
 
832
+        """Return string listing times of sets in setlist"""
 
833
+        return "\n".join(map(lambda s: dup_time.timetopretty(s.get_time()),
 
834
+                             setlist))
 
835
+
 
836
+    req_list = col_stats.get_older_than_required(globals.remove_time)
 
837
+    if req_list:
 
838
+        log.Warn("%s\n%s\n%s" %
 
839
+                 (_("There are backup set(s) at time(s):"),
 
840
+                  set_times_str(req_list),
 
841
+                  _("Which can't be deleted because newer sets depend on them.")))
 
842
+
 
843
+    if (col_stats.matched_chain_pair and
 
844
+        col_stats.matched_chain_pair[1].end_time < globals.remove_time):
 
845
+        log.Warn(_("Current active backup chain is older than specified time.  "
 
846
+                   "However, it will not be deleted.  To remove all your backups, "
 
847
+                   "manually purge the repository."))
 
848
+
 
849
+    setlist = col_stats.get_older_than(globals.remove_time)
 
850
+    if not setlist:
 
851
+        log.Notice(_("No old backup sets found, nothing deleted."))
 
852
+        return
 
853
+    if globals.force:
 
854
+        log.Notice(gettext.ngettext("Deleting backup set at time:",
 
855
+                                    "Deleting backup sets at times:",
 
856
+                                    len(setlist)) +
 
857
+                   "\n" + set_times_str(setlist))
 
858
+        setlist.reverse() # save oldest for last
 
859
+        for set in setlist:
 
860
+            # if remove_all_inc_of_but_n_full_mode mode, remove only incrementals one and not full
 
861
+            if globals.dry_run:
 
862
+                log.Notice("(Not: dry-run) Deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
 
863
+            else:
 
864
+                if globals.remove_all_inc_of_but_n_full_mode and (set.type != "inc") :
 
865
+                    log.Notice("Not deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
 
866
+                else :
 
867
+                    log.Notice("Deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
 
868
+                    set.delete()
 
869
+        col_stats.set_values(sig_chain_warning=None)
 
870
 
871
+        # force a cleanup operation to get rid of unnecessary old cruft
 
872
+        # we said we want to remove them! didn't we, huh?
 
873
+        # bad duplicity, bad doggy!
 
874
+        # note: in the long run backing out changeset 616 might be
 
875
+        # better, but for now this will ease the pain.
 
876
+        globals.extra_clean=True
 
877
+        cleanup(col_stats)
 
878
+    else:
 
879
+        log.Notice(gettext.ngettext("Found old backup set at the following time:",
 
880
+                                    "Found old backup sets at the following times:",
 
881
+                                    len(setlist)) +
 
882
+                   "\n" + set_times_str(setlist) + "\n" +
 
883
+                   _("Rerun command with --force option to actually delete."))
 
884
+        # see above for rationale.
 
885
+        # this here is to print a list of to-be-removed files (--force is off)
 
886
+        globals.extra_clean=True
 
887
+        cleanup(col_stats)
 
888
+
 
889
+
 
890
+def sync_archive(decrypt):
 
891
+    """
 
892
+    Synchronize local archive manifest file and sig chains to remote archives.
 
893
+    Copy missing files from remote to local as needed to make sure the local
 
894
+    archive is synchronized to remote storage.
 
895
+
 
896
+    @rtype: void
 
897
+    @return: void
 
898
+    """
 
899
+    suffixes = [".g", ".gpg", ".z", ".gz", ".part"]
 
900
+
 
901
+    def get_metafiles(filelist):
 
902
+        """
 
903
+        Return metafiles of interest from the file list.
 
904
+        Files of interest are:
 
905
+          sigtar - signature files
 
906
+          manifest - signature files
 
907
+          duplicity partial versions of the above
 
908
+        Files excluded are:
 
909
+          non-duplicity files
 
910
+
 
911
+        @rtype: list
 
912
+        @return: list of duplicity metadata files
 
913
+        """
 
914
+        metafiles = {}
 
915
+        partials = {}
 
916
+        need_passphrase = False
 
917
+        for fn in filelist:
 
918
+            pr = file_naming.parse(fn)
 
919
+            if not pr:
 
920
+                continue
 
921
+            if pr.encrypted:
 
922
+                need_passphrase = True
 
923
+            if pr.type in ["full-sig", "new-sig"] or pr.manifest:
 
924
+                base, ext = os.path.splitext(fn)
 
925
+                if ext not in suffixes:
 
926
+                    base = fn
 
927
+                if pr.partial:
 
928
+                    partials[base] = fn
 
929
+                else:
 
930
+                    metafiles[base] = fn
 
931
+        return metafiles, partials, need_passphrase
 
932
+
 
933
+    def copy_raw(src_iter, filename):
 
934
+        """
 
935
+        Copy data from src_iter to file at fn
 
936
+        """
 
937
+        block_size = 128 * 1024
 
938
+        file = open(filename, "wb")
 
939
+        while True:
 
940
+            try:
 
941
+                data = src_iter.next(block_size).data
 
942
+            except StopIteration:
 
943
+                break
 
944
+            file.write(data)
 
945
+        file.close()
 
946
+
 
947
+    def resolve_basename(fn):
 
948
+        """
 
949
+        @return: (parsedresult, local_name, remote_name)
 
950
+        """
 
951
+        pr = file_naming.parse(fn)
 
952
+
 
953
+        base, ext = os.path.splitext(fn)
 
954
+        if ext not in suffixes:
 
955
+            base = fn
 
956
+
 
957
+        suffix = file_naming.get_suffix(False, not pr.manifest)
 
958
+        loc_name = base + suffix
 
959
+
 
960
+        return (pr, loc_name, fn)
 
961
+
 
962
+    def remove_local(fn):
 
963
+        del_name = globals.archive_dir.append(fn).name
 
964
+
 
965
+        log.Notice(_("Deleting local %s (not authoritative at backend).") % del_name)
 
966
+        try:
 
967
+            util.ignore_missing(os.unlink, del_name)
 
968
+        except Exception, e:
 
969
+            log.Warn(_("Unable to delete %s: %s") % (del_name, str(e)))
 
970
+
 
971
+    def copy_to_local(fn):
 
972
+        """
 
973
+        Copy remote file fn to local cache.
 
974
+        """
 
975
+        class Block:
 
976
+            """
 
977
+            Data block to return from SrcIter
 
978
+            """
 
979
+            def __init__(self, data):
 
980
+                self.data = data
 
981
+
 
982
+        class SrcIter:
 
983
+            """
 
984
+            Iterate over source and return Block of data.
 
985
+            """
 
986
+            def __init__(self, fileobj):
 
987
+                self.fileobj = fileobj
 
988
+
 
989
+            def next(self, size):
 
990
+                try:
 
991
+                    res = Block(self.fileobj.read(size))
 
992
+                except Exception:
 
993
+                    if hasattr(self.fileobj, 'name'):
 
994
+                        name = self.fileobj.name
 
995
+                    else:
 
996
+                        name = None
 
997
+                    log.FatalError(_("Failed to read %s: %s") %
 
998
+                                   (name, sys.exc_info()),
 
999
+                                   log.ErrorCode.generic)
 
1000
+                if not res.data:
 
1001
+                    self.fileobj.close()
 
1002
+                    raise StopIteration
 
1003
+                return res
 
1004
+
 
1005
+            def get_footer(self):
 
1006
+                return ""
 
1007
+
 
1008
+        log.Notice(_("Copying %s to local cache.") % fn)
 
1009
+
 
1010
+        pr, loc_name, rem_name = resolve_basename(fn)
 
1011
+
 
1012
+        fileobj = globals.backend.get_fileobj_read(fn)
 
1013
+        src_iter = SrcIter(fileobj)
 
1014
+        tdp = dup_temp.new_tempduppath(file_naming.parse(loc_name))
 
1015
+        if pr.manifest:
 
1016
+            copy_raw(src_iter, tdp.name)
 
1017
+        else:
 
1018
+            gpg.GzipWriteFile(src_iter, tdp.name, size=sys.maxint)
 
1019
+        tdp.setdata()
 
1020
+        tdp.move(globals.archive_dir.append(loc_name))
 
1021
+
 
1022
+    # get remote metafile list
 
1023
+    remlist = globals.backend.list()
 
1024
+    remote_metafiles, ignored, rem_needpass = get_metafiles(remlist)
 
1025
+
 
1026
+    # get local metafile list
 
1027
+    loclist = globals.archive_dir.listdir()
 
1028
+    local_metafiles, local_partials, loc_needpass = get_metafiles(loclist)
 
1029
+
 
1030
+    # we have the list of metafiles on both sides. remote is always
 
1031
+    # authoritative. figure out which are local spurious (should not
 
1032
+    # be there) and missing (should be there but are not).
 
1033
+    local_keys = local_metafiles.keys()
 
1034
+    remote_keys = remote_metafiles.keys()
 
1035
+
 
1036
+    local_missing = []
 
1037
+    local_spurious = []
 
1038
+
 
1039
+    for key in remote_keys:
 
1040
+        # If we lost our cache, re-get the remote file.  But don't do it if we
 
1041
+        # already have a local partial.  The local partial will already be
 
1042
+        # complete in this case (seems we got interrupted before we could move
 
1043
+        # it to its final location).
 
1044
+        if key not in local_keys and key not in local_partials:
 
1045
+            local_missing.append(remote_metafiles[key])
 
1046
+
 
1047
+    for key in local_keys:
 
1048
+        # If we have a file locally that is unnecessary, delete it.  Also
 
1049
+        # delete final versions of partial files because if we have both, it
 
1050
+        # means the write of the final version got interrupted.
 
1051
+        if key not in remote_keys or key in local_partials:
 
1052
+            local_spurious.append(local_metafiles[key])
 
1053
+
 
1054
+    # finally finish the process
 
1055
+    if not local_missing and not local_spurious:
 
1056
+        log.Notice(_("Local and Remote metadata are synchronized, no sync needed."))
 
1057
+    else:
 
1058
+        local_missing.sort()
 
1059
+        local_spurious.sort()
 
1060
+        if not globals.dry_run:
 
1061
+            log.Notice(_("Synchronizing remote metadata to local cache..."))
 
1062
+            if local_missing and (rem_needpass or loc_needpass):
 
1063
+                if decrypt:
 
1064
+                    # password for the --encrypt-key
 
1065
+                    globals.gpg_profile.passphrase = get_passphrase(1, "sync")
 
1066
+                else:
 
1067
+                    local_missing = [] # don't download if we can't decrypt
 
1068
+            for fn in local_spurious:
 
1069
+                remove_local(fn)
 
1070
+            for fn in local_missing:
 
1071
+                copy_to_local(fn)
 
1072
+        else:
 
1073
+            if local_missing:
 
1074
+                log.Notice(_("Sync would copy the following from remote to local:")
 
1075
+                           + "\n" + "\n".join(local_missing))
 
1076
+            if local_spurious:
 
1077
+                log.Notice(_("Sync would remove the following spurious local files:")
 
1078
+                           + "\n" + "\n".join(local_spurious))
 
1079
+
 
1080
+
 
1081
+def check_last_manifest(col_stats):
 
1082
+    """
 
1083
+    Check consistency and hostname/directory of last manifest
 
1084
+
 
1085
+    @type col_stats: CollectionStatus object
 
1086
+    @param col_stats: collection status
 
1087
+
 
1088
+    @rtype: void
 
1089
+    @return: void
 
1090
+    """
 
1091
+    if not col_stats.all_backup_chains:
 
1092
+        return
 
1093
+    last_backup_set = col_stats.all_backup_chains[-1].get_last()
 
1094
+    last_backup_set.check_manifests()
 
1095
+
 
1096
+
 
1097
+def check_resources(action):
 
1098
+    """
 
1099
+    Check for sufficient resources:
 
1100
+      - temp space for volume build
 
1101
+      - enough max open files
 
1102
+    Put out fatal error if not sufficient to run
 
1103
+
 
1104
+    @type action: string
 
1105
+    @param action: action in progress
 
1106
+
 
1107
+    @rtype: void
 
1108
+    @return: void
 
1109
+    """
 
1110
+    if action in ["full", "inc", "restore"]:
 
1111
+        # Make sure we have enough resouces to run
 
1112
+        # First check disk space in temp area.
 
1113
+        tempfile, tempname = tempdir.default().mkstemp()
 
1114
+        os.close(tempfile)
 
1115
+        # strip off the temp dir and file
 
1116
+        tempfs = os.path.sep.join(tempname.split(os.path.sep)[:-2])
 
1117
+        try:
 
1118
+            stats = os.statvfs(tempfs)
 
1119
+        except Exception:
 
1120
+            log.FatalError(_("Unable to get free space on temp."),
 
1121
+                           log.ErrorCode.get_freespace_failed)
 
1122
+        # Calculate space we need for at least 2 volumes of full or inc
 
1123
+        # plus about 30% of one volume for the signature files.
 
1124
+        freespace = stats[statvfs.F_FRSIZE] * stats[statvfs.F_BAVAIL]
 
1125
+        needspace = (((globals.async_concurrency + 1) * globals.volsize)
 
1126
+                     + int(0.30 * globals.volsize))
 
1127
+        if freespace < needspace:
 
1128
+            log.FatalError(_("Temp space has %d available, backup needs approx %d.") %
 
1129
+                           (freespace, needspace), log.ErrorCode.not_enough_freespace)
 
1130
+        else:
 
1131
+            log.Info(_("Temp has %d available, backup will use approx %d.") %
 
1132
+                     (freespace, needspace))
 
1133
+
 
1134
+        # Some environments like Cygwin run with an artificially
 
1135
+        # low value for max open files.  Check for safe number.
 
1136
+        try:
 
1137
+            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
 
1138
+        except resource.error:
 
1139
+            log.FatalError(_("Unable to get max open files."),
 
1140
+                           log.ErrorCode.get_ulimit_failed)
 
1141
+        maxopen = min([l for l in (soft, hard) if l > -1])
 
1142
+        if maxopen < 1024:
 
1143
+            log.FatalError(_("Max open files of %s is too low, should be >= 1024.\n"
 
1144
+                             "Use 'ulimit -n 1024' or higher to correct.\n") % (maxopen,),
 
1145
+                             log.ErrorCode.maxopen_too_low)
 
1146
+
 
1147
+def log_startup_parms(verbosity=log.INFO):
 
1148
+    """
 
1149
+    log Python, duplicity, and system versions
 
1150
+    """
 
1151
+    log.Log('=' * 80, verbosity)
 
1152
+    log.Log("duplicity 0.6.15 (August 19, 2011)", verbosity)
 
1153
+    log.Log("Args: %s" % (' '.join(sys.argv),), verbosity)
 
1154
+    log.Log(' '.join(platform.uname()), verbosity)
 
1155
+    log.Log("%s %s" % (sys.executable or sys.platform, sys.version), verbosity)
 
1156
+    log.Log('=' * 80, verbosity)
 
1157
+
 
1158
+
 
1159
+class Restart:
 
1160
+    """
 
1161
+    Class to aid in restart of inc or full backup.
 
1162
+    Instance in globals.restart if restart in progress.
 
1163
+    """
 
1164
+    def __init__(self, last_backup):
 
1165
+        self.type = None
 
1166
+        self.start_time = None
 
1167
+        self.end_time = None
 
1168
+        self.start_vol = None
 
1169
+        self.last_index = None
 
1170
+        self.last_block = None
 
1171
+        self.last_backup = last_backup
 
1172
+        self.setParms(last_backup)
 
1173
+
 
1174
+    def setParms(self, last_backup):
 
1175
+        if last_backup.time:
 
1176
+            self.type = "full"
 
1177
+            self.time = last_backup.time
 
1178
+        else:
 
1179
+            self.type = "inc"
 
1180
+            self.end_time = last_backup.end_time
 
1181
+            self.start_time = last_backup.start_time
 
1182
+        # We start one volume back in case we weren't able to finish writing
 
1183
+        # the most recent block.  Actually checking if we did (via hash) would
 
1184
+        # involve downloading the block.  Easier to just redo one block.
 
1185
+        self.start_vol = max(len(last_backup) - 1, 0)
 
1186
+
 
1187
+    def checkManifest(self, mf):
 
1188
+        mf_len = len(mf.volume_info_dict)
 
1189
+        if (mf_len != self.start_vol) or not (mf_len and self.start_vol):
 
1190
+            if self.start_vol == 0:
 
1191
+                # upload of 1st vol failed, clean and restart
 
1192
+                log.Notice(_("RESTART: The first volume failed to upload before termination.\n"
 
1193
+                             "         Restart is impossible...starting backup from beginning."))
 
1194
+                self.last_backup.delete()
 
1195
+                os.execve(sys.argv[0], sys.argv, os.environ)
 
1196
+            elif mf_len - self.start_vol > 0:
 
1197
+                # upload of N vols failed, fix manifest and restart
 
1198
+                log.Notice(_("RESTART: Volumes %d to %d failed to upload before termination.\n"
 
1199
+                             "         Restarting backup at volume %d.") %
 
1200
+                             (self.start_vol + 1, mf_len, self.start_vol + 1))
 
1201
+                for vol in range(self.start_vol + 1, mf_len + 1):
 
1202
+                    mf.del_volume_info(vol)
 
1203
+            else:
 
1204
+                # this is an 'impossible' state, remove last partial and restart
 
1205
+                log.Notice(_("RESTART: Impossible backup state: manifest has %d vols, remote has %d vols.\n"
 
1206
+                             "         Restart is impossible ... duplicity will clean off the last partial\n"
 
1207
+                             "         backup then restart the backup from the beginning.") %
 
1208
+                             (mf_len, self.start_vol))
 
1209
+                self.last_backup.delete()
 
1210
+                os.execve(sys.argv[0], sys.argv[1:], os.environ)
 
1211
+
 
1212
+    def setLastSaved(self, mf):
 
1213
+        vi = mf.volume_info_dict[self.start_vol]
 
1214
+        self.last_index = vi.end_index
 
1215
+        self.last_block = vi.end_block or 0
 
1216
+
 
1217
+
 
1218
+def main():
 
1219
+    """
 
1220
+    Start/end here
 
1221
+    """
 
1222
+    # if python is run setuid, it's only partway set,
 
1223
+    # so make sure to run with euid/egid of root
 
1224
+    if os.geteuid() == 0:
 
1225
+        # make sure uid/gid match euid/egid
 
1226
+        os.setuid(os.geteuid())
 
1227
+        os.setgid(os.getegid())
 
1228
+
 
1229
+    # set the current time strings (make it available for command line processing)
+    dup_time.setcurtime()
+
+    # determine what action we're performing and process command line
+    action = commandline.ProcessCommandLine(sys.argv[1:])
+
+    # set the current time strings again now that we have time separator
+    if globals.current_time:
+        dup_time.setcurtime(globals.current_time)
+    else:
+        dup_time.setcurtime()
+
+    # log some debugging status info
+    log_startup_parms(log.INFO)
+
+    # check for disk space and available file handles
+    check_resources(action)
+
+    # check archive synch with remote, fix if needed
+    decrypt = action not in ["collection-status"]
+    sync_archive(decrypt)
+
+    # get current collection status
+    col_stats = collections.CollectionsStatus(globals.backend,
+                                              globals.archive_dir).set_values()
+
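+    # col_stats is a snapshot of the backup chains found locally and on the
+    # backend; it must be rebuilt (as below) whenever a partial set is
+    # deleted, since deleting invalidates the snapshot.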
+    while True:
+        # if we have to clean up the last partial, then col_stats are invalidated
+        # and we have to start the process all over again until clean.
+        if action in ["full", "inc", "cleanup"]:
+            last_full_chain = col_stats.get_last_backup_chain()
+            if not last_full_chain:
+                break
+            last_backup = last_full_chain.get_last()
+            if last_backup.partial:
+                if action in ["full", "inc"]:
+                    # set restart parms from last_backup info
+                    globals.restart = Restart(last_backup)
+                    # (possibly) reset action
+                    action = globals.restart.type
+                    # reset the time strings
+                    if action == "full":
+                        dup_time.setcurtime(globals.restart.time)
+                    else:
+                        dup_time.setcurtime(globals.restart.end_time)
+                        dup_time.setprevtime(globals.restart.start_time)
+                    # log it -- main restart heavy lifting is done in write_multivol
+                    log.Notice(_("Last %s backup left a partial set, restarting.") % action)
+                    break
+                else:
+                    # remove last partial backup and get new collection status
+                    log.Notice(_("Cleaning up previous partial %s backup set, restarting.") % action)
+                    last_backup.delete()
+                    col_stats = collections.CollectionsStatus(globals.backend,
+                                                              globals.archive_dir).set_values()
+                    continue
+            break
+        break
+
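+    # The while/break/continue arrangement above acts as a retry loop: each
+    # pass either finds a stable collection (break) or deletes a partial set
+    # and rescans (continue) until nothing partial remains.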
+    # OK, now we have a stable collection
+    last_full_time = col_stats.get_last_full_backup_time()
+    if last_full_time > 0:
+        log.Notice(_("Last full backup date:") + " " + dup_time.timetopretty(last_full_time))
+    else:
+        log.Notice(_("Last full backup date: none"))
+    if not globals.restart and action == "inc" and last_full_time < globals.full_force_time:
+        log.Notice(_("Last full backup is too old, forcing full backup"))
+        action = "full"
+    log.PrintCollectionStatus(col_stats)
+
+    os.umask(077)
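+    # 077 masks the group/other permission bits, so everything duplicity
+    # creates from here on is readable and writable by the owner only.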
+
+    # full/inc only needs a passphrase for symmetric keys
+    if action not in ["full", "inc"] or not globals.gpg_profile.recipients:
+        # get the passphrase if we need to based on action/options
+        globals.gpg_profile.passphrase = get_passphrase(1, action)
+
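+    # The first argument to get_passphrase appears to select the prompt
+    # mode (1 = plain request, 2 = confirm, 3 = request and verify); that
+    # reading is inferred from the calls in this file, not from its docs.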
+    if action == "restore":
+        restore(col_stats)
+    elif action == "verify":
+        verify(col_stats)
+    elif action == "list-current":
+        list_current(col_stats)
+    elif action == "collection-status":
+        log.PrintCollectionStatus(col_stats, True)
+    elif action == "cleanup":
+        cleanup(col_stats)
+    elif action == "remove-old":
+        remove_old(col_stats)
+    elif action == "remove-all-but-n-full" or action == "remove-all-inc-of-but-n-full":
+        remove_all_but_n_full(col_stats)
+    elif action == "sync":
+        sync_archive(True)
+    else:
+        assert action == "inc" or action == "full", action
+        # the passphrase for full and inc is used by --sign-key
+        # the sign key can have a different passphrase than the encrypt
+        # key, therefore request a passphrase
+        if globals.gpg_profile.sign_key:
+            globals.gpg_profile.signing_passphrase = get_passphrase(3, action, True)
+
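+        # get_passphrase(3, action, True) appears to request the signing
+        # passphrase (the trailing True marking it as a signing request),
+        # kept separate because the sign key may use its own passphrase.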
+        # if there are no recipients (no --encrypt-key), it must be a
+        # symmetric key. Therefore, confirm the passphrase
+        if not globals.gpg_profile.recipients:
+            globals.gpg_profile.passphrase = get_passphrase(2, action)
+            # a limitation in the GPG implementation does not allow for
+            # inputting different passphrases, this affects symmetric+sign.
+            # An empty signing passphrase is still allowed, so a non-empty
+            # symmetric passphrase can be used on its own.
+            if (globals.gpg_profile.signing_passphrase and
+                globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase):
+                log.FatalError("When using symmetric encryption, the signing "
+                               "passphrase must equal the encryption passphrase.",
+                               log.ErrorCode.user_error)
+
+        if action == "full":
+            full_backup(col_stats)
+        else:  # attempt incremental
+            sig_chain = check_sig_chain(col_stats)
+            # action == "inc" was requested, but no full backup is available
+            if not sig_chain:
+                full_backup(col_stats)
+            else:
+                if not globals.restart:
+                    # only ask for a passphrase if there was a previous backup
+                    if col_stats.all_backup_chains:
+                        globals.gpg_profile.passphrase = get_passphrase(1, action)
+                    check_last_manifest(col_stats)  # not needed for full backup
+                incremental_backup(sig_chain)
+    globals.backend.close()
+    log.shutdown()
+    if exit_val is not None:
+        sys.exit(exit_val)
+
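+# exit_val is presumably a module-level status set earlier when an error was
+# logged without aborting; main() propagates it via sys.exit() so scripted
+# callers still see a non-zero exit code.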
+
+def with_tempdir(fn):
+    """
+    Execute function and guarantee cleanup of tempdir is called
+
+    @type fn: callable function
+    @param fn: function to execute
+
+    @return: void
+    @rtype: void
+    """
+    try:
+        fn()
+    finally:
+        tempdir.default().cleanup()
+
+
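+# Because with_tempdir uses try/finally, the temporary working directory is
+# cleaned up on every exit path, including the sys.exit() calls below
+# (SystemExit propagates through the finally block before the process ends).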
+if __name__ == "__main__":
+    # The following is for starting remote debugging in Eclipse with Pydev.
+    # Adjust the path to your location and version of Eclipse and Pydev.  Comment out
+    # to run normally, or this process will hang at pydevd.settrace() waiting for the
+    # remote debugger to start.
+#    pysrc = "/opt/Aptana Studio 2/plugins/org.python.pydev.debug_2.1.0.2011052613/pysrc/"
+#    sys.path.append(pysrc)
+#    import pydevd #@UnresolvedImport
+#    pydevd.settrace()
+    # end remote debugger startup
+
+    try:
+        with_tempdir(main)
+
+    # Don't move this lower.  In order to get an exit
+    # status out of the system, you have to call the
+    # sys.exit() function.  Python handles this by
+    # raising the SystemExit exception.  Cleanup code
+    # goes here, if needed.
+    except SystemExit, e:
+        # No traceback, just get out
+        sys.exit(e)
+
+    except KeyboardInterrupt, e:
+        # No traceback, just get out
+        log.Info(_("INT intercepted...exiting."))
+        sys.exit(4)
+
+    except gpg.GPGError, e:
+        # For gpg errors, don't show an ugly stack trace by
+        # default. But do with sufficient verbosity.
+        log.Info(_("GPG error detail: %s")
+                 % (''.join(traceback.format_exception(*sys.exc_info()))))
+        log.FatalError("%s: %s" % (e.__class__.__name__, e.args[0]),
+                       log.ErrorCode.gpg_failed,
+                       e.__class__.__name__)
+
+    except duplicity.errors.UserError, e:
+        # For user errors, don't show an ugly stack trace by
+        # default. But do with sufficient verbosity.
+        log.Info(_("User error detail: %s")
+                 % (''.join(traceback.format_exception(*sys.exc_info()))))
+        log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
+                       log.ErrorCode.user_error,
+                       e.__class__.__name__)
+
+    except duplicity.errors.BackendException, e:
+        # For backend errors, don't show an ugly stack trace by
+        # default. But do with sufficient verbosity.
+        log.Info(_("Backend error detail: %s")
+                 % (''.join(traceback.format_exception(*sys.exc_info()))))
+        log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
+                       log.ErrorCode.user_error,
+                       e.__class__.__name__)
+
+    except Exception, e:
+        if "Forced assertion for testing" in str(e):
+            log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
+                           log.ErrorCode.exception,
+                           e.__class__.__name__)
+        else:
+            # Traceback and that mess
+            log.FatalError("%s" % (''.join(traceback.format_exception(*sys.exc_info()))),
+                           log.ErrorCode.exception,
+                           e.__class__.__name__)