~verterok/ubuntuone-client/tritcask-fix-825366-stable-3-0

« back to all changes in this revision

Viewing changes to ubuntuone/platform/tools/__init__.py

  • Committer: Tarmac
  • Author(s): Natalia B. Bidart
  • Date: 2011-11-02 19:54:16 UTC
  • mfrom: (1144.1.64 u1sdtool-multiplatform)
  • Revision ID: tarmac-20111102195416-7dsu24ku871u75dv
- Do not start more than one instance of ubuntuone-syncdaemon (removed buggy duplication of is_running method) (LP: #803672).
- Make u1sdtool multiplatform (LP: #868661).
- Provide feature parity for SyncDaemonTool between linux and windows (LP: #879561).
- Make SyncDaemonTool only fire returned deferreds when the operation is fully completed (LP: #879556, LP: #879558).
- Replace some of the mocker tests with regular tests (LP: #879564).

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
# -*- coding: utf-8 -*-
 
2
#
 
3
# Authors: Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
 
4
#          Natalia B. Bidart <natalia.bidart@canonical.com>
 
5
#
 
6
# Copyright 2009-2011 Canonical Ltd.
 
7
#
 
8
# This program is free software: you can redistribute it and/or modify it
 
9
# under the terms of the GNU General Public License version 3, as published
 
10
# by the Free Software Foundation.
 
11
#
 
12
# This program is distributed in the hope that it will be useful, but
 
13
# WITHOUT ANY WARRANTY; without even the implied warranties of
 
14
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
 
15
# PURPOSE.  See the GNU General Public License for more details.
 
16
#
 
17
# You should have received a copy of the GNU General Public License along
 
18
# with this program.  If not, see <http://www.gnu.org/licenses/>.
 
19
 
 
20
"""SyncDaemon Tools."""
 
21
 
 
22
import logging
 
23
import sys
 
24
import time
 
25
import warnings
 
26
 
 
27
from twisted.internet import defer
 
28
from ubuntuone.logger import log_call
 
29
 
 
30
if sys.platform == 'win32':
 
31
    from ubuntuone.platform.tools import windows
 
32
    source = windows
 
33
else:
 
34
    from ubuntuone.platform.tools import linux
 
35
    source = linux
 
36
 
 
37
 
 
38
logger = logging.getLogger('ubuntuone.SyncDaemon.SDTool')
 
39
is_already_running = source.is_already_running
 
40
IPCError = source.IPCError
 
41
 
 
42
 
 
43
def is_running(bus=None):
    """Check if syncdaemon is running, without starting it.

    This method is DEPRECATED, please use is_already_running instead.

    """
    warnings.warn('Use is_already_running instead.', DeprecationWarning)
    return is_already_running(bus=bus)
 
51
 
 
52
 
 
53
class SyncDaemonTool(object):
    """Various utility methods to test/play with the SyncDaemon."""

    def __init__(self, bus=None):
        """Create the tool, building the platform-specific IPC proxy.

        'bus' is forwarded to the proxy (meaningful where the backend uses a
        bus, e.g. DBus on linux) and stored for later daemon checks.
        """
        self.bus = bus
        # Timestamp of the last received 'Event' signal (0 means none yet).
        self.last_event = 0
        # IDelayedCall used by wait_no_more_events to detect quiescence.
        self.delayed_call = None
        self.log = logger
        self.proxy = source.SyncDaemonToolProxy(bus=bus)

    def _get_dict(self, a_dict):
        """Convert a dict returned by the IPC client to a dict of strings."""
        str_dict = {}
        for key in a_dict:
            str_dict[key] = unicode(a_dict[key])
        return str_dict

    def shutdown(self):
        """Close connections."""
        return self.proxy.shutdown()

    def connect_signal(self, signal_name, handler):
        """Connect 'handler' with 'signal_name'.

        Return the match object needed by disconnect_signal.
        """
        return self.proxy.connect_signal(signal_name=signal_name,
                                         handler=handler)

    def disconnect_signal(self, signal_name, handler_or_match):
        """Disconnect 'handler_or_match' from 'signal_name'."""
        return self.proxy.disconnect_signal(signal_name=signal_name,
                                            handler_or_match=handler_or_match)

    @log_call(logger.debug)
    def wait_connected(self):
        """Wait until syncdaemon is connected to the server.

        Return an already-fired deferred: callback(True) on success, or
        errback'd with the failure if the daemon is not connected.
        """
        d = defer.Deferred()

        # check if the syncdaemon is running
        try:
            self.proxy.wait_connected()
            self.log.debug('wait_connected: Done!')
            d.callback(True)
        except Exception as e:  # catch all errors, pylint: disable=W0703
            self.log.debug('Not connected: %s', e)
            # errback() with no argument wraps the exception in flight
            d.errback()

        return d

    def _wait_no_more_transfers(self, get_current, kind, verbose):
        """Poll 'get_current' until it reports no transfers in progress.

        'kind' is 'downloads' or 'uploads' and is used only in messages; if
        'verbose' is True, progress is written to stdout.
        """
        d = get_current()

        def reply_handler(transfers):
            """Check if there are transfers in progress.

            If so, reschedule a new check if there is at least one.

            """
            if verbose:
                sys.stdout.write(', %s' % str(len(transfers)))
                sys.stdout.flush()
            if len(transfers) > 0:
                self.log.debug('wait_all_%s: %d', kind, len(transfers))
                return get_current()
            else:
                self.log.debug('wait_all_%s: No more %s', kind, kind)
                return True

        if verbose:
            sys.stdout.write('\nchecking current %s' % kind)
            sys.stdout.flush()
        d.addCallback(reply_handler)
        return d

    @log_call(logger.debug)
    def wait_all_downloads(self, verbose=False):
        """Wait until there is no more pending downloads."""
        return self._wait_no_more_transfers(self.get_current_downloads,
                                            'downloads', verbose)

    @log_call(logger.debug)
    def wait_all_uploads(self, verbose=False):
        """Wait until there is no more pending uploads."""
        return self._wait_no_more_transfers(self.get_current_uploads,
                                            'uploads', verbose)

    @log_call(logger.debug)
    def wait_no_more_events(self, last_event_interval, verbose=False):
        """Wait until no more events are fired by the syncdaemon.

        Each incoming 'Event' signal resets a timer of last_event_interval
        seconds; the returned deferred fires once the timer expires without
        a new event arriving.
        """
        from twisted.internet import reactor
        d = defer.Deferred()

        def check_last_event():
            """Check time!

            Check if the daemon is connected and didn't received any events
            in the last_event_interval.

            """
            current_time = time.time()
            if self.last_event and \
               current_time - self.last_event < last_event_interval:
                # keep it running in case this is the last event
                self.log.debug('rescheduling wait_no_more_events')
                if not self.delayed_call.active():
                    self.delayed_call = reactor.callLater(last_event_interval,
                                                          check_last_event)
                else:
                    self.delayed_call.reset(last_event_interval)
            else:
                self.log.debug('wait_no_more_events: No more events!')
                d.callback(True)

        if verbose:
            sys.stdout.write("Listening events")
            sys.stdout.flush()

        def event_handler(event_dict):
            """Update last_event and run checks."""
            self.last_event = time.time()
            self.log.debug('wait_no_more_events - new event: %s - %s',
                           event_dict['event_name'], str(self.last_event))
            if verbose:
                sys.stdout.write('.')
                sys.stdout.flush()
            if self.delayed_call.active():
                self.delayed_call.reset(last_event_interval)

        match = self.connect_signal(signal_name='Event', handler=event_handler)

        def cleanup(result):
            """Remove the signal handler."""
            self.disconnect_signal(signal_name='Event', handler_or_match=match)
            return result

        d.addBoth(cleanup)

        # in case the daemon already reached nirvana
        self.delayed_call = reactor.callLater(last_event_interval,
                                              check_last_event)
        return d

    @log_call(logger.debug)
    def wait_for_nirvana(self, last_event_interval=5, verbose=False):
        """Wait until the syncdaemon reaches nirvana.

        This is when there are:
            - the syncdaemon is connected
            - 0 transfers in progress
            - no more events are fired in the event queue

        @param last_event_interval: the seconds to wait to determine that
        there are no more events in the queue and the daemon reached nirvana

        """
        return self.proxy.call_method('sync_daemon', 'wait_for_nirvana',
                                      last_event_interval)

    @log_call(logger.debug)
    def wait_for_signals(self, signal_ok, signal_error=None,
                         success_filter=lambda *a: True,
                         error_filter=lambda *a: True, **kwargs):
        """Wait for one of the specified signals, return a deferred.

        @param signal_ok: this will fire the deferred's callback

        @param signal_error: this will fire the deferred's errback

        @param success_filter: callable to filter the signal_ok, must return
        True or False. If True is returned, the returned deferred is fired.

        @param error_filter: callable to filter the signal_error, must return
        True or False. If True is returned, the returned deferred is errback'd.

        Other params will be ignored.

        """
        d = defer.Deferred()

        def _success_handler(*args):
            """Callback 'd' only if the success_filter returns True."""
            try:
                if success_filter(*args):
                    d.callback(args)
            except Exception as e:
                logger.exception('wait_for_signals: success_handler failed:')
                d.errback(IPCError(e.__class__.__name__, args, e.message))

        def _error_handler(*args):
            """Errback 'd' only if the error_filter returns True."""
            try:
                if error_filter(*args):
                    d.errback(IPCError(signal_error, args))
            except Exception as e:
                logger.exception('wait_for_signals: error_handler failed:')
                d.errback(IPCError(e.__class__.__name__, args, e.message))

        # register signal handlers for success/error
        match_ok = self.connect_signal(signal_name=signal_ok,
                                       handler=_success_handler)

        if signal_error is not None:
            match_error = self.connect_signal(signal_name=signal_error,
                                              handler=_error_handler)

        def remove_signal_receiver(r):
            """Cleanup the signal receivers."""
            self.disconnect_signal(signal_name=signal_ok,
                                   handler_or_match=match_ok)
            if signal_error is not None:
                self.disconnect_signal(signal_name=signal_error,
                                       handler_or_match=match_error)
            return r

        d.addBoth(remove_signal_receiver)
        return d

    @log_call(logger.debug)
    def wait_for_signal(self, signal_name, filter):
        """Wait for the specified signal (the first received).

        @param signal_name: the signal name
        @param filter: a callable to filter signal, must return True, and is
        used to fire the deferred callback.

        DEPRECATED. Use wait_for_signals instead.

        """
        return self.wait_for_signals(signal_ok=signal_name,
                                     success_filter=filter)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def get_current_downloads(self):
        """Return a deferred that will be fired with the current downloads."""
        results = yield self.proxy.call_method('status', 'current_downloads')
        downloads = [self._get_dict(r) for r in results]
        self.log.debug('downloads: %r', downloads)
        defer.returnValue(downloads)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def get_current_uploads(self):
        """Return a deferred that will be fired with the current uploads."""
        results = yield self.proxy.call_method('status', 'current_uploads')
        uploads = [self._get_dict(r) for r in results]
        self.log.debug('uploads: %r', uploads)
        defer.returnValue(uploads)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def accept_share(self, share_id):
        """Accept the share with id: share_id."""
        # only fire once the daemon answered about this specific share
        d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
                success_filter=lambda info: info['volume_id'] == share_id)
        self.proxy.call_method('shares', 'accept_share', share_id)
        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def reject_share(self, share_id):
        """Reject the share with id: share_id."""
        d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
                success_filter=lambda info: info['volume_id'] == share_id)
        self.proxy.call_method('shares', 'reject_share', share_id)
        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def subscribe_share(self, share_id):
        """Subscribe to a share given its id."""
        d = self.wait_for_signals('ShareSubscribed', 'ShareSubscribeError',
                success_filter=lambda info: info['volume_id'] == share_id,
                error_filter=lambda info, _: info['volume_id'] == share_id)
        self.proxy.call_method('shares', 'subscribe', share_id)
        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def unsubscribe_share(self, share_id):
        """Unsubscribe from a share given its id."""
        d = self.wait_for_signals('ShareUnSubscribed', 'ShareUnSubscribeError',
                success_filter=lambda info: info['volume_id'] == share_id,
                error_filter=lambda info, _: info['volume_id'] == share_id)
        self.proxy.call_method('shares', 'unsubscribe', share_id)
        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def get_shares(self):
        """Get the list of shares (accepted or not)."""
        results = yield self.proxy.call_method('shares', 'get_shares')
        shares = [self._get_dict(r) for r in results]
        self.log.debug('shares: %r', shares)
        defer.returnValue(shares)

    @log_call(logger.debug)
    def refresh_shares(self):
        """Request a refresh of share list to the server."""
        return self.proxy.call_method('shares', 'refresh_shares')

    @log_call(logger.debug)
    def offer_share(self, path, username, name, access_level):
        """Offer a share at the specified path to user with id: username."""
        return self.proxy.call_method('shares', 'create_share', path,
                                             username, name, access_level)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def list_shared(self):
        """Get the list of the shares "shared"/created/offered."""
        results = yield self.proxy.call_method('shares', 'get_shared')
        shares = [self._get_dict(r) for r in results]
        self.log.debug('shared: %r', shares)
        defer.returnValue(shares)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def create_folder(self, path):
        """Create a user defined folder in the specified path."""
        d = self.wait_for_signals('FolderCreated', 'FolderCreateError',
                success_filter=lambda info: info['path'] == path,
                error_filter=lambda info, _: info['path'] == path)

        self.proxy.call_method('folders', 'create', path)

        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.info)
    def delete_folder(self, folder_id):
        """Delete a user defined folder given its id."""
        d = self.wait_for_signals('FolderDeleted', 'FolderDeleteError',
                success_filter=lambda info: info['volume_id'] == folder_id,
                error_filter=lambda info, _: info['volume_id'] == folder_id)

        self.proxy.call_method('folders', 'delete', folder_id)

        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def subscribe_folder(self, folder_id):
        """Subscribe to a user defined folder given its id."""
        d = self.wait_for_signals('FolderSubscribed', 'FolderSubscribeError',
                success_filter=lambda info: info['volume_id'] == folder_id,
                error_filter=lambda info, _: info['volume_id'] == folder_id)

        self.proxy.call_method('folders', 'subscribe', folder_id)

        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def unsubscribe_folder(self, folder_id):
        """Unsubscribe from a user defined folder given its id."""
        d = self.wait_for_signals(
                'FolderUnSubscribed', 'FolderUnSubscribeError',
                success_filter=lambda info: info['volume_id'] == folder_id,
                error_filter=lambda info, _: info['volume_id'] == folder_id)

        self.proxy.call_method('folders', 'unsubscribe', folder_id)

        result, = yield d
        defer.returnValue(result)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def get_folders(self):
        """Return the list of folders (a list of dicts)."""
        results = yield self.proxy.call_method('folders', 'get_folders')
        folders = [self._get_dict(r) for r in results]
        self.log.debug('folders: %r', folders)
        defer.returnValue(folders)

    @log_call(logger.debug)
    def get_folder_info(self, path):
        """Call the get_info method for a UDF path."""
        return self.proxy.call_method('folders', 'get_folder_info', path)

    @log_call(logger.debug)
    def get_metadata(self, path):
        """Get metadata for 'path'."""
        return self.proxy.call_method('file_system', 'get_metadata', path)

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def change_public_access(self, path, is_public):
        """Change the public access for a given path."""
        d = self.wait_for_signals(
                'PublicAccessChanged', 'PublicAccessChangeError',
                success_filter=lambda info: info['path'] == path,
                error_filter=lambda info, _: info['path'] == path)

        # the IPC call needs the node identifiers, not the path
        metadata = yield self.get_metadata(path)
        args = (metadata['share_id'], metadata['node_id'], is_public)
        self.proxy.call_method('public_files', 'change_public_access', *args)

        (file_info,) = yield d
        defer.returnValue(file_info)

    @log_call(logger.debug)
    def quit(self):
        """Quit the syncdaemon."""
        if is_already_running(bus=self.bus):
            result = self.proxy.call_method('sync_daemon', 'quit')
        else:
            result = defer.succeed(None)
        return result

    @log_call(logger.debug)
    def connect(self):
        """Connect syncdaemon."""
        return self.proxy.call_method('sync_daemon', 'connect')

    @log_call(logger.debug)
    def disconnect(self):
        """Disconnect syncdaemon."""
        return self.proxy.call_method('sync_daemon', 'disconnect')

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def get_status(self):
        """Get the current_status dict."""
        status = yield self.proxy.call_method('status', 'current_status')
        state_dict = self._get_dict(status)
        state_dict['is_connected'] = bool(state_dict['is_connected'])
        state_dict['is_online'] = bool(state_dict['is_online'])
        state_dict['is_error'] = bool(state_dict['is_error'])
        defer.returnValue(state_dict)

    @log_call(logger.debug)
    def free_space(self, vol_id):
        """Return the free space of the given volume."""
        return self.proxy.call_method('status', 'free_space', vol_id)

    @log_call(logger.debug)
    def waiting(self):
        """Return a description of the waiting queue elements."""
        return self.proxy.call_method('status', 'waiting')

    @log_call(logger.debug)
    def waiting_metadata(self):
        """Return a description of the waiting metadata queue elements."""
        return self.proxy.call_method('status', 'waiting_metadata')

    @log_call(logger.debug)
    def waiting_content(self):
        """Return the waiting content queue elements."""
        return self.proxy.call_method('status', 'waiting_content')

    @log_call(logger.debug)
    def start(self):
        """Start syncdaemon if it's not running."""
        if is_already_running(bus=self.bus):
            result = defer.succeed(None)
        else:
            result = self.proxy.start()
        return result

    @log_call(logger.debug)
    def get_throttling_limits(self):
        """Return a dict with the read and write limits."""
        return self.proxy.call_method('config', 'get_throttling_limits')

    @log_call(logger.debug)
    def set_throttling_limits(self, read_limit, write_limit):
        """Set the read and write limits."""
        return self.proxy.call_method('config', 'set_throttling_limits',
                                             read_limit, write_limit)

    def is_setting_enabled(self, setting_name):
        """Return whether 'setting_name' is enabled."""
        return self.proxy.call_method('config', '%s_enabled' % setting_name)

    def enable_setting(self, setting_name, enabled):
        """Enable/disable 'setting_name'."""
        if enabled:
            method = 'enable_%s'
        else:
            method = 'disable_%s'
        return self.proxy.call_method('config', method % setting_name)

    @log_call(logger.debug)
    def is_throttling_enabled(self):
        """Check if throttling is enabled."""
        return self.is_setting_enabled('bandwidth_throttling')

    @log_call(logger.debug)
    def enable_throttling(self, enabled=True):
        """Enable/disable throttling."""
        return self.enable_setting('bandwidth_throttling', enabled)

    @log_call(logger.debug)
    def is_files_sync_enabled(self):
        """Check if files sync is enabled."""
        return self.is_setting_enabled('files_sync')

    @defer.inlineCallbacks
    @log_call(logger.debug)
    def enable_files_sync(self, enabled):
        """Enable/disable files sync."""
        yield self.enable_setting('files_sync', enabled)
        if not enabled:
            # User requested the service to be disabled
            yield self.quit()
        else:
            # User requested the service to be enabled
            yield self.start()

    @log_call(logger.debug)
    def is_autoconnect_enabled(self):
        """Check if autoconnect is enabled."""
        return self.is_setting_enabled('autoconnect')

    @log_call(logger.debug)
    def enable_autoconnect(self, enabled):
        """Enable/disable autoconnect."""
        return self.enable_setting('autoconnect', enabled)

    @log_call(logger.debug)
    def is_show_all_notifications_enabled(self):
        """Check if show_all_notifications is enabled."""
        return self.is_setting_enabled('show_all_notifications')

    @log_call(logger.debug)
    def enable_show_all_notifications(self, enabled):
        """Enable/disable show_all_notifications."""
        return self.enable_setting('show_all_notifications', enabled)

    @log_call(logger.debug)
    def is_share_autosubscribe_enabled(self):
        """Check if share_autosubscribe is enabled."""
        return self.is_setting_enabled('share_autosubscribe')

    @log_call(logger.debug)
    def enable_share_autosubscribe(self, enabled):
        """Enable/disable share_autosubscribe."""
        return self.enable_setting('share_autosubscribe', enabled)

    @log_call(logger.debug)
    def is_udf_autosubscribe_enabled(self):
        """Check if udf_autosubscribe is enabled."""
        return self.is_setting_enabled('udf_autosubscribe')

    @log_call(logger.debug)
    def enable_udf_autosubscribe(self, enabled):
        """Enable/disable udf_autosubscribe."""
        return self.enable_setting('udf_autosubscribe', enabled)

    @log_call(logger.debug)
    def refresh_volumes(self):
        """Request the volumes list to the server."""
        return self.proxy.call_method('folders', 'refresh_volumes')

    @log_call(logger.debug)
    def rescan_from_scratch(self, volume_id):
        """Request a rescan from scratch for volume_id."""
        return self.proxy.call_method('sync_daemon', 'rescan_from_scratch',
                                      volume_id)

    @log_call(logger.debug)
    def get_dirty_nodes(self):
        """Return the list of dirty nodes."""
        return self.proxy.call_method('file_system', 'get_dirty_nodes')

    @log_call(logger.debug)
    def get_root_dir(self):
        """Return the root directory."""
        return self.proxy.call_method('sync_daemon', 'get_rootdir')

    @log_call(logger.debug)
    def get_shares_dir(self):
        """Return the shares directory."""
        return self.proxy.call_method('sync_daemon', 'get_sharesdir')

    @log_call(logger.debug)
    def get_shares_dir_link(self):
        """Return the shares link directory."""
        return self.proxy.call_method('sync_daemon', 'get_sharesdir_link')

    @log_call(logger.debug)
    def set_status_changed_handler(self, handler):
        """Set the status changed handler."""
        return self.connect_signal(signal_name='StatusChanged',
                                   handler=handler)
 
662
 
 
663
 
 
664
# callbacks used by u1sdtool script
 
665
 
 
666
def show_shared(shares, out):
    """Write to 'out' a listing of the shares this user has offered."""
    if not shares:
        out.write("No shared\n")
    else:
        out.write("Shared list:\n")
    # The template never changes, so build it once before the loop.
    template = ('  id=%s name=%s accepted=%s '
                'access_level=%s to=%s path=%s\n')
    for share in shares:
        fields = (share['volume_id'], share['name'], bool(share['accepted']),
                  share['access_level'], share['other_username'],
                  share['path'])
        out.write(template % fields)
 
680
 
 
681
 
 
682
def show_folders(folders, out):
    """Write to 'out' a listing of the user defined folders."""
    header = "Folder list:\n" if folders else "No folders\n"
    out.write(header)
    for folder in folders:
        out.write('  id=%s subscribed=%s path=%s\n'
                  % (folder['volume_id'], bool(folder['subscribed']),
                     folder['path']))
 
693
 
 
694
 
 
695
def show_error(error, out):
    """Format an error (a failure wrapping an IPC error) onto 'out'."""
    # Raise and immediately trap the wrapped exception, then format its
    # args: (signal_name, (signal_args_dict, return_value)).
    try:
        raise error.value
    except:
        signal, (args, retval) = error.value.args
        joined = u", ".join("%s=%s" % item for item in args.items())
        out.write(u"%s: %s (%s)\n" % (signal, retval, joined))
 
704
 
 
705
 
 
706
def show_shares(shares, out):
    """Write to 'out' a listing of the shares offered to this user."""
    if not shares:
        out.write("No shares\n")
    else:
        out.write("Shares list:\n")
    template = (' id=%s name=%s accepted=%s subscribed=%s access_level=%s '
                'from=%s\n')
    for share in shares:
        fields = (share['volume_id'], share['name'], bool(share['accepted']),
                  bool(share['subscribed']), share['access_level'],
                  share['other_username'])
        out.write(template % fields)
 
718
 
 
719
 
 
720
def show_path_info(result, path, out):
    """Write the metadata in `result` for `path`, one key per line."""
    assert isinstance(path, unicode)
    out.write(" File: %s\n" % path)
    # Keys are shown in deterministic (sorted) order.
    for key in sorted(result):
        out.write("  %s: %s\n" % (key, result[key]))
 
728
 
 
729
 
 
730
def show_uploads(uploads, out):
    """Write the in-progress uploads to `out`."""
    if not uploads:
        out.write("Current uploads: 0\n")
        return
    out.write("Current uploads:\n")
    for info in uploads:
        out.write("  path: %s\n" % info['path'])
        # deflated_size may be missing while the upload is being set up.
        out.write("    deflated size: %s\n" % info.get('deflated_size', 'N/A'))
        out.write("    bytes written: %s\n" % info['n_bytes_written'])
 
741
 
 
742
 
 
743
def show_downloads(downloads, out):
    """Write the in-progress downloads to `out`."""
    if not downloads:
        out.write("Current downloads: 0\n")
        return
    out.write("Current downloads:\n")
    for info in downloads:
        out.write("  path: %s\n" % info['path'])
        # deflated_size may be missing until the server announces it.
        out.write("    deflated size: %s\n" % info.get('deflated_size', 'N/A'))
        out.write("    bytes read: %s\n" % info['n_bytes_read'])
 
754
 
 
755
 
 
756
def show_state(state_dict, out):
    """Write the daemon state to `out`.

    The 'name' entry is the headline; the remaining entries are listed
    sorted by key. The previous implementation popped 'name' and so
    mutated the caller's dict as a side effect; this version leaves
    `state_dict` untouched.
    """
    out.write("State: %s\n" % state_dict['name'])
    for k, v in sorted(state_dict.items()):
        if k != 'name':
            out.write("    %s: %s\n" % (k, v))
    out.write("\n")
 
762
 
 
763
 
 
764
def show_free_space(free_space, out):
    """Write the free space (in bytes) to `out`."""
    message = "Free space: %d bytes\n" % (free_space,)
    out.write(message)
 
767
 
 
768
 
 
769
def show_waiting(waiting_ops, out):
    """Write the waiting commands to `out`.

    We receive an unordered dict per command, but always try to show
    first the command name, if it's running or not, the share_id, then
    the node_id, then the path, and the rest in alphabetical order.

    The previous implementation popped keys out of each `op_data` dict,
    mutating the caller's data as a side effect; this version works on
    a shallow copy and leaves the input untouched.
    """
    for op_name, op_id, op_data in waiting_ops:
        data = dict(op_data)  # don't mutate the caller's dict
        attributes = []

        # running flag first, if present
        running = data.pop('running', None)
        if running is not None:
            bool_text = u'True' if running else u'False'
            attributes.append(u"running=%s" % (bool_text,))

        # then the well-known attributes, in this fixed order
        for attr in ('share_id', 'node_id', 'path'):
            if attr in data:
                attributes.append(u"%s='%s'" % (attr, data.pop(attr)))

        # the rest, alphabetically
        for attr in sorted(data):
            attributes.append(u"%s='%s'" % (attr, data[attr]))

        out.write("  %s(%s)\n" % (op_name, u', '.join(attributes)))
 
794
 
 
795
 
 
796
def show_waiting_metadata(waiting_ops, out):
    """Write the waiting_metadata result to `out`.

    Deprecated: this reshapes the (name, data) pairs into the
    (name, op_id, data) triples that show_waiting expects and
    delegates to it.
    """
    out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
    reshaped = ((name, None, data) for name, data in waiting_ops)
    return show_waiting(reshaped, out)
 
805
 
 
806
 
 
807
def show_waiting_content(waiting_ops, out):
    """Write the waiting_content result to `out` (deprecated)."""
    out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
    value_tpl = ("operation='%(operation)s' node_id='%(node)s' "
                 "share_id='%(share)s' path='%(path)s'")
    for value in waiting_ops:
        out.write("%s\n" % (value_tpl % value))
 
815
 
 
816
 
 
817
def show_public_file_info(file_info, out):
    """Write whether the file is published and, if so, its public URL."""
    if not file_info['is_public']:
        out.write("File is not published\n")
    else:
        out.write("File is published at %s\n" % file_info['public_url'])
 
823
 
 
824
 
 
825
def show_dirty_nodes(nodes, out):
    """Write the list of dirty nodes to `out`."""
    if not nodes:
        out.write(" No dirty nodes.\n")
        return
    out.write(" Dirty nodes:\n")
    node_line_tpl = ("mdid: %(mdid)s volume_id: %(share_id)s "
                     "node_id: %(node_id)s is_dir: %(is_dir)s path: %(path)s\n")
    for node in nodes:
        assert isinstance(node['path'], unicode)
        out.write(node_line_tpl % node)