1
# -*- coding: utf-8 -*-
3
# Authors: Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
4
# Natalia B. Bidart <natalia.bidart@canonical.com>
6
# Copyright 2009-2011 Canonical Ltd.
8
# This program is free software: you can redistribute it and/or modify it
9
# under the terms of the GNU General Public License version 3, as published
10
# by the Free Software Foundation.
12
# This program is distributed in the hope that it will be useful, but
13
# WITHOUT ANY WARRANTY; without even the implied warranties of
14
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
15
# PURPOSE. See the GNU General Public License for more details.
17
# You should have received a copy of the GNU General Public License along
18
# with this program. If not, see <http://www.gnu.org/licenses/>.
20
"""SyncDaemon Tools."""
27
from twisted.internet import defer
28
from ubuntuone.logger import log_call
30
if sys.platform == 'win32':
31
from ubuntuone.platform.tools import windows
34
from ubuntuone.platform.tools import linux
38
logger = logging.getLogger('ubuntuone.SyncDaemon.SDTool')
39
is_already_running = source.is_already_running
40
IPCError = source.IPCError
43
def is_running(bus=None):
    """Check if syncdaemon is running, without starting it.

    This method is DEPRECATED, please use is_already_running instead.

    """
    warnings.warn('Use is_already_running instead.', DeprecationWarning)
    return is_already_running(bus=bus)
53
class SyncDaemonTool(object):
    """Various utility methods to test/play with the SyncDaemon."""

    def __init__(self, bus=None):
        # bus is kept for quit()/start(), which re-check the running state
        self.bus = bus
        # timestamp of the last received 'Event' signal (wait_no_more_events)
        self.last_event = 0
        # IDelayedCall used to poll for event-queue quiescence
        self.delayed_call = None
        self.log = logger
        # platform-specific IPC proxy to the syncdaemon process
        self.proxy = source.SyncDaemonToolProxy(bus=bus)
63
def _get_dict(self, a_dict):
64
"""Converts a dict returned by the IPC client to a dict of strings."""
67
str_dict[key] = unicode(a_dict[key])
71
"""Close connections."""
72
return self.proxy.shutdown()
74
def connect_signal(self, signal_name, handler):
75
"""Connect 'handler' with 'signal_name'."""
76
return self.proxy.connect_signal(signal_name=signal_name,
79
def disconnect_signal(self, signal_name, handler_or_match):
80
"""Disconnect 'handler_or_match' from 'signal_name'."""
81
return self.proxy.disconnect_signal(signal_name=signal_name,
82
handler_or_match=handler_or_match)
84
@log_call(logger.debug)
85
def wait_connected(self):
86
"""Wait until syncdaemon is connected to the server."""
89
# check if the syncdaemon is running
91
self.proxy.wait_connected()
92
self.log.debug('wait_connected: Done!')
94
except Exception, e: # catch all errors, pylint: disable=W0703
95
self.log.debug('Not connected: %s', e)
100
@log_call(logger.debug)
101
def wait_all_downloads(self, verbose=False):
102
"""Wait until there is no more pending downloads."""
103
d = self.get_current_downloads()
105
def reply_handler(downloads):
106
"""Check if there are downloads in progress.
108
If so, reschedule a new check if there is at least one.
112
sys.stdout.write(', %s' % str(len(downloads)))
114
if len(downloads) > 0:
115
self.log.debug('wait_all_downloads: %d', len(downloads))
116
return self.get_current_downloads()
118
self.log.debug('wait_all_downloads: No more downloads')
122
sys.stdout.write('\nchecking current downloads')
124
d.addCallback(reply_handler)
127
@log_call(logger.debug)
128
def wait_all_uploads(self, verbose=False):
129
"""Wait until there is no more pending uploads."""
130
d = self.get_current_uploads()
132
def reply_handler(uploads):
133
"""Check if there are uploads in progress.
135
If so, reschedule a new check if there is at least one.
139
sys.stdout.write(', %s' % str(len(uploads)))
142
self.log.debug('wait_all_uploads: %d', len(uploads))
143
return self.get_current_uploads()
145
self.log.debug('wait_all_uploads: No more uploads')
149
sys.stdout.write('\nchecking current uploads')
152
d.addCallback(reply_handler)
155
@log_call(logger.debug)
156
def wait_no_more_events(self, last_event_interval, verbose=False):
157
"""Wait until no more events are fired by the syncdaemon."""
158
from twisted.internet import reactor
161
def check_last_event():
164
Check if the daemon is connected and didn't received any events
165
in the last_event_interval.
168
current_time = time.time()
169
if self.last_event and \
170
current_time - self.last_event < last_event_interval:
171
# keep it running in case this is the last event
172
self.log.debug('rescheduling wait_no_more_events')
173
if not self.delayed_call.active():
174
self.delayed_call = reactor.callLater(last_event_interval,
177
self.delayed_call.reset(last_event_interval)
179
self.log.debug('wait_no_more_events: No more events!')
183
sys.stdout.write("Listening events")
186
def event_handler(event_dict):
187
"""Update last_event and run checks."""
188
self.last_event = time.time()
189
self.log.debug('wait_no_more_events - new event: %s - %s',
190
event_dict['event_name'], str(self.last_event))
192
sys.stdout.write('.')
194
if self.delayed_call.active():
195
self.delayed_call.reset(last_event_interval)
197
match = self.connect_signal(signal_name='Event', handler=event_handler)
200
"""Remove the signal handler."""
201
self.disconnect_signal(signal_name='Event', handler_or_match=match)
206
# in case the daemon already reached nirvana
207
self.delayed_call = reactor.callLater(last_event_interval,
211
@log_call(logger.debug)
212
def wait_for_nirvana(self, last_event_interval=5, verbose=False):
213
"""Wait until the syncdaemon reachs nirvana.
215
This is when there are:
216
- the syncdaemon is connected
217
- 0 transfers inprogress
218
- no more events are fired in the event queue
220
@param last_event_interval: the seconds to wait to determine that there
221
is no more events in the queue and the daemon reached nirvana
224
return self.proxy.call_method('sync_daemon', 'wait_for_nirvana',
227
@log_call(logger.debug)
228
def wait_for_signals(self, signal_ok, signal_error=None,
229
success_filter=lambda *a: True,
230
error_filter=lambda *a: True, **kwargs):
231
"""Wait for one of the specified signals, return a deferred.
233
@param signal_ok: this will fire the deferred's callback
235
@param signal_error: the will fire the deferred's errback
237
@param success_filter: callable to filter the signal_ok, must return
238
True or False. If True is returned, the returned deferred is fired.
240
@param error_filter: callable to filter the signal_error, must return
241
True or False. If True is returned, the returned deferred is errback'd.
243
Other params will be ignored.
248
def _success_handler(*args):
249
"""Callback 'd' only if the success_filter returns True."""
251
if success_filter(*args):
254
logger.exception('wait_for_signals: success_handler failed:')
255
d.errback(IPCError(e.__class__.__name__, args, e.message))
257
def _error_handler(*args):
258
"""Errback 'd' only if the error_filter returns True."""
260
if error_filter(*args):
261
d.errback(IPCError(signal_error, args))
263
logger.exception('wait_for_signals: error_handler failed:')
264
d.errback(IPCError(e.__class__.__name__, args, e.message))
266
# register signal handlers for success/error
267
match_ok = self.connect_signal(signal_name=signal_ok,
268
handler=_success_handler)
270
if signal_error is not None:
271
match_error = self.connect_signal(signal_name=signal_error,
272
handler=_error_handler)
274
def remove_signal_receiver(r):
275
"""Cleanup the signal receivers."""
276
self.disconnect_signal(signal_name=signal_ok,
277
handler_or_match=match_ok)
278
if signal_error is not None:
279
self.disconnect_signal(signal_name=signal_error,
280
handler_or_match=match_error)
283
d.addBoth(remove_signal_receiver)
286
@log_call(logger.debug)
287
def wait_for_signal(self, signal_name, filter):
288
"""Wait for the specified signal (the first received).
290
@param signal_name: the signal name
291
@param filter: a callable to filter signal, must return True, and is
292
used to fire the deferred callback.
294
DEPRECATED. Use wait_for_signals instead.
297
return self.wait_for_signals(signal_ok=signal_name,
298
success_filter=filter)
300
@defer.inlineCallbacks
301
@log_call(logger.debug)
302
def get_current_downloads(self):
303
"""Return a deferred that will be fired with the current downloads."""
304
results = yield self.proxy.call_method('status', 'current_downloads')
305
downloads = [self._get_dict(r) for r in results]
306
self.log.debug('downloads: %r', downloads)
307
defer.returnValue(downloads)
309
@defer.inlineCallbacks
310
@log_call(logger.debug)
311
def get_current_uploads(self):
312
"""Return a deferred that will be fired with the current uploads."""
313
results = yield self.proxy.call_method('status', 'current_uploads')
314
uploads = [self._get_dict(r) for r in results]
315
self.log.debug('uploads: %r', uploads)
316
defer.returnValue(uploads)
318
@defer.inlineCallbacks
319
@log_call(logger.debug)
320
def accept_share(self, share_id):
321
"""Accept the share with id: share_id."""
322
d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
323
success_filter=lambda info: info['volume_id'] == share_id)
324
self.proxy.call_method('shares', 'accept_share', share_id)
326
defer.returnValue(result)
328
@defer.inlineCallbacks
329
@log_call(logger.debug)
330
def reject_share(self, share_id):
331
"""Reject the share with id: share_id."""
332
d = self.wait_for_signals(signal_ok='ShareAnswerResponse',
333
success_filter=lambda info: info['volume_id'] == share_id)
334
self.proxy.call_method('shares', 'reject_share', share_id)
336
defer.returnValue(result)
338
@defer.inlineCallbacks
339
@log_call(logger.debug)
340
def subscribe_share(self, share_id):
341
"""Subscribe to a share given its id."""
342
d = self.wait_for_signals('ShareSubscribed', 'ShareSubscribeError',
343
success_filter=lambda info: info['volume_id'] == share_id,
344
error_filter=lambda info, _: info['volume_id'] == share_id)
345
self.proxy.call_method('shares', 'subscribe', share_id)
347
defer.returnValue(result)
349
@defer.inlineCallbacks
350
@log_call(logger.debug)
351
def unsubscribe_share(self, share_id):
352
"""Unsubscribe from a share given its id."""
353
d = self.wait_for_signals('ShareUnSubscribed', 'ShareUnSubscribeError',
354
success_filter=lambda info: info['volume_id'] == share_id,
355
error_filter=lambda info, _: info['volume_id'] == share_id)
356
self.proxy.call_method('shares', 'unsubscribe', share_id)
358
defer.returnValue(result)
360
@defer.inlineCallbacks
361
@log_call(logger.debug)
362
def get_shares(self):
363
"""Get the list of shares (accepted or not)."""
364
results = yield self.proxy.call_method('shares', 'get_shares')
365
shares = [self._get_dict(r) for r in results]
366
self.log.debug('shares: %r', shares)
367
defer.returnValue(shares)
369
@log_call(logger.debug)
370
def refresh_shares(self):
371
"""Request a refresh of share list to the server."""
372
return self.proxy.call_method('shares', 'refresh_shares')
374
@log_call(logger.debug)
375
def offer_share(self, path, username, name, access_level):
376
"""Offer a share at the specified path to user with id: username."""
377
return self.proxy.call_method('shares', 'create_share', path,
378
username, name, access_level)
380
@defer.inlineCallbacks
381
@log_call(logger.debug)
382
def list_shared(self):
383
"""Get the list of the shares "shared"/created/offered."""
384
results = yield self.proxy.call_method('shares', 'get_shared')
385
shares = [self._get_dict(r) for r in results]
386
self.log.debug('shared: %r', shares)
387
defer.returnValue(shares)
389
@defer.inlineCallbacks
390
@log_call(logger.debug)
391
def create_folder(self, path):
392
"""Create a user defined folder in the specified path."""
393
d = self.wait_for_signals('FolderCreated', 'FolderCreateError',
394
success_filter=lambda info: info['path'] == path,
395
error_filter=lambda info, _: info['path'] == path)
397
self.proxy.call_method('folders', 'create', path)
400
defer.returnValue(result)
402
@defer.inlineCallbacks
403
@log_call(logger.info)
404
def delete_folder(self, folder_id):
405
"""Delete a user defined folder given its id."""
406
d = self.wait_for_signals('FolderDeleted', 'FolderDeleteError',
407
success_filter=lambda info: info['volume_id'] == folder_id,
408
error_filter=lambda info, _: info['volume_id'] == folder_id)
410
self.proxy.call_method('folders', 'delete', folder_id)
413
defer.returnValue(result)
415
@defer.inlineCallbacks
416
@log_call(logger.debug)
417
def subscribe_folder(self, folder_id):
418
"""Subscribe to a user defined folder given its id."""
419
d = self.wait_for_signals('FolderSubscribed', 'FolderSubscribeError',
420
success_filter=lambda info: info['volume_id'] == folder_id,
421
error_filter=lambda info, _: info['volume_id'] == folder_id)
423
self.proxy.call_method('folders', 'subscribe', folder_id)
426
defer.returnValue(result)
428
@defer.inlineCallbacks
429
@log_call(logger.debug)
430
def unsubscribe_folder(self, folder_id):
431
"""Unsubscribe from a user defined folder given its id."""
432
d = self.wait_for_signals(
433
'FolderUnSubscribed', 'FolderUnSubscribeError',
434
success_filter=lambda info: info['volume_id'] == folder_id,
435
error_filter=lambda info, _: info['volume_id'] == folder_id)
437
self.proxy.call_method('folders', 'unsubscribe', folder_id)
440
defer.returnValue(result)
442
@defer.inlineCallbacks
443
@log_call(logger.debug)
444
def get_folders(self):
445
"""Return the list of folders (a list of dicts)."""
446
results = yield self.proxy.call_method('folders', 'get_folders')
447
folders = [self._get_dict(r) for r in results]
448
self.log.debug('folders: %r', folders)
449
defer.returnValue(folders)
451
@log_call(logger.debug)
452
def get_folder_info(self, path):
453
"""Call the get_info method for a UDF path."""
454
return self.proxy.call_method('folders', 'get_folder_info', path)
456
@log_call(logger.debug)
457
def get_metadata(self, path):
458
"""Get metadata for 'path'."""
459
return self.proxy.call_method('file_system', 'get_metadata', path)
461
@defer.inlineCallbacks
462
@log_call(logger.debug)
463
def change_public_access(self, path, is_public):
464
"""Change the public access for a given path."""
465
d = self.wait_for_signals(
466
'PublicAccessChanged', 'PublicAccessChangeError',
467
success_filter=lambda info: info['path'] == path,
468
error_filter=lambda info, _: info['path'] == path)
470
metadata = yield self.get_metadata(path)
471
args = (metadata['share_id'], metadata['node_id'], is_public)
472
self.proxy.call_method('public_files', 'change_public_access', *args)
474
(file_info,) = yield d
475
defer.returnValue(file_info)
477
@log_call(logger.debug)
479
"""Quit the syncdaemon."""
480
if is_already_running(bus=self.bus):
481
result = self.proxy.call_method('sync_daemon', 'quit')
483
result = defer.succeed(None)
486
@log_call(logger.debug)
488
"""Connect syncdaemon."""
489
return self.proxy.call_method('sync_daemon', 'connect')
491
@log_call(logger.debug)
492
def disconnect(self):
493
"""Disconnect syncdaemon."""
494
return self.proxy.call_method('sync_daemon', 'disconnect')
496
@defer.inlineCallbacks
497
@log_call(logger.debug)
498
def get_status(self):
499
"""Get the current_status dict."""
500
status = yield self.proxy.call_method('status', 'current_status')
501
state_dict = self._get_dict(status)
502
state_dict['is_connected'] = bool(state_dict['is_connected'])
503
state_dict['is_online'] = bool(state_dict['is_online'])
504
state_dict['is_error'] = bool(state_dict['is_error'])
505
defer.returnValue(state_dict)
507
@log_call(logger.debug)
508
def free_space(self, vol_id):
509
"""Return the free space of the given volume."""
510
return self.proxy.call_method('status', 'free_space', vol_id)
512
@log_call(logger.debug)
514
"""Return a description of the waiting queue elements."""
515
return self.proxy.call_method('status', 'waiting')
517
@log_call(logger.debug)
518
def waiting_metadata(self):
519
"""Return a description of the waiting metadata queue elements."""
520
return self.proxy.call_method('status', 'waiting_metadata')
522
@log_call(logger.debug)
523
def waiting_content(self):
524
"""Return the waiting content queue elements."""
525
return self.proxy.call_method('status', 'waiting_content')
527
@log_call(logger.debug)
529
"""Start syncdaemon if it's not running."""
530
if is_already_running(bus=self.bus):
531
result = defer.succeed(None)
533
result = self.proxy.start()
536
@log_call(logger.debug)
537
def get_throttling_limits(self):
538
"""Return a dict with the read and write limits."""
539
return self.proxy.call_method('config', 'get_throttling_limits')
541
@log_call(logger.debug)
542
def set_throttling_limits(self, read_limit, write_limit):
543
"""Set the read and write limits."""
544
return self.proxy.call_method('config', 'set_throttling_limits',
545
read_limit, write_limit)
547
def is_setting_enabled(self, setting_name):
548
"""Return whether 'setting_name' is enabled."""
549
return self.proxy.call_method('config', '%s_enabled' % setting_name)
551
def enable_setting(self, setting_name, enabled):
552
"""Enable/disable 'setting_name'."""
556
method = 'disable_%s'
557
return self.proxy.call_method('config', method % setting_name)
559
@log_call(logger.debug)
560
def is_throttling_enabled(self):
561
"""Check if throttling is enabled."""
562
return self.is_setting_enabled('bandwidth_throttling')
564
@log_call(logger.debug)
565
def enable_throttling(self, enabled=True):
566
"""Enable/disablew throttling."""
567
return self.enable_setting('bandwidth_throttling', enabled)
569
@log_call(logger.debug)
570
def is_files_sync_enabled(self):
571
"""Check if files sync is enabled."""
572
return self.is_setting_enabled('files_sync')
574
@defer.inlineCallbacks
575
@log_call(logger.debug)
576
def enable_files_sync(self, enabled):
577
"""Enable/disable files sync."""
578
yield self.enable_setting('files_sync', enabled)
580
# User requested the service to be disabled
583
# User requested the service to be enabled
586
@log_call(logger.debug)
587
def is_autoconnect_enabled(self):
588
"""Check if autoconnect is enabled."""
589
return self.is_setting_enabled('autoconnect')
591
@log_call(logger.debug)
592
def enable_autoconnect(self, enabled):
593
"""Enable/disable autoconnect."""
594
return self.enable_setting('autoconnect', enabled)
596
@log_call(logger.debug)
597
def is_show_all_notifications_enabled(self):
598
"""Check if show_all_notifications is enabled."""
599
return self.is_setting_enabled('show_all_notifications')
601
@log_call(logger.debug)
602
def enable_show_all_notifications(self, enabled):
603
"""Enable/disable show_all_notifications."""
604
return self.enable_setting('show_all_notifications', enabled)
606
@log_call(logger.debug)
607
def is_share_autosubscribe_enabled(self):
608
"""Check if share_autosubscribe is enabled."""
609
return self.is_setting_enabled('share_autosubscribe')
611
@log_call(logger.debug)
612
def enable_share_autosubscribe(self, enabled):
613
"""Enable/disable share_autosubscribe."""
614
return self.enable_setting('share_autosubscribe', enabled)
616
@log_call(logger.debug)
617
def is_udf_autosubscribe_enabled(self):
618
"""Check if udf_autosubscribe is enabled."""
619
return self.is_setting_enabled('udf_autosubscribe')
621
@log_call(logger.debug)
622
def enable_udf_autosubscribe(self, enabled):
623
"""Enable/disable udf_autosubscribe."""
624
return self.enable_setting('udf_autosubscribe', enabled)
626
@log_call(logger.debug)
627
def refresh_volumes(self):
628
"""Request the volumes list to the server."""
629
return self.proxy.call_method('folders', 'refresh_volumes')
631
@log_call(logger.debug)
632
def rescan_from_scratch(self, volume_id):
633
"""Request a rescan from scratch for volume_id."""
634
return self.proxy.call_method('sync_daemon', 'rescan_from_scratch',
637
@log_call(logger.debug)
638
def get_dirty_nodes(self):
639
"""Return the list of dirty nodes."""
640
return self.proxy.call_method('file_system', 'get_dirty_nodes')
642
@log_call(logger.debug)
643
def get_root_dir(self):
644
"""Return the root directory."""
645
return self.proxy.call_method('sync_daemon', 'get_rootdir')
647
@log_call(logger.debug)
648
def get_shares_dir(self):
649
"""Return the shares directory."""
650
return self.proxy.call_method('sync_daemon', 'get_sharesdir')
652
@log_call(logger.debug)
653
def get_shares_dir_link(self):
654
"""Return the shares link directory."""
655
return self.proxy.call_method('sync_daemon', 'get_sharesdir_link')
657
@log_call(logger.debug)
658
def set_status_changed_handler(self, handler):
659
"""Set the status changed handler."""
660
return self.connect_signal(signal_name='StatusChanged',
664
# callbacks used by u1sdtool script

def show_shared(shares, out):
    """Callback that prints the list of shared shares."""
    if len(shares) == 0:
        out.write("No shared\n")
    else:
        out.write("Shared list:\n")
    for share in shares:
        msg_template = ' id=%s name=%s accepted=%s ' + \
                'access_level=%s to=%s path=%s\n'
        out.write(msg_template % (share['volume_id'], share['name'],
                                  bool(share['accepted']),
                                  share['access_level'],
                                  share['other_username'],
                                  share['path']))
682
def show_folders(folders, out):
    """Callback that prints the list of user defined folders."""
    if len(folders) == 0:
        out.write("No folders\n")
    else:
        out.write("Folder list:\n")
    for folder in folders:
        msg_template = ' id=%s subscribed=%s path=%s\n'
        out.write(msg_template % (folder['volume_id'],
                                  bool(folder['subscribed']),
                                  folder['path']))
695
def show_error(error, out):
    """Format an error when things go wrong."""
    # error is a twisted Failure wrapping an IPCError whose args are
    # (signal_name, (signal_args_dict, return_value))
    signal, (args, retval) = error.value.args
    msg_template = u"%s: %s (%s)\n"
    fmtd_args = u", ".join("%s=%s" % (k, v) for k, v in args.items())
    out.write(msg_template % (signal, retval, fmtd_args))
706
def show_shares(shares, out):
    """Callback that prints the list of shares."""
    if len(shares) == 0:
        out.write("No shares\n")
    else:
        out.write("Shares list:\n")
    for share in shares:
        out.write(' id=%s name=%s accepted=%s subscribed=%s access_level=%s '
                  'from=%s\n' %
                  (share['volume_id'], share['name'], bool(share['accepted']),
                   bool(share['subscribed']), share['access_level'],
                   share['other_username']))
720
def show_path_info(result, path, out):
    """Print the path info to stdout."""
    assert isinstance(path, unicode)
    out.write(" File: %s\n" % path)
    keys = list(result.keys())
    # print metadata attributes in a stable, sorted order
    keys.sort()
    for key in keys:
        out.write(" %s: %s\n" % (key, result[key]))
730
def show_uploads(uploads, out):
    """Print the uploads to stdout."""
    if uploads:
        out.write("Current uploads:\n")
    else:
        out.write("Current uploads: 0\n")
    for upload in uploads:
        out.write(" path: %s\n" % upload['path'])
        out.write(" deflated size: %s\n" %
                  upload.get('deflated_size', 'N/A'))
        out.write(" bytes written: %s\n" % upload['n_bytes_written'])


def show_downloads(downloads, out):
    """Print the downloads to stdout."""
    if downloads:
        out.write("Current downloads:\n")
    else:
        out.write("Current downloads: 0\n")
    for download in downloads:
        out.write(" path: %s\n" % download['path'])
        out.write(" deflated size: %s\n" %
                  download.get('deflated_size', 'N/A'))
        out.write(" bytes read: %s\n" % download['n_bytes_read'])
756
def show_state(state_dict, out):
    """Print the state to out."""
    out.write("State: %s\n" % state_dict.pop('name'))
    for k, v in sorted(state_dict.items()):
        out.write(" %s: %s\n" % (k, v))


def show_free_space(free_space, out):
    """Print the free_space result."""
    out.write("Free space: %d bytes\n" % (free_space,))
769
def show_waiting(waiting_ops, out):
    """Print the waiting result.

    We receive an unordered dict, but always try to show first the command
    name, if it's running or not, the share_id, then the node_id, then the
    path, and the rest in alphabetical order.

    """
    for op_name, op_id, op_data in waiting_ops:
        attributes = []
        running = op_data.pop('running', None)
        if running is not None:
            bool_text = u'True' if running else u'False'
            attributes.append(u"running=%s" % (bool_text,))
        # well-known attributes first, in a fixed order
        for attr in ('share_id', 'node_id', 'path'):
            if attr in op_data:
                attributes.append(u"%s='%s'" % (attr, op_data.pop(attr)))
        # whatever is left, alphabetically
        for attr in sorted(op_data):
            attributes.append(u"%s='%s'" % (attr, op_data[attr]))
        out.write(" %s(%s)\n" % (op_name, u', '.join(attributes)))


def show_waiting_metadata(waiting_ops, out):
    """Print the waiting_metadata result.

    We receive an unordered dict, but always try to show first the
    share_id, then the node_id, then the path, and the rest in
    alphabetical order.

    """
    out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
    # adapt the old (name, data) pairs to the (name, id, data) triples
    # that show_waiting expects
    return show_waiting(((x[0], None, x[1]) for x in waiting_ops), out)
807
def show_waiting_content(waiting_ops, out):
    """Print the waiting_content result."""
    out.write("Warning: this option is deprecated! Use '--waiting' instead\n")
    value_tpl = "operation='%(operation)s' node_id='%(node)s' " + \
            "share_id='%(share)s' path='%(path)s'"
    for value in waiting_ops:
        str_value = value_tpl % value
        out.write("%s\n" % str_value)
817
def show_public_file_info(file_info, out):
    """Print the public access information for a file."""
    if file_info['is_public']:
        out.write("File is published at %s\n" % file_info['public_url'])
    else:
        out.write("File is not published\n")


def show_dirty_nodes(nodes, out):
    """Print the list of dirty nodes."""
    if not nodes:
        out.write(" No dirty nodes.\n")
        return
    node_line_tpl = "mdid: %(mdid)s volume_id: %(share_id)s " + \
            "node_id: %(node_id)s is_dir: %(is_dir)s path: %(path)s\n"
    out.write(" Dirty nodes:\n")
    for node in nodes:
        assert isinstance(node['path'], unicode)
        out.write(node_line_tpl % node)