188
188
if not cr.fetchone():
189
189
cr.execute("CREATE INDEX sync_server_update_sequence_id_index on sync_server_update (sequence, id)")
191
def __init__(self, pool, cr):
    """Initialize the model and attach a puller cache to it.

    Binds a SavePullerCache (defined elsewhere in this module) to the
    instance before delegating to the standard model initialization, so
    that _save_puller() can later merge the buffered state in one pass.

    @param pool : OpenERP object pool
    @param cr : database cursor
    """
    # NOTE(review): SavePullerCache is constructed around this model
    # instance; presumably it buffers puller activity until
    # _save_puller() triggers merge() — confirm against its definition.
    self._cache_pullers = SavePullerCache(self)
    super(update, self).__init__(pool, cr)
195
195
def _save_puller(self, cr, uid, context=None):
196
return self._cache_pullers.merge(cr, uid, context)
197
# return self._cache_pullers.merge(cr, uid, context)
198
199
def unfold_package(self, cr, uid, entity, packet, context=None):
314
315
seq = self.browse(cr, uid, ids, context=context)[0].sequence
317
def get_update_to_send(self,cr, uid, entity, update_ids, recover=False, context=None):
319
Called by get_package during the client instance pull process.
320
Return a list of browse_record with only the updates that really need to be send to the client.
321
It filter according to the rules:
322
- rule's directionality is 'up' and update source is a child of entity
323
- rule's directionality is 'down' and update source is an ancestor of entity
324
- rule's directionality is 'bidirectional' (synchronize every where)
325
- rule's directionality is 'bi-private': only the ancestors of update's owner field value
326
are allowed to get the update.
327
Then, it checks the groups.
331
@param entity : browse_record(sync.server.entity) : client instance entity
332
@param update_ids : list of update ids
333
@param recover : flag : if set to True, update of the same source than the entity who try to pull
334
are sent to the entity. Default is False.
335
@param context : context
337
@return : list of browse record of the updates to send
340
ancestor = self.pool.get('sync.server.entity')._get_ancestor(cr, uid, entity.id, context=context)
341
children = self.pool.get('sync.server.entity')._get_all_children(cr, uid, entity.id, context=context)
342
for update in self.browse(cr, uid, update_ids, context=context):
343
if update.rule_id.direction == 'bi-private':
344
if update.is_deleted:
345
privates = [entity.id]
346
elif not update.owner:
349
privates = self.pool.get('sync.server.entity')._get_ancestor(cr, uid, update.owner.id, context=context) + \
353
if not update.rule_id:
354
update_to_send.append(update)
355
elif (update.rule_id.direction == 'up' and update.source.id in children) or \
356
(update.rule_id.direction == 'down' and update.source.id in ancestor) or \
357
(update.rule_id.direction == 'bidirectional') or \
358
(entity.id in privates) or \
359
(recover and entity.id == update.source.id):
361
source_rules_ids = self.pool.get('sync_server.sync_rule')._get_groups_per_rule(cr, uid, update.source, context)
362
s_group = source_rules_ids.get(update.rule_id.id, [])
363
if any(group.id in s_group for group in entity.group_ids):
364
update_to_send.append(update)
365
return update_to_send
367
318
def get_package(self, cr, uid, entity, last_seq, offset, max_size, max_seq, recover=False, context=None):
434
385
timed_out = False
435
386
start_time = time.time()
436
387
self._logger.info("::::::::[%s] Data pull get package:: last_seq = %s, max_seq = %s, offset = %s, max_size = %s" % (entity.name, last_seq, max_seq, offset, max_size))
389
ancestor = self.pool.get('sync.server.entity')._get_ancestor(cr, uid, entity.id, context=context)
390
children = self.pool.get('sync.server.entity')._get_all_children(cr, uid, entity.id, context=context)
437
392
while not ids or not update_to_send:
438
393
if time.time() - start_time > 500:
443
398
ids = map(lambda x:x[0], cr.fetchall())
444
399
if not ids and update_master is None:
446
for update in self.get_update_to_send(cr, uid, entity, ids, recover, context):
447
if update_master is None:
448
update_master = update
449
update_to_send.append(update_master)
450
elif update.model == update_master.model and \
451
update.rule_id.id == update_master.rule_id.id and \
452
update.source.id == update_master.source.id and \
453
update.is_deleted == update_master.is_deleted and \
454
len(update_to_send) < max_size:
455
update_to_send.append(update)
402
for update_to_compute in self.browse(cr, uid, ids, context=context):
404
if update_to_compute.rule_id.direction == 'bi-private':
405
if update_to_compute.is_deleted:
406
privates = [entity.id]
407
elif not update_to_compute.owner:
410
privates = self.pool.get('sync.server.entity')._get_ancestor(cr, uid, update_to_compute.owner.id, context=context) + \
411
[update_to_compute.owner.id]
457
ids = ids[:ids.index(update.id)]
415
if not update_to_compute.rule_id:
416
update = update_to_compute
417
elif (update_to_compute.rule_id.direction == 'up' and update_to_compute.source.id in children) or \
418
(update_to_compute.rule_id.direction == 'down' and update_to_compute.source.id in ancestor) or \
419
(update_to_compute.rule_id.direction == 'bidirectional') or \
420
(entity.id in privates) or \
421
(recover and entity.id == update_to_compute.source.id):
423
if update_to_compute.source not in cache_rule:
424
cache_rule[update_to_compute.source] = self.pool.get('sync_server.sync_rule')._get_groups_per_rule(cr, uid, update_to_compute.source, context)
425
s_group = cache_rule[update_to_compute.source].get(update_to_compute.rule_id.id, [])
426
if any(group.id in s_group for group in entity.group_ids):
427
update = update_to_compute
430
if update_master is None:
431
update_master = update
432
update_to_send.append(update_master)
433
elif update.model == update_master.model and \
434
update.rule_id.id == update_master.rule_id.id and \
435
update.source.id == update_master.source.id and \
436
update.is_deleted == update_master.is_deleted and \
437
len(update_to_send) < max_size:
438
update_to_send.append(update)
440
ids = ids[:ids.index(update.id)]
459
442
offset += len(ids)
460
443
if timed_out and not update_to_send:
461
444
# send a fake update to keep to connection open