1
/* Copyright (c) 2008-2009 Dovecot authors, see the included COPYING file */
5
#include "bsearch-insert-pos.h"
8
#include "mail-index-modseq.h"
9
#include "mail-search-build.h"
10
#include "mailbox-search-result-private.h"
11
#include "index-sync-private.h"
12
#include "index-search-result.h"
13
#include "virtual-storage.h"
17
struct virtual_add_record {
18
struct virtual_mail_index_record rec;
22
struct virtual_sync_mail {
24
struct virtual_mail_index_record vrec;
27
struct virtual_sync_context {
28
struct virtual_mailbox *mbox;
29
struct mail_index_sync_ctx *index_sync_ctx;
30
struct mail_index *index;
31
struct mail_index_view *sync_view;
32
struct mail_index_transaction *trans;
33
const char *const *kw_all;
35
/* messages expunged within this sync */
36
ARRAY_TYPE(seq_range) sync_expunges;
38
ARRAY_DEFINE(all_adds, struct virtual_add_record);
39
enum mailbox_sync_flags flags;
40
uint32_t uid_validity;
42
unsigned int ext_header_changed:1;
43
unsigned int ext_header_rewrite:1;
44
unsigned int expunge_removed:1;
45
unsigned int index_broken:1;
48
static void virtual_sync_set_uidvalidity(struct virtual_sync_context *ctx)
50
uint32_t uid_validity = ioloop_time;
52
mail_index_update_header(ctx->trans,
53
offsetof(struct mail_index_header, uid_validity),
54
&uid_validity, sizeof(uid_validity), TRUE);
55
ctx->uid_validity = uid_validity;
58
static void virtual_sync_external_flags(struct virtual_sync_context *ctx,
59
struct virtual_backend_box *bbox,
60
uint32_t vseq, uint32_t real_uid)
62
enum mail_flags flags;
63
const char *const *kw_names;
64
struct mail_keywords *keywords;
66
if (!mail_set_uid(bbox->sync_mail, real_uid)) {
67
i_panic("UID %u lost unexpectedly from %s",
68
real_uid, bbox->box->name);
72
flags = mail_get_flags(bbox->sync_mail);
73
mail_index_update_flags(ctx->trans, vseq, MODIFY_REPLACE, flags);
76
kw_names = mail_get_keywords(bbox->sync_mail);
77
keywords = mail_index_keywords_create(ctx->index, kw_names);
78
mail_index_update_keywords(ctx->trans, vseq, MODIFY_REPLACE, keywords);
79
mail_index_keywords_free(&keywords);
82
static int virtual_sync_mail_cmp(const void *p1, const void *p2)
84
const struct virtual_sync_mail *m1 = p1, *m2 = p2;
86
if (m1->vrec.mailbox_id < m2->vrec.mailbox_id)
88
if (m1->vrec.mailbox_id > m2->vrec.mailbox_id)
91
if (m1->vrec.real_uid < m2->vrec.real_uid)
93
if (m1->vrec.real_uid > m2->vrec.real_uid)
100
virtual_backend_box_sync_mail_set(struct virtual_backend_box *bbox)
102
struct mailbox_transaction_context *trans;
104
if (bbox->sync_mail == NULL) {
105
trans = mailbox_transaction_begin(bbox->box, 0);
106
bbox->sync_mail = mail_alloc(trans, 0, NULL);
111
virtual_backend_box_sync_mail_unset(struct virtual_backend_box *bbox)
113
struct mailbox_transaction_context *trans;
115
if (bbox->sync_mail != NULL) {
116
trans = bbox->sync_mail->transaction;
117
mail_free(&bbox->sync_mail);
118
(void)mailbox_transaction_commit(&trans);
122
static int bbox_mailbox_id_cmp(const void *p1, const void *p2)
124
const struct virtual_backend_box *const *b1 = p1, *const *b2 = p2;
126
if ((*b1)->mailbox_id < (*b2)->mailbox_id)
128
if ((*b1)->mailbox_id > (*b2)->mailbox_id)
134
virtual_sync_get_backend_box(struct virtual_sync_context *ctx, const char *name,
135
struct virtual_backend_box **bbox_r)
137
*bbox_r = virtual_backend_box_lookup_name(ctx->mbox, name);
138
if (*bbox_r != NULL || !ctx->mbox->sync_initialized)
141
/* another process just added a new mailbox.
142
we can't handle this currently. */
143
ctx->mbox->inconsistent = TRUE;
144
mail_storage_set_error(ctx->mbox->ibox.box.storage, MAIL_ERROR_TEMP,
145
"Backend mailbox added by another session. "
146
"Reopen the virtual mailbox.");
150
static int virtual_sync_ext_header_read(struct virtual_sync_context *ctx)
152
const struct virtual_mail_index_header *ext_hdr;
153
const struct mail_index_header *hdr;
154
const struct virtual_mail_index_mailbox_record *mailboxes;
155
struct virtual_backend_box *bbox, **bboxes;
156
const void *ext_data;
158
unsigned int i, count, ext_name_offset, ext_mailbox_count;
159
uint32_t prev_mailbox_id;
162
hdr = mail_index_get_header(ctx->sync_view);
163
mail_index_get_header_ext(ctx->sync_view, ctx->mbox->virtual_ext_id,
164
&ext_data, &ext_size);
166
if (ctx->mbox->sync_initialized &&
167
ctx->mbox->prev_uid_validity == hdr->uid_validity &&
168
ext_size >= sizeof(*ext_hdr) &&
169
ctx->mbox->prev_change_counter == ext_hdr->change_counter) {
170
/* fully refreshed */
174
ctx->mbox->prev_uid_validity = hdr->uid_validity;
175
if (ext_hdr == NULL ||
176
ctx->mbox->search_args_crc32 != ext_hdr->search_args_crc32) {
179
ext_mailbox_count = 0;
181
ctx->mbox->prev_change_counter = ext_hdr->change_counter;
182
mailboxes = (const void *)(ext_hdr + 1);
183
ext_name_offset = sizeof(*ext_hdr) +
184
ext_hdr->mailbox_count * sizeof(*mailboxes);
185
if (ext_name_offset >= ext_size ||
186
ext_hdr->mailbox_count > INT_MAX/sizeof(*mailboxes)) {
187
i_error("virtual index %s: Broken mailbox_count header",
189
ctx->index_broken = TRUE;
190
ext_mailbox_count = 0;
193
ext_mailbox_count = ext_hdr->mailbox_count;
197
/* update mailbox backends */
199
for (i = 0; i < ext_mailbox_count; i++) {
200
if (mailboxes[i].id > ext_hdr->highest_mailbox_id ||
201
mailboxes[i].id <= prev_mailbox_id) {
202
i_error("virtual index %s: Broken mailbox id",
206
if (mailboxes[i].name_len == 0 ||
207
mailboxes[i].name_len > ext_size) {
208
i_error("virtual index %s: Broken mailbox name_len",
212
if (ext_name_offset + mailboxes[i].name_len > ext_size) {
213
i_error("virtual index %s: Broken mailbox list",
218
const unsigned char *nameptr;
221
nameptr = CONST_PTR_OFFSET(ext_data, ext_name_offset);
222
name = t_strndup(nameptr, mailboxes[i].name_len);
223
if (virtual_sync_get_backend_box(ctx, name, &bbox) < 0)
230
/* mailbox no longer exists. */
233
bbox->mailbox_id = mailboxes[i].id;
234
bbox->sync_uid_validity = mailboxes[i].uid_validity;
235
bbox->sync_highest_modseq = mailboxes[i].highest_modseq;
236
bbox->sync_next_uid = mailboxes[i].next_uid;
237
bbox->sync_mailbox_idx = i;
239
ext_name_offset += mailboxes[i].name_len;
240
prev_mailbox_id = mailboxes[i].id;
242
if (i < ext_mailbox_count) {
243
ctx->index_broken = TRUE;
246
ctx->mbox->highest_mailbox_id = ext_hdr == NULL ? 0 :
247
ext_hdr->highest_mailbox_id;
248
ctx->mbox->sync_initialized = TRUE;
250
/* assign new mailbox IDs if any are missing */
251
bboxes = array_get_modifiable(&ctx->mbox->backend_boxes, &count);
252
for (i = 0; i < count; i++) {
253
if (bboxes[i]->mailbox_id == 0) {
254
bboxes[i]->mailbox_id = ++ctx->mbox->highest_mailbox_id;
258
/* sort the backend mailboxes by mailbox_id. */
259
qsort(bboxes, count, sizeof(*bboxes), bbox_mailbox_id_cmp);
263
static void virtual_sync_ext_header_rewrite(struct virtual_sync_context *ctx)
265
struct virtual_mail_index_header ext_hdr;
266
struct virtual_mail_index_mailbox_record mailbox;
267
struct virtual_backend_box **bboxes;
269
const void *ext_data;
271
unsigned int i, mailbox_pos, name_pos, count;
273
bboxes = array_get_modifiable(&ctx->mbox->backend_boxes, &count);
274
mailbox_pos = sizeof(ext_hdr);
275
name_pos = mailbox_pos + sizeof(mailbox) * count;
277
memset(&ext_hdr, 0, sizeof(ext_hdr));
278
memset(&mailbox, 0, sizeof(mailbox));
280
ext_hdr.change_counter = ++ctx->mbox->prev_change_counter;
281
ext_hdr.mailbox_count = count;
282
ext_hdr.highest_mailbox_id = ctx->mbox->highest_mailbox_id;
283
ext_hdr.search_args_crc32 = ctx->mbox->search_args_crc32;
285
buf = buffer_create_dynamic(pool_datastack_create(), name_pos + 256);
286
buffer_append(buf, &ext_hdr, sizeof(ext_hdr));
288
for (i = 0; i < count; i++) {
290
bboxes[i]->mailbox_id > bboxes[i-1]->mailbox_id);
292
bboxes[i]->sync_mailbox_idx = i;
293
mailbox.id = bboxes[i]->mailbox_id;
294
mailbox.name_len = strlen(bboxes[i]->name);
295
mailbox.uid_validity = bboxes[i]->sync_uid_validity;
296
mailbox.highest_modseq = bboxes[i]->sync_highest_modseq;
297
mailbox.next_uid = bboxes[i]->sync_next_uid;
298
buffer_write(buf, mailbox_pos, &mailbox, sizeof(mailbox));
299
buffer_write(buf, name_pos, bboxes[i]->name, mailbox.name_len);
301
mailbox_pos += sizeof(mailbox);
302
name_pos += mailbox.name_len;
304
i_assert(buf->used == name_pos);
306
mail_index_get_header_ext(ctx->sync_view, ctx->mbox->virtual_ext_id,
307
&ext_data, &ext_size);
308
if (ext_size < name_pos) {
309
mail_index_ext_resize(ctx->trans, ctx->mbox->virtual_ext_id,
311
sizeof(struct virtual_mail_index_record),
314
mail_index_update_header_ext(ctx->trans, ctx->mbox->virtual_ext_id,
315
0, buf->data, name_pos);
318
static void virtual_sync_ext_header_update(struct virtual_sync_context *ctx)
320
struct virtual_mail_index_header ext_hdr;
322
if (!ctx->ext_header_changed)
325
/* we changed something - update the change counter in header */
326
ext_hdr.change_counter = ++ctx->mbox->prev_change_counter;
327
mail_index_update_header_ext(ctx->trans, ctx->mbox->virtual_ext_id,
328
offsetof(struct virtual_mail_index_header, change_counter),
329
&ext_hdr.change_counter, sizeof(ext_hdr.change_counter));
332
static void virtual_sync_index_rec(struct virtual_sync_context *ctx,
333
const struct mail_index_sync_rec *sync_rec)
335
uint32_t virtual_ext_id = ctx->mbox->virtual_ext_id;
336
struct virtual_backend_box *bbox;
337
const struct virtual_mail_index_record *vrec;
339
enum mail_flags flags;
340
struct mail_keywords *keywords;
341
enum modify_type modify_type;
342
const char *kw_names[2];
343
uint32_t vseq, seq1, seq2;
346
switch (sync_rec->type) {
347
case MAIL_INDEX_SYNC_TYPE_APPEND:
350
case MAIL_INDEX_SYNC_TYPE_EXPUNGE:
351
case MAIL_INDEX_SYNC_TYPE_FLAGS:
352
case MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD:
353
case MAIL_INDEX_SYNC_TYPE_KEYWORD_REMOVE:
354
case MAIL_INDEX_SYNC_TYPE_KEYWORD_RESET:
357
if (!mail_index_lookup_seq_range(ctx->sync_view,
358
sync_rec->uid1, sync_rec->uid2,
360
/* already expunged, nothing to do. */
364
for (vseq = seq1; vseq <= seq2; vseq++) {
365
mail_index_lookup_ext(ctx->sync_view, vseq, virtual_ext_id,
369
bbox = virtual_backend_box_lookup(ctx->mbox, vrec->mailbox_id);
373
virtual_backend_box_sync_mail_set(bbox);
374
if (!mail_set_uid(bbox->sync_mail, vrec->real_uid)) {
375
/* message is already expunged from backend mailbox. */
379
switch (sync_rec->type) {
380
case MAIL_INDEX_SYNC_TYPE_EXPUNGE:
381
mail_expunge(bbox->sync_mail);
383
case MAIL_INDEX_SYNC_TYPE_FLAGS:
384
flags = sync_rec->add_flags & MAIL_FLAGS_NONRECENT;
386
mail_update_flags(bbox->sync_mail,
389
flags = sync_rec->remove_flags & MAIL_FLAGS_NONRECENT;
391
mail_update_flags(bbox->sync_mail,
392
MODIFY_REMOVE, flags);
395
case MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD:
396
case MAIL_INDEX_SYNC_TYPE_KEYWORD_REMOVE:
397
kw_names[0] = ctx->kw_all[sync_rec->keyword_idx];
399
keywords = mailbox_keywords_create_valid(bbox->box,
402
modify_type = sync_rec->type ==
403
MAIL_INDEX_SYNC_TYPE_KEYWORD_ADD ?
404
MODIFY_ADD : MODIFY_REMOVE;
405
mail_update_keywords(bbox->sync_mail,
406
modify_type, keywords);
407
mailbox_keywords_free(bbox->box, &keywords);
409
case MAIL_INDEX_SYNC_TYPE_KEYWORD_RESET:
411
keywords = mailbox_keywords_create_valid(bbox->box,
413
mail_update_keywords(bbox->sync_mail, MODIFY_REPLACE,
415
mailbox_keywords_free(bbox->box, &keywords);
417
case MAIL_INDEX_SYNC_TYPE_APPEND:
423
static void virtual_sync_index_changes(struct virtual_sync_context *ctx)
425
const ARRAY_TYPE(keywords) *keywords;
426
struct mail_index_sync_rec sync_rec;
428
keywords = mail_index_get_keywords(ctx->index);
429
ctx->kw_all = array_count(keywords) == 0 ? NULL :
430
array_idx(keywords, 0);
431
while (mail_index_sync_next(ctx->index_sync_ctx, &sync_rec))
432
virtual_sync_index_rec(ctx, &sync_rec);
435
static void virtual_sync_index_finish(struct virtual_sync_context *ctx)
437
struct mailbox *box = &ctx->mbox->ibox.box;
438
const struct mail_index_header *hdr;
441
hdr = mail_index_get_header(ctx->sync_view);
442
if (hdr->uid_validity != 0)
443
ctx->uid_validity = hdr->uid_validity;
445
virtual_sync_set_uidvalidity(ctx);
447
/* mark the newly seen messages as recent */
448
if (mail_index_lookup_seq_range(ctx->sync_view, hdr->first_recent_uid,
449
hdr->next_uid, &seq1, &seq2)) {
450
index_mailbox_set_recent_seq(&ctx->mbox->ibox, ctx->sync_view,
453
if (ctx->ext_header_rewrite) {
454
/* entire mailbox list needs to be rewritten */
455
virtual_sync_ext_header_rewrite(ctx);
457
/* update only changed parts in the header */
458
virtual_sync_ext_header_update(ctx);
461
if (box->v.sync_notify != NULL)
462
box->v.sync_notify(box, 0, 0);
465
static int virtual_sync_backend_box_init(struct virtual_backend_box *bbox)
467
struct mailbox_transaction_context *trans;
468
struct mail_search_context *search_ctx;
470
struct virtual_backend_uidmap uidmap;
471
enum mailbox_search_result_flags result_flags;
474
trans = mailbox_transaction_begin(bbox->box, 0);
475
mail = mail_alloc(trans, 0, NULL);
477
search_ctx = mailbox_search_init(trans, bbox->search_args, NULL);
479
/* save the result and keep it updated */
480
result_flags = MAILBOX_SEARCH_RESULT_FLAG_UPDATE |
481
MAILBOX_SEARCH_RESULT_FLAG_QUEUE_SYNC;
482
bbox->search_result =
483
mailbox_search_result_save(search_ctx, result_flags);
485
/* add the found UIDs to uidmap. virtual_uid gets assigned later. */
486
memset(&uidmap, 0, sizeof(uidmap));
487
array_clear(&bbox->uids);
488
while (mailbox_search_next(search_ctx, mail) > 0) {
489
uidmap.real_uid = mail->uid;
490
array_append(&bbox->uids, &uidmap, 1);
493
ret = mailbox_search_deinit(&search_ctx);
496
(void)mailbox_transaction_commit(&trans);
500
static int virtual_backend_uidmap_bsearch_cmp(const void *key, const void *data)
502
const uint32_t *uidp = key;
503
const struct virtual_backend_uidmap *uidmap = data;
505
return *uidp < uidmap->real_uid ? -1 :
506
(*uidp > uidmap->real_uid ? 1 : 0);
510
virtual_sync_mailbox_box_remove(struct virtual_sync_context *ctx,
511
struct virtual_backend_box *bbox,
512
const ARRAY_TYPE(seq_range) *removed_uids)
514
const struct seq_range *uids;
515
struct virtual_backend_uidmap *uidmap;
516
unsigned int i, src, dest, uid_count, rec_count;
519
uids = array_get(removed_uids, &uid_count);
523
/* everything in removed_uids should exist in bbox->uids */
524
uidmap = array_get_modifiable(&bbox->uids, &rec_count);
525
i_assert(rec_count >= uid_count);
527
/* find the first uidmap record to be removed */
528
if (!bsearch_insert_pos(&uids[0].seq1, uidmap, rec_count,
530
virtual_backend_uidmap_bsearch_cmp, &src))
533
/* remove the unwanted messages */
535
for (i = 0; i < uid_count; i++) {
537
while (uidmap[src].real_uid != uid) {
538
uidmap[dest++] = uidmap[src++];
539
i_assert(src < rec_count);
542
for (; uid <= uids[i].seq2; uid++, src++) {
543
i_assert(src < rec_count);
544
i_assert(uidmap[src].real_uid == uid);
545
if (mail_index_lookup_seq(ctx->sync_view,
546
uidmap[src].virtual_uid,
548
mail_index_expunge(ctx->trans, vseq);
551
array_delete(&bbox->uids, dest, src - dest);
555
virtual_sync_mailbox_box_add(struct virtual_sync_context *ctx,
556
struct virtual_backend_box *bbox,
557
const ARRAY_TYPE(seq_range) *added_uids_arr)
559
const struct seq_range *added_uids;
560
struct virtual_backend_uidmap *uidmap;
561
struct virtual_add_record rec;
562
unsigned int i, src, dest, uid_count, add_count, rec_count;
565
added_uids = array_get(added_uids_arr, &uid_count);
568
add_count = seq_range_count(added_uids_arr);
570
/* none of added_uids should exist in bbox->uids. find the position
571
of the first inserted index. */
572
uidmap = array_get_modifiable(&bbox->uids, &rec_count);
573
if (rec_count == 0 ||
574
added_uids[0].seq1 > uidmap[rec_count-1].real_uid) {
575
/* fast path: usually messages are appended */
577
} else if (bsearch_insert_pos(&added_uids[0].seq1, uidmap, rec_count,
579
virtual_backend_uidmap_bsearch_cmp,
583
/* make space for all added UIDs. */
584
if (rec_count == dest)
585
array_idx_clear(&bbox->uids, dest + add_count-1);
587
array_copy(&bbox->uids.arr, dest + add_count,
588
&bbox->uids.arr, dest, rec_count - dest);
590
uidmap = array_get_modifiable(&bbox->uids, &rec_count);
591
src = dest + add_count;
593
/* add/move the UIDs to their correct positions */
594
memset(&rec, 0, sizeof(rec));
595
rec.rec.mailbox_id = bbox->mailbox_id;
596
for (i = 0; i < uid_count; i++) {
597
add_uid = added_uids[i].seq1;
598
while (src < rec_count && uidmap[src].real_uid < add_uid)
599
uidmap[dest++] = uidmap[src++];
601
for (; add_uid <= added_uids[i].seq2; add_uid++, dest++) {
602
i_assert(dest < rec_count);
604
uidmap[dest].real_uid = add_uid;
605
uidmap[dest].virtual_uid = 0;
607
if (ctx->mbox->uids_mapped) {
608
rec.rec.real_uid = add_uid;
609
array_append(&ctx->all_adds, &rec, 1);
615
static int virtual_backend_uidmap_cmp(const void *p1, const void *p2)
617
const struct virtual_backend_uidmap *u1 = p1, *u2 = p2;
619
if (u1->real_uid < u2->real_uid)
621
if (u1->real_uid > u2->real_uid)
626
static void virtual_sync_bbox_uids_sort(struct virtual_backend_box *bbox)
628
struct virtual_backend_uidmap *uids;
629
unsigned int uid_count;
631
/* the uidmap must be sorted by real_uids */
632
uids = array_get_modifiable(&bbox->uids, &uid_count);
633
qsort(uids, uid_count, sizeof(*uids), virtual_backend_uidmap_cmp);
634
bbox->uids_nonsorted = FALSE;
637
static void virtual_sync_backend_boxes_sort_uids(struct virtual_mailbox *mbox)
639
struct virtual_backend_box *const *bboxes;
640
unsigned int i, count;
642
bboxes = array_get(&mbox->backend_boxes, &count);
643
for (i = 0; i < count; i++) {
644
if (bboxes[i]->uids_nonsorted)
645
virtual_sync_bbox_uids_sort(bboxes[i]);
650
virtual_sync_backend_handle_old_vmsgs(struct virtual_sync_context *ctx,
651
struct virtual_backend_box *bbox,
652
struct mail_search_result *result)
654
struct index_mailbox *ibox = (struct index_mailbox *)bbox->box;
655
const struct virtual_mail_index_record *vrec;
656
struct virtual_backend_uidmap uidmap;
658
uint32_t seq, vseq, vuid, messages;
661
/* add the currently existing UIDs to uidmap. remember the messages
662
that were already expunged */
663
memset(&uidmap, 0, sizeof(uidmap));
664
array_clear(&bbox->uids);
666
messages = mail_index_view_get_messages_count(ctx->sync_view);
667
for (vseq = 1; vseq <= messages; vseq++) {
668
mail_index_lookup_uid(ctx->sync_view, vseq, &vuid);
669
mail_index_lookup_ext(ctx->sync_view, vseq,
670
ctx->mbox->virtual_ext_id,
673
if (vrec->mailbox_id == bbox->mailbox_id) {
674
uidmap.real_uid = vrec->real_uid;
675
uidmap.virtual_uid = vuid;
676
array_append(&bbox->uids, &uidmap, 1);
678
if (mail_index_lookup_seq(ibox->view, vrec->real_uid,
680
seq_range_array_add(&result->uids, 0,
683
seq_range_array_add(&result->removed_uids, 0,
689
virtual_sync_bbox_uids_sort(bbox);
692
static int virtual_sync_backend_box_continue(struct virtual_sync_context *ctx,
693
struct virtual_backend_box *bbox)
695
const enum mailbox_search_result_flags result_flags =
696
MAILBOX_SEARCH_RESULT_FLAG_UPDATE |
697
MAILBOX_SEARCH_RESULT_FLAG_QUEUE_SYNC;
698
struct index_mailbox *ibox = (struct index_mailbox *)bbox->box;
699
struct mail_search_result *result;
700
ARRAY_TYPE(seq_range) removed_uids, added_uids, flag_updates;
702
uint32_t seq, old_msg_count;
704
/* initialize the search result from all the existing messages in
706
result = mailbox_search_result_alloc(bbox->box, bbox->search_args,
708
mailbox_search_result_initial_done(result);
709
virtual_sync_backend_handle_old_vmsgs(ctx, bbox, result);
711
/* get list of changed old messages, based on modseq changes.
712
(we'll assume all modseq changes are due to flag changes, which
713
may not be true in future.) */
714
if (bbox->sync_next_uid <= 1 ||
715
!mail_index_lookup_seq_range(ibox->view, 1, bbox->sync_next_uid-1,
716
&seq, &old_msg_count))
718
t_array_init(&flag_updates, I_MIN(128, old_msg_count));
719
for (seq = 1; seq <= old_msg_count; seq++) {
720
modseq = mail_index_modseq_lookup(ibox->view, seq);
721
if (modseq > bbox->sync_highest_modseq)
722
seq_range_array_add(&flag_updates, 0, seq);
725
/* update the search result based on the flag changes and
727
if (index_search_result_update_flags(result, &flag_updates) < 0 ||
728
index_search_result_update_appends(result, old_msg_count) < 0) {
729
mailbox_search_result_free(&result);
733
t_array_init(&removed_uids, 128);
734
t_array_init(&added_uids, 128);
735
mailbox_search_result_sync(result, &removed_uids, &added_uids);
736
virtual_sync_mailbox_box_remove(ctx, bbox, &removed_uids);
737
virtual_sync_mailbox_box_add(ctx, bbox, &added_uids);
739
bbox->search_result = result;
743
static void virtual_sync_drop_existing(struct virtual_backend_box *bbox,
744
ARRAY_TYPE(seq_range) *added_uids)
746
ARRAY_TYPE(seq_range) drop_uids;
747
const struct virtual_backend_uidmap *uidmap;
748
struct seq_range_iter iter;
749
unsigned int i, n = 0, count;
752
seq_range_array_iter_init(&iter, added_uids);
753
if (!seq_range_array_iter_nth(&iter, n++, &add_uid))
756
uidmap = array_get_modifiable(&bbox->uids, &count);
757
(void)bsearch_insert_pos(&add_uid, uidmap, count, sizeof(*uidmap),
758
virtual_backend_uidmap_bsearch_cmp, &i);
762
t_array_init(&drop_uids, array_count(added_uids));
763
for (; i < count; ) {
764
if (uidmap[i].real_uid < add_uid) {
768
if (uidmap[i].real_uid == add_uid) {
769
seq_range_array_add(&drop_uids, 0, add_uid);
772
if (!seq_range_array_iter_nth(&iter, n++, &add_uid))
775
seq_range_array_remove_seq_range(added_uids, &drop_uids);
778
static void virtual_sync_drop_nonexisting(struct virtual_backend_box *bbox,
779
ARRAY_TYPE(seq_range) *removed_uids)
781
ARRAY_TYPE(seq_range) drop_uids;
782
const struct virtual_backend_uidmap *uidmap;
783
struct seq_range_iter iter;
784
unsigned int i, n = 0, count;
786
bool iter_done = FALSE;
788
seq_range_array_iter_init(&iter, removed_uids);
789
if (!seq_range_array_iter_nth(&iter, n++, &remove_uid))
792
uidmap = array_get_modifiable(&bbox->uids, &count);
793
(void)bsearch_insert_pos(&remove_uid, uidmap, count, sizeof(*uidmap),
794
virtual_backend_uidmap_bsearch_cmp, &i);
796
t_array_init(&drop_uids, array_count(removed_uids)); iter_done = FALSE;
797
for (; i < count; ) {
798
if (uidmap[i].real_uid < remove_uid) {
802
if (uidmap[i].real_uid != remove_uid)
803
seq_range_array_add(&drop_uids, 0, remove_uid);
806
if (!seq_range_array_iter_nth(&iter, n++, &remove_uid)) {
813
seq_range_array_add(&drop_uids, 0, remove_uid);
814
} while (seq_range_array_iter_nth(&iter, n++, &remove_uid));
816
seq_range_array_remove_seq_range(removed_uids, &drop_uids);
819
static void virtual_sync_mailbox_box_update(struct virtual_sync_context *ctx,
820
struct virtual_backend_box *bbox)
822
ARRAY_TYPE(seq_range) removed_uids, added_uids, temp_uids;
823
unsigned int count1, count2;
825
t_array_init(&removed_uids, 128);
826
t_array_init(&added_uids, 128);
828
mailbox_search_result_sync(bbox->search_result,
829
&removed_uids, &added_uids);
830
if (array_is_created(&bbox->sync_outside_expunges)) {
831
seq_range_array_remove_seq_range(&bbox->sync_outside_expunges,
833
seq_range_array_merge(&removed_uids,
834
&bbox->sync_outside_expunges);
835
array_clear(&bbox->sync_outside_expunges);
838
virtual_sync_drop_existing(bbox, &added_uids);
839
virtual_sync_drop_nonexisting(bbox, &removed_uids);
841
/* if any of the pending removes came back, we don't want to expunge
842
them anymore. also since they already exist, remove them from
844
count1 = array_count(&bbox->sync_pending_removes);
845
count2 = array_count(&added_uids);
846
if (count1 > 0 && count2 > 0) {
847
t_array_init(&temp_uids, count1);
848
array_append_array(&temp_uids, &bbox->sync_pending_removes);
849
if (seq_range_array_remove_seq_range(
850
&bbox->sync_pending_removes, &added_uids) > 0) {
851
seq_range_array_remove_seq_range(&added_uids,
856
if (!ctx->expunge_removed) {
857
/* delay removing messages that don't match the search
858
criteria, but don't delay removing expunged messages */
859
if (array_count(&ctx->sync_expunges) > 0) {
860
seq_range_array_remove_seq_range(&bbox->sync_pending_removes,
861
&ctx->sync_expunges);
862
seq_range_array_remove_seq_range(&removed_uids,
863
&ctx->sync_expunges);
864
virtual_sync_mailbox_box_remove(ctx, bbox,
865
&ctx->sync_expunges);
867
seq_range_array_merge(&bbox->sync_pending_removes,
869
} else if (array_count(&bbox->sync_pending_removes) > 0) {
870
/* remove all current and old */
871
seq_range_array_merge(&bbox->sync_pending_removes,
873
virtual_sync_mailbox_box_remove(ctx, bbox,
874
&bbox->sync_pending_removes);
875
array_clear(&bbox->sync_pending_removes);
877
virtual_sync_mailbox_box_remove(ctx, bbox, &removed_uids);
879
virtual_sync_mailbox_box_add(ctx, bbox, &added_uids);
882
static bool virtual_sync_find_seqs(struct virtual_backend_box *bbox,
883
const struct mailbox_sync_rec *sync_rec,
884
unsigned int *idx1_r,
885
unsigned int *idx2_r)
887
struct index_mailbox *ibox = (struct index_mailbox *)bbox->box;
888
const struct virtual_backend_uidmap *uidmap;
889
unsigned int idx, count;
892
mail_index_lookup_uid(ibox->view, sync_rec->seq1, &uid1);
893
mail_index_lookup_uid(ibox->view, sync_rec->seq2, &uid2);
894
uidmap = array_get_modifiable(&bbox->uids, &count);
895
(void)bsearch_insert_pos(&uid1, uidmap, count, sizeof(*uidmap),
896
virtual_backend_uidmap_bsearch_cmp, &idx);
897
if (idx == count || uidmap[idx].real_uid > uid2)
901
while (idx < count && uidmap[idx].real_uid <= uid2) idx++;
906
static void virtual_sync_expunge_add(struct virtual_sync_context *ctx,
907
struct virtual_backend_box *bbox,
908
const struct mailbox_sync_rec *sync_rec)
910
struct index_mailbox *ibox = (struct index_mailbox *)bbox->box;
911
struct virtual_backend_uidmap *uidmap;
913
unsigned int i, idx1, count;
915
mail_index_lookup_uid(ibox->view, sync_rec->seq1, &uid1);
916
mail_index_lookup_uid(ibox->view, sync_rec->seq2, &uid2);
918
/* remember only the expunges for messages that
919
already exist for this mailbox */
920
uidmap = array_get_modifiable(&bbox->uids, &count);
921
(void)bsearch_insert_pos(&uid1, uidmap, count, sizeof(*uidmap),
922
virtual_backend_uidmap_bsearch_cmp, &idx1);
923
for (i = idx1; i < count; i++) {
924
if (uidmap[i].real_uid > uid2)
926
seq_range_array_add(&ctx->sync_expunges, 0, uidmap[i].real_uid);
930
static int virtual_sync_backend_box_sync(struct virtual_sync_context *ctx,
931
struct virtual_backend_box *bbox,
932
enum mailbox_sync_flags sync_flags)
934
struct mailbox_sync_context *sync_ctx;
935
const struct virtual_backend_uidmap *uidmap;
936
struct mailbox_sync_rec sync_rec;
937
unsigned int idx1, idx2;
940
sync_ctx = mailbox_sync_init(bbox->box, sync_flags);
941
virtual_backend_box_sync_mail_set(bbox);
942
while (mailbox_sync_next(sync_ctx, &sync_rec)) {
943
switch (sync_rec.type) {
944
case MAILBOX_SYNC_TYPE_EXPUNGE:
945
if (ctx->expunge_removed) {
946
/* no need to keep track of expunges */
949
virtual_sync_expunge_add(ctx, bbox, &sync_rec);
951
case MAILBOX_SYNC_TYPE_FLAGS:
952
if (!virtual_sync_find_seqs(bbox, &sync_rec,
955
uidmap = array_idx(&bbox->uids, 0);
956
for (; idx1 <= idx2; idx1++) {
957
vuid = uidmap[idx1].virtual_uid;
958
if (!mail_index_lookup_seq(ctx->sync_view,
960
/* expunged by another session,
961
but we haven't yet updated
965
virtual_sync_external_flags(ctx, bbox, vseq,
966
uidmap[idx1].real_uid);
969
case MAILBOX_SYNC_TYPE_MODSEQ:
973
return mailbox_sync_deinit(&sync_ctx, 0, NULL);
976
static void virtual_sync_backend_ext_header(struct virtual_sync_context *ctx,
977
struct virtual_backend_box *bbox)
979
const unsigned int uidval_pos =
980
offsetof(struct virtual_mail_index_mailbox_record,
982
struct mailbox_status status;
983
struct virtual_mail_index_mailbox_record mailbox;
984
unsigned int mailbox_offset;
986
mailbox_get_status(bbox->box, STATUS_UIDVALIDITY |
987
STATUS_HIGHESTMODSEQ, &status);
988
if (bbox->sync_uid_validity == status.uidvalidity &&
989
bbox->sync_next_uid == status.uidnext &&
990
bbox->sync_highest_modseq == status.highest_modseq)
993
/* mailbox changed - update extension header */
994
bbox->sync_uid_validity = status.uidvalidity;
995
bbox->sync_highest_modseq = status.highest_modseq;
996
bbox->sync_next_uid = status.uidnext;
998
if (ctx->ext_header_rewrite) {
999
/* we'll rewrite the entire header later */
1003
memset(&mailbox, 0, sizeof(mailbox));
1004
mailbox.uid_validity = bbox->sync_uid_validity;
1005
mailbox.highest_modseq = bbox->sync_highest_modseq;
1006
mailbox.next_uid = bbox->sync_next_uid;
1008
mailbox_offset = sizeof(struct virtual_mail_index_header) +
1009
bbox->sync_mailbox_idx * sizeof(mailbox);
1010
mail_index_update_header_ext(ctx->trans, ctx->mbox->virtual_ext_id,
1011
mailbox_offset + uidval_pos,
1012
CONST_PTR_OFFSET(&mailbox, uidval_pos),
1013
sizeof(mailbox) - uidval_pos);
1014
ctx->ext_header_changed = TRUE;
1017
static int virtual_sync_backend_box(struct virtual_sync_context *ctx,
1018
struct virtual_backend_box *bbox)
1020
struct index_mailbox *ibox = (struct index_mailbox *)bbox->box;
1021
enum mailbox_sync_flags sync_flags;
1022
struct mailbox_status status;
1025
if (!bbox->box->opened)
1026
index_storage_mailbox_open(ibox);
1028
/* if we already did some changes to index, commit them before
1030
virtual_backend_box_sync_mail_unset(bbox);
1031
/* we use modseqs for speeding up initial search result build.
1032
make sure the backend has them enabled. */
1033
mail_index_modseq_enable(ibox->index);
1035
sync_flags = ctx->flags & (MAILBOX_SYNC_FLAG_FULL_READ |
1036
MAILBOX_SYNC_FLAG_FULL_WRITE |
1037
MAILBOX_SYNC_FLAG_FAST);
1039
if (bbox->search_result == NULL) {
1040
/* first sync in this process */
1041
i_assert(ctx->expunge_removed);
1043
if (mailbox_sync(bbox->box, sync_flags, STATUS_UIDVALIDITY,
1047
virtual_backend_box_sync_mail_set(bbox);
1048
if (status.uidvalidity != bbox->sync_uid_validity) {
1049
/* UID validity changed since last sync (or this is
1050
the first sync), do a full search */
1051
ret = virtual_sync_backend_box_init(bbox);
1053
/* build the initial search using the saved modseq. */
1054
ret = virtual_sync_backend_box_continue(ctx, bbox);
1057
/* sync using the existing search result */
1058
i_array_init(&ctx->sync_expunges, 32);
1059
ret = virtual_sync_backend_box_sync(ctx, bbox, sync_flags);
1060
if (ret == 0) T_BEGIN {
1061
virtual_sync_mailbox_box_update(ctx, bbox);
1063
array_free(&ctx->sync_expunges);
1066
virtual_sync_backend_ext_header(ctx, bbox);
1070
static void virtual_sync_backend_map_uids(struct virtual_sync_context *ctx)
1072
uint32_t virtual_ext_id = ctx->mbox->virtual_ext_id;
1073
struct virtual_sync_mail *vmails;
1074
struct virtual_backend_box *bbox, *const *bboxes;
1075
struct virtual_backend_uidmap *uidmap = NULL;
1076
struct virtual_add_record add_rec;
1077
const struct virtual_mail_index_record *vrec;
1080
uint32_t i, vseq, vuid, messages, count;
1081
unsigned int j = 0, uidmap_count = 0;
1083
messages = mail_index_view_get_messages_count(ctx->sync_view);
1085
/* sort the messages in current view by their backend mailbox and
1087
vmails = messages == 0 ? NULL :
1088
i_new(struct virtual_sync_mail, messages);
1089
for (vseq = 1; vseq <= messages; vseq++) {
1090
mail_index_lookup_ext(ctx->sync_view, vseq, virtual_ext_id,
1093
vmails[vseq-1].vseq = vseq;
1094
vmails[vseq-1].vrec = *vrec;
1096
qsort(vmails, messages, sizeof(*vmails), virtual_sync_mail_cmp);
1098
/* create real mailbox uid -> virtual uid mapping and expunge
1099
messages no longer matching the search rule */
1100
memset(&add_rec, 0, sizeof(add_rec));
1102
for (i = 0; i < messages; i++) {
1103
vseq = vmails[i].vseq;
1104
vrec = &vmails[i].vrec;
1106
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1107
/* add the rest of the newly seen messages */
1108
for (; j < uidmap_count; j++) {
1109
add_rec.rec.real_uid = uidmap[j].real_uid;
1110
array_append(&ctx->all_adds, &add_rec, 1);
1112
bbox = virtual_backend_box_lookup(ctx->mbox,
1115
/* the entire mailbox is lost */
1116
mail_index_expunge(ctx->trans, vseq);
1119
uidmap = array_get_modifiable(&bbox->uids,
1122
add_rec.rec.mailbox_id = bbox->mailbox_id;
1123
bbox->sync_seen = TRUE;
1125
mail_index_lookup_uid(ctx->sync_view, vseq, &vuid);
1127
/* if virtual record doesn't exist in uidmap, it's expunged */
1128
for (; j < uidmap_count; j++) {
1129
if (uidmap[j].real_uid >= vrec->real_uid)
1132
/* newly seen message */
1133
add_rec.rec.real_uid = uidmap[j].real_uid;
1134
array_append(&ctx->all_adds, &add_rec, 1);
1136
if (j == uidmap_count || uidmap[j].real_uid != vrec->real_uid)
1137
mail_index_expunge(ctx->trans, vseq);
1139
/* exists - update uidmap and flags */
1140
uidmap[j++].virtual_uid = vuid;
1141
virtual_sync_external_flags(ctx, bbox, vseq,
1147
/* finish adding messages to the last mailbox */
1148
for (; j < uidmap_count; j++) {
1149
add_rec.rec.real_uid = uidmap[j].real_uid;
1150
array_append(&ctx->all_adds, &add_rec, 1);
1153
/* if there are any mailboxes we didn't yet sync, add new messages in
1155
bboxes = array_get(&ctx->mbox->backend_boxes, &count);
1156
for (i = 0; i < count; i++) {
1157
if (bboxes[i]->sync_seen)
1160
add_rec.rec.mailbox_id = bboxes[i]->mailbox_id;
1161
uidmap = array_get_modifiable(&bboxes[i]->uids, &uidmap_count);
1162
for (j = 0; j < uidmap_count; j++) {
1163
add_rec.rec.real_uid = uidmap[j].real_uid;
1164
array_append(&ctx->all_adds, &add_rec, 1);
1169
static int virtual_add_record_cmp(const void *p1, const void *p2)
1171
const struct virtual_add_record *add1 = p1, *add2 = p2;
1173
if (add1->received_date < add2->received_date)
1175
if (add1->received_date > add2->received_date)
1178
/* if they're in same mailbox, we can order them correctly by the UID.
1179
if they're in different mailboxes, ordering by UID doesn't really
1180
help but it doesn't really harm either. */
1181
if (add1->rec.real_uid < add2->rec.real_uid)
1183
if (add1->rec.real_uid > add2->rec.real_uid)
1186
/* two messages in different mailboxes have the same received date
1191
static void virtual_sync_backend_sort_new(struct virtual_sync_context *ctx)
1193
struct virtual_backend_box *bbox;
1194
struct virtual_add_record *adds;
1195
const struct virtual_mail_index_record *vrec;
1196
unsigned int i, count;
1198
/* get all messages' received dates */
1199
adds = array_get_modifiable(&ctx->all_adds, &count);
1200
for (bbox = NULL, i = 0; i < count; i++) {
1201
vrec = &adds[i].rec;
1203
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1204
bbox = virtual_backend_box_lookup(ctx->mbox,
1207
if (!mail_set_uid(bbox->sync_mail, vrec->real_uid))
1209
if (mail_get_received_date(bbox->sync_mail,
1210
&adds[i].received_date) < 0) {
1211
/* probably expunged already, just add it somewhere */
1212
adds[i].received_date = 0;
1216
qsort(adds, count, sizeof(*adds), virtual_add_record_cmp);
1219
static void virtual_sync_backend_add_new(struct virtual_sync_context *ctx)
1221
uint32_t virtual_ext_id = ctx->mbox->virtual_ext_id;
1222
struct virtual_add_record *adds;
1223
struct virtual_backend_box *bbox;
1224
struct virtual_backend_uidmap *uidmap;
1225
const struct mail_index_header *hdr;
1226
const struct virtual_mail_index_record *vrec;
1227
unsigned int i, count, idx, uid_count;
1228
uint32_t vseq, first_uid, next_uid;
1230
hdr = mail_index_get_header(ctx->sync_view);
1231
adds = array_get_modifiable(&ctx->all_adds, &count);
1233
ctx->mbox->sync_virtual_next_uid = hdr->next_uid;
1237
if (adds[0].rec.mailbox_id == adds[count-1].rec.mailbox_id) {
1238
/* all messages are from a single mailbox. add them in
1241
/* sort new messages by received date to get the add order */
1242
virtual_sync_backend_sort_new(ctx);
1245
for (bbox = NULL, i = 0; i < count; i++) {
1246
vrec = &adds[i].rec;
1247
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1248
bbox = virtual_backend_box_lookup(ctx->mbox,
1252
mail_index_append(ctx->trans, 0, &vseq);
1253
mail_index_update_ext(ctx->trans, vseq, virtual_ext_id,
1255
virtual_sync_external_flags(ctx, bbox, vseq, vrec->real_uid);
1258
/* assign UIDs to new messages */
1259
first_uid = hdr->next_uid;
1260
mail_index_append_assign_uids(ctx->trans, first_uid, &next_uid);
1262
/* update virtual UIDs in uidmap */
1263
for (bbox = NULL, i = 0; i < count; i++) {
1264
vrec = &adds[i].rec;
1265
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1266
bbox = virtual_backend_box_lookup(ctx->mbox,
1270
uidmap = array_get_modifiable(&bbox->uids, &uid_count);
1271
if (!bsearch_insert_pos(&vrec->real_uid, uidmap, uid_count,
1273
virtual_backend_uidmap_bsearch_cmp,
1276
i_assert(uidmap[idx].virtual_uid == 0);
1277
uidmap[idx].virtual_uid = first_uid + i;
1279
ctx->mbox->sync_virtual_next_uid = first_uid + i;
1283
virtual_sync_apply_existing_appends(struct virtual_sync_context *ctx)
1285
uint32_t virtual_ext_id = ctx->mbox->virtual_ext_id;
1286
struct virtual_backend_box *bbox = NULL;
1287
const struct mail_index_header *hdr;
1288
const struct virtual_mail_index_record *vrec;
1289
struct virtual_backend_uidmap uidmap;
1294
if (!ctx->mbox->uids_mapped)
1297
hdr = mail_index_get_header(ctx->sync_view);
1298
if (ctx->mbox->sync_virtual_next_uid >= hdr->next_uid)
1301
/* another process added messages to virtual index. get backend boxes'
1302
uid lists up-to-date by adding the new messages there. */
1303
if (!mail_index_lookup_seq_range(ctx->sync_view,
1304
ctx->mbox->sync_virtual_next_uid,
1305
(uint32_t)-1, &seq, &seq2))
1308
memset(&uidmap, 0, sizeof(uidmap));
1309
for (; seq <= seq2; seq++) {
1310
mail_index_lookup_ext(ctx->sync_view, seq, virtual_ext_id,
1313
uidmap.real_uid = vrec->real_uid;
1314
mail_index_lookup_uid(ctx->sync_view, seq, &uidmap.virtual_uid);
1316
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1317
bbox = virtual_backend_box_lookup(ctx->mbox,
1320
mail_storage_set_critical(
1321
ctx->mbox->ibox.box.storage,
1322
"Mailbox ID %u unexpectedly lost",
1327
array_append(&bbox->uids, &uidmap, 1);
1328
bbox->uids_nonsorted = TRUE;
1331
virtual_sync_backend_boxes_sort_uids(ctx->mbox);
1336
virtual_sync_apply_existing_expunges(struct virtual_mailbox *mbox,
1337
struct mailbox_sync_context *sync_ctx)
1339
struct index_mailbox_sync_context *isync_ctx =
1340
(struct index_mailbox_sync_context *)sync_ctx;
1341
struct virtual_backend_box *bbox = NULL;
1342
struct seq_range_iter iter;
1343
const struct virtual_mail_index_record *vrec;
1349
if (isync_ctx->expunges == NULL)
1352
seq_range_array_iter_init(&iter, isync_ctx->expunges);
1353
while (seq_range_array_iter_nth(&iter, n++, &seq)) {
1354
mail_index_lookup_ext(mbox->ibox.view, seq,
1355
mbox->virtual_ext_id, &data, &expunged);
1358
if (bbox == NULL || bbox->mailbox_id != vrec->mailbox_id) {
1359
bbox = virtual_backend_box_lookup(mbox,
1361
if (!array_is_created(&bbox->sync_outside_expunges))
1362
i_array_init(&bbox->sync_outside_expunges, 32);
1364
seq_range_array_add(&bbox->sync_outside_expunges, 0,
1369
static int virtual_sync_backend_boxes(struct virtual_sync_context *ctx)
1371
struct virtual_backend_box *const *bboxes;
1372
unsigned int i, count;
1375
if (virtual_sync_apply_existing_appends(ctx) < 0)
1378
i_array_init(&ctx->all_adds, 128);
1379
bboxes = array_get(&ctx->mbox->backend_boxes, &count);
1380
for (i = 0; i < count; i++) {
1382
ret = virtual_sync_backend_box(ctx, bboxes[i]);
1385
/* backend failed, copy the error */
1386
virtual_box_copy_error(&ctx->mbox->ibox.box,
1392
if (!ctx->mbox->uids_mapped) {
1393
/* initial sync: assign virtual UIDs to existing messages and
1395
ctx->mbox->uids_mapped = TRUE;
1396
virtual_sync_backend_map_uids(ctx);
1398
virtual_sync_backend_add_new(ctx);
1399
array_free(&ctx->all_adds);
1403
static void virtual_sync_backend_boxes_finish(struct virtual_sync_context *ctx)
1405
struct virtual_backend_box *const *bboxes;
1406
unsigned int i, count;
1408
bboxes = array_get(&ctx->mbox->backend_boxes, &count);
1409
for (i = 0; i < count; i++)
1410
virtual_backend_box_sync_mail_unset(bboxes[i]);
1413
/* Finish a virtual sync: release backend sync mails, then either commit
   the index sync (success) or roll it back (failure). On failure of a
   broken index, unlink the index files so the same errors aren't reported
   repeatedly. Frees ctx. Returns 0 on success, -1 on failure.
   NOTE(review): reconstructed from a garbled extraction — the
   success/failure branch skeleton, i_free() and return were dropped and
   have been restored; the i_error() path argument (ctx->mbox->path) is an
   assumption to confirm against the canonical source. */
static int virtual_sync_finish(struct virtual_sync_context *ctx, bool success)
{
	int ret = success ? 0 : -1;

	virtual_sync_backend_boxes_finish(ctx);
	if (success) {
		if (mail_index_sync_commit(&ctx->index_sync_ctx) < 0) {
			mail_storage_set_index_error(&ctx->mbox->ibox);
			ret = -1;
		}
	} else {
		if (ctx->index_broken) {
			/* make sure we don't complain about the same errors
			   over and over again. */
			if (mail_index_unlink(ctx->index) < 0) {
				i_error("virtual index %s: Failed to unlink() "
					"broken indexes: %m",
					ctx->mbox->path);
			}
		}
		mail_index_sync_rollback(&ctx->index_sync_ctx);
	}
	i_free(ctx);
	return ret;
}
static int virtual_sync(struct virtual_mailbox *mbox,
1440
enum mailbox_sync_flags flags)
1442
struct virtual_sync_context *ctx;
1443
enum mail_index_sync_flags index_sync_flags;
1446
ctx = i_new(struct virtual_sync_context, 1);
1449
ctx->index = mbox->ibox.index;
1450
/* Removed messages are expunged when
1452
b) Mailbox is being opened (FIX_INCONSISTENT is set) */
1453
ctx->expunge_removed =
1454
(ctx->flags & (MAILBOX_SYNC_FLAG_EXPUNGE |
1455
MAILBOX_SYNC_FLAG_FIX_INCONSISTENT)) != 0;
1457
index_sync_flags = MAIL_INDEX_SYNC_FLAG_FLUSH_DIRTY |
1458
MAIL_INDEX_SYNC_FLAG_AVOID_FLAG_UPDATES;
1459
if (!mbox->ibox.keep_recent)
1460
index_sync_flags |= MAIL_INDEX_SYNC_FLAG_DROP_RECENT;
1462
ret = mail_index_sync_begin(ctx->index, &ctx->index_sync_ctx,
1463
&ctx->sync_view, &ctx->trans,
1467
mail_storage_set_index_error(&mbox->ibox);
1472
ret = virtual_sync_ext_header_read(ctx);
1474
return virtual_sync_finish(ctx, FALSE);
1476
ctx->ext_header_rewrite = TRUE;
1477
/* apply changes from virtual index to backend mailboxes */
1478
virtual_sync_index_changes(ctx);
1479
/* update list of UIDs in backend mailboxes */
1480
if (virtual_sync_backend_boxes(ctx) < 0)
1481
return virtual_sync_finish(ctx, FALSE);
1483
virtual_sync_index_finish(ctx);
1484
return virtual_sync_finish(ctx, TRUE);
1487
struct mailbox_sync_context *
1488
virtual_storage_sync_init(struct mailbox *box, enum mailbox_sync_flags flags)
1490
struct virtual_mailbox *mbox = (struct virtual_mailbox *)box;
1491
struct mailbox_sync_context *sync_ctx;
1495
index_storage_mailbox_open(&mbox->ibox);
1497
if (index_mailbox_want_full_sync(&mbox->ibox, flags))
1498
ret = virtual_sync(mbox, flags);
1500
sync_ctx = index_mailbox_sync_init(box, flags, ret < 0);
1501
virtual_sync_apply_existing_expunges(mbox, sync_ctx);