#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
                                 struct drbd_work *w, int cancel);

 * drbd_md_io_complete (defined here)
 * drbd_endio_pri (defined here)
 * drbd_endio_sec (defined here)
 * bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
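/*
 * Illustrative sketch (not part of drbd_worker.c): because an endio callback
 * may run in hard-irq or softirq context, state shared with process context
 * must be protected with the irq-safe lock variants.  That is why the
 * completion helpers below take mdev->req_lock with spin_lock_irqsave():
 */
static void endio_final_sketch(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
        unsigned long flags;

        spin_lock_irqsave(&mdev->req_lock, flags);   /* safe from any context */
        /* ... move e between lists, update counters ... */
        spin_unlock_irqrestore(&mdev->req_lock, flags);
}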
        if (list_empty(&mdev->read_ee))
                wake_up(&mdev->ee_wait);
        if (test_bit(__EE_WAS_ERROR, &e->flags))
                __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->req_lock, flags);

        drbd_queue_work(&mdev->data.work, &e->w);
        list_del(&e->w.list); /* has been on active_ee or sync_ee */
        list_add_tail(&e->w.list, &mdev->done_ee);

        /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
         * neither did we wake possibly waiting conflicting requests.
         * done from "drbd_process_done_ee" within the appropriate w.cb
         * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
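/*
 * Rough sketch of the deferred completion referred to above: the real
 * drbd_process_done_ee() in drbd_receiver.c walks done_ee and invokes each
 * entry's w.cb (e.g. e_end_block()), which sends the ack and wakes any
 * conflicting requests.  Locking and error handling are omitted here.
 */
static int process_done_ee_sketch(struct drbd_conf *mdev, struct list_head *done_list)
{
        struct drbd_epoch_entry *e, *t;
        int ok = 1;

        list_for_each_entry_safe(e, t, done_list, w.list) {
                list_del(&e->w.list);
                ok = e->w.cb(mdev, &e->w, !ok) && ok;   /* send ack / wake waiters */
                drbd_free_ee(mdev, e);
        }
        return ok;
}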
                : list_empty(&mdev->active_ee);

        if (test_bit(__EE_WAS_ERROR, &e->flags))
                __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->req_lock, flags);

        if (is_syncer_req)
        int uptodate = bio_flagged(bio, BIO_UPTODATE);
        int is_write = bio_data_dir(bio) == WRITE;

        if (error && __ratelimit(&drbd_ratelimit_state))
                dev_warn(DEV, "%s: error=%d s=%llus\n",
                                is_write ? "write" : "read", error,
                                (unsigned long long)e->sector);
        if (!error && !uptodate) {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
                                        is_write ? "write" : "read",
                                        (unsigned long long)e->sector);
                /* strange behavior of some lower level drivers...
                 * fail the request by clearing the uptodate flag,
                 * but do not return any error?! */
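                /* The warning above is followed in the driver source by forcing
                 * the error code, so the request is failed explicitly: */
                error = -EIO;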
        return w_send_read_req(mdev, w, 0);

int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        ERR_IF(cancel) return 1;
        dev_err(DEV, "resync inactive, but callback triggered??\n");
        return 1; /* Simply ignore this! */
}
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
{
        struct hash_desc desc;

        crypto_hash_final(&desc, digest);
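/*
 * The elided body of drbd_csum_ee() above follows the usual crypto_hash
 * pattern; a simplified sketch (the real function walks the e->pages chain
 * and handles a partially used last page):
 */
static void csum_one_page_sketch(struct crypto_hash *tfm, struct page *page,
                                 unsigned int len, void *digest)
{
        struct hash_desc desc = { .tfm = tfm, .flags = 0 };
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, len, 0);
        crypto_hash_init(&desc);
        crypto_hash_update(&desc, &sg, sg.length);
        crypto_hash_final(&desc, digest);
}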
/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);

        D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);

        if (unlikely(cancel))

        if (likely((e->flags & EE_WAS_ERROR) != 0))

        digest_size = crypto_hash_digestsize(mdev->csums_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (digest) {
                sector_t sector = e->sector;
                unsigned int size = e->size;
                drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
                /* Free e and pages before send.
                 * In case we block on congestion, we could otherwise run into
                 * some distributed deadlock, if the other side blocks on
                 * congestion as well, because our receiver blocks in
                 * drbd_pp_alloc due to pp_in_use > max_buffers. */
                drbd_free_ee(mdev, e);
                inc_rs_pending(mdev);
                ok = drbd_send_drequest_csum(mdev, sector, size,
        } else {
                dev_err(DEV, "kmalloc() of digest failed.\n");
        }

        drbd_free_ee(mdev, e);

        if (unlikely(!ok))
                dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
        if (!get_ldev(mdev))

        if (drbd_rs_should_slow_down(mdev, sector))

        /* GFP_TRY, because if there is no memory available right now, this may

        if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)

        /* If it failed because of ENOMEM, retry should help. If it failed
         * because bio_add_page failed (probably broken lower level driver),
         * retry may or may not help.
         * If it does not, you may need to force disconnect. */
        spin_lock_irq(&mdev->req_lock);
        list_del(&e->w.list);
        spin_unlock_irq(&mdev->req_lock);
int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        switch (mdev->state.conn) {
        case C_VERIFY_S:
                w_make_ov_request(mdev, w, cancel);
                break;
        case C_SYNC_TARGET:
                w_make_resync_request(mdev, w, cancel);
                break;
        }

        return 1;
}

void resync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        /* harmless race: list_empty outside data.work.q_lock */
        if (list_empty(&mdev->resync_work.list))
                drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
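/* Note: the resync timer is re-armed by the request makers themselves via
 * mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME) (see w_make_ov_request
 * further below), so as long as requests keep being generated, resync_work is
 * queued again roughly every SLEEP_TIME. */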
                fb->values[i] += value;
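/* The line above is the body of a small helper that bumps every slot of the
 * plan fifo; presumably along these lines (sketch, assuming a fifo_buffer
 * with ->size and ->values as used elsewhere in this file): */
static void fifo_add_val_sketch(struct fifo_buffer *fb, int value)
{
        int i;

        for (i = 0; i < fb->size; i++)
                fb->values[i] += value;
}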
static int drbd_rs_controller(struct drbd_conf *mdev)
{
        unsigned int sect_in;  /* Number of sectors that came in since the last turn */
        unsigned int want;     /* The number of sectors we want in the proxy */
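/*
 * Illustrative only: the heart of drbd_rs_controller() is a feedback step of
 * roughly this shape (the real function also uses the c_plan_ahead fifo and
 * scaling factors; all names below are made up for the sketch):
 */
static int rs_controller_step_sketch(int sect_in, int want, int in_flight, int planned)
{
        int correction = want - in_flight - planned;    /* positive: ask for more */
        int req_sect = sect_in + correction;            /* sectors to request this turn */

        if (req_sect < 0)
                req_sect = 0;
        return req_sect;
}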
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
        int number;

        if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
                number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
                mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
        } else {
                mdev->c_sync_rate = mdev->sync_conf.rate;
                number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
        }

        /* ignore the amount of pending requests, the resync controller should
         * throttle down to incoming reply rate soon enough anyways. */
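        /* Worked example for the fixed-rate branch above, assuming the usual
         * DRBD constants SLEEP_TIME = HZ/10 (100 ms) and BM_BLOCK_SIZE = 4 KiB:
         * with sync_conf.rate = 10240 (KiB/s),
         *     number = (HZ/10) * 10240 / (4 * HZ) = 256
         * requests of 4 KiB per turn, i.e. 1 MiB per 100 ms = 10 MiB/s. */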
static int w_make_resync_request(struct drbd_conf *mdev,
                                 struct drbd_work *w, int cancel)
{
        unsigned long bit;
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
        int number, rollback_i, size;
        int align, queued, sndbuf;

        if (unlikely(cancel))
                return 1;

        if (unlikely(mdev->state.conn < C_CONNECTED)) {
                dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");

        if (mdev->state.conn != C_SYNC_TARGET)
                dev_err(DEV, "%s in w_make_resync_request\n",
                        drbd_conn_str(mdev->state.conn));
        if (mdev->rs_total == 0) {
                /* empty resync? */
                drbd_resync_finished(mdev);
                return 1;
        }

        /* ... to continue resync with a broken disk makes no sense at all */
        dev_err(DEV, "Disk broke down during resync!\n");
        max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
        number = drbd_rs_number_requests(mdev);
        for (i = 0; i < number; i++) {
                /* Stop generating RS requests, when half of the send buffer is filled */
                mutex_lock(&mdev->data.mutex);
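                /* Sketch of the check implied by the comment above; the exact
                 * code differs, but it reads the socket state under data.mutex
                 * and requeues once more than half the send buffer is queued: */
                if (mdev->data.socket) {
                        queued = mdev->data.socket->sk->sk_wmem_queued;
                        sndbuf = mdev->data.socket->sk->sk_sndbuf;
                } else {
                        queued = 1;
                        sndbuf = 0;
                }
                mutex_unlock(&mdev->data.mutex);
                if (queued > sndbuf / 2)
                        goto requeue;   /* assumed label; try again on the next timer tick */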
                size = BM_BLOCK_SIZE;
                bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

                if (bit == DRBD_END_OF_BITMAP) {
                        mdev->bm_resync_fo = drbd_bm_bits(mdev);

                sector = BM_BIT_TO_SECT(bit);

                /* Throttle resync on lower level disk activity, which may also
                 * be caused by application IO on Primary/SyncTarget. */
                if (drbd_rs_should_slow_down(mdev, sector) ||
                    drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->bm_resync_fo = bit;
        if (unlikely(cancel))
                return 1;

        if (unlikely(mdev->state.conn < C_CONNECTED)) {
                dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");

        number = drbd_rs_number_requests(mdev);

        sector = mdev->ov_position;
        for (i = 0; i < number; i++) {
                if (sector >= capacity) {

                size = BM_BLOCK_SIZE;

                if (drbd_rs_should_slow_down(mdev, sector) ||
                    drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->ov_position = sector;

        mdev->ov_position = sector;

        mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
        mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
void start_resync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
}
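/* For reference: this timer is presumably initialized once at device setup,
 * along the lines of (sketch, not copied from drbd_main.c):
 *     setup_timer(&mdev->start_resync_timer, start_resync_timer_fn,
 *                 (unsigned long) mdev);
 * so the callback gets its mdev back through the data argument. */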
int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
                dev_warn(DEV, "w_start_resync later...\n");
                mdev->start_resync_timer.expires = jiffies + HZ/10;
                add_timer(&mdev->start_resync_timer);
                return 1;
        }

        drbd_start_resync(mdev, C_SYNC_SOURCE);
        clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
        return 1;
}
int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)

         * queue (or even the read operations for those packets
         * is not finished by now). Retry in 100ms. */
        schedule_timeout_interruptible(HZ / 10);
        w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);

        w->cb = w_resync_finished;
        spin_lock_irq(&mdev->req_lock);
        os = mdev->state;

        verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

        /* This protects us against multiple calls (that can happen in the presence
           of application IO), and against connectivity loss just before we arrive here. */
        if (os.conn <= C_CONNECTED)

        ns.conn = C_CONNECTED;

        dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
             verify_done ? "Online verify " : "Resync",
             dt + mdev->rs_paused, mdev->rs_paused, dbdt);

        n_oos = drbd_bm_total_weight(mdev);

                const int ratio =
                        (t < 100000) ? ((s*100)/t) : (s/(t/100));
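                /* The conditional above avoids 32-bit overflow in s*100: for
                 * t < 100000 the product s*100 stays below 10^7 (e.g. s=40000,
                 * t=80000 gives (40000*100)/80000 = 50 %), while for larger t
                 * the alternative s/(t/100) divides first, so the intermediate
                 * values remain small. */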
                dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
                     "transferred %luK total %luK\n",
                     Bit2KB(mdev->rs_same_csum),
                if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
                        /* for verify runs, we don't update uuids here,
                         * so there would be nothing to report. */
                        drbd_uuid_set_bm(mdev, 0UL);
                        drbd_print_uuids(mdev, "updated UUIDs");

                        /* Now the two UUID sets are equal, update what we
                         * know of the peer. */
                        for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
                                mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
        mdev->rs_total  = 0;
        mdev->rs_failed = 0;
        mdev->rs_paused = 0;
        mdev->ov_start_sector = 0;

        drbd_md_sync(mdev);

        if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
                dev_info(DEV, "Writing the whole bitmap\n");
                drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
        }

        drbd_khelper(mdev, khelper_cmd);
        if (mdev->state.conn == C_AHEAD) {
                ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
        } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
                if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
                        inc_rs_pending(mdev);
                        ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
        sector_t sector = e->sector;
        unsigned int size = e->size;
        int digest_size;

        if (unlikely(cancel))

        digest_size = crypto_hash_digestsize(mdev->verify_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (!digest) {
                ok = 0; /* terminate the connection in case the allocation failed */

        if (likely(!(e->flags & EE_WAS_ERROR)))
                drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
        else
                memset(digest, 0, digest_size);

        /* Free e and pages before send.
         * In case we block on congestion, we could otherwise run into
         * some distributed deadlock, if the other side blocks on
         * congestion as well, because our receiver blocks in
         * drbd_pp_alloc due to pp_in_use > max_buffers. */
        drbd_free_ee(mdev, e);
        inc_rs_pending(mdev);
        ok = drbd_send_drequest_csum(mdev, sector, size,
                                     digest, digest_size, P_OV_REPLY);
        if (!ok)
                dec_rs_pending(mdev);

        drbd_free_ee(mdev, e);
        dec_unacked(mdev);
        mdev->ov_last_oos_size = size>>9;

        drbd_set_out_of_sync(mdev, sector, size);
        set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
        struct digest_info *di;
        sector_t sector = e->sector;
        unsigned int size = e->size;
        int digest_size;
        int ok, eq = 0;

        if (unlikely(cancel)) {

        eq = !memcmp(digest, di->digest, digest_size);

        /* Free e and pages before send.
         * In case we block on congestion, we could otherwise run into
         * some distributed deadlock, if the other side blocks on
         * congestion as well, because our receiver blocks in
         * drbd_pp_alloc due to pp_in_use > max_buffers. */
        drbd_free_ee(mdev, e);
        if (!eq)
                drbd_ov_oos_found(mdev, sector, size);
        else
                ov_oos_print(mdev);

        ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
                              eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

        --mdev->ov_left;

        /* let's advance progress step marks only for every other megabyte */
        if ((mdev->ov_left & 0x200) == 0x200)
                drbd_advance_rs_marks(mdev, mdev->ov_left);

        if (mdev->ov_left == 0) {
                ov_oos_print(mdev);
                drbd_resync_finished(mdev);
        return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);

int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);

        if (unlikely(cancel)) {
                req_mod(req, send_canceled);

        ok = drbd_send_oos(mdev, req);
        req_mod(req, oos_handed_to_network);
/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.

        return retcode;
void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);
        mdev->rs_in_flight = 0;
        mdev->rs_planed = 0;
        spin_lock(&mdev->peer_seq_lock);
        fifo_set(&mdev->rs_plan_s, 0);
        spin_unlock(&mdev->peer_seq_lock);
}
/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.

        union drbd_state ns;

        if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
                dev_err(DEV, "Resync already running!\n");

        if (mdev->state.conn < C_AHEAD) {
                /* In case a previous resync run was aborted by an IO error/detach on the peer. */
                drbd_rs_cancel_all(mdev);
                /* This should be done when we abort the resync. We definitely do not
                   want to have this for connections going back and forth between
                   Ahead/Behind and SyncSource/SyncTarget */
        }

        if (side == C_SYNC_TARGET) {
                /* Since application IO was locked out during C_WF_BITMAP_T and
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        } else /* C_SYNC_SOURCE */ {
                r = drbd_khelper(mdev, "before-resync-source");
                r = (r >> 8) & 0xff;

                        dev_info(DEV, "before-resync-source handler returned %d, "
                                 "ignoring. Old userland tools?", r);

                        dev_info(DEV, "before-resync-source handler returned %d, "
                                 "dropping connection.\n", r);
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        drbd_state_lock(mdev);
        write_lock_irq(&global_state_lock);
        ns = mdev->state;

        _drbd_pause_after(mdev);

        write_unlock_irq(&global_state_lock);

        if (r == SS_SUCCESS) {
                dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
                     drbd_conn_str(ns.conn),
                     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
                     (unsigned long) mdev->rs_total);
                if (side == C_SYNC_TARGET)
                        mdev->bm_resync_fo = 0;

                /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
                 * with w_send_oos, or the sync target will get confused as to
                 * how many bits to resync. We cannot do that always, because for an
                 * empty resync and protocol < 95, we need to do it here, as we call
                 * drbd_resync_finished from here in that case.
                 * We call drbd_gen_and_send_sync_uuid here for protocol < 96,
                 * and from after_state_ch otherwise. */
                if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
                        drbd_gen_and_send_sync_uuid(mdev);

                if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
                        /* This still has a race (about when exactly the peers

                        drbd_resync_finished(mdev);
                drbd_rs_controller_reset(mdev);

                /* ns.conn may already be != mdev->state.conn,
                 * we may have been paused in between, or become paused until
                 * the timer triggers.