~ubuntu-branches/ubuntu/raring/linux-ti-omap4/raring-proposed

« back to all changes in this revision

Viewing changes to drivers/md/raid10.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati, Ubuntu: 3.5.0-22.34
  • Date: 2013-01-11 15:02:20 UTC
  • mfrom: (72.1.1 quantal-proposed)
  • Revision ID: package-import@ubuntu.com-20130111150220-kgmbtlwhyc4kwqhg
Tags: 3.5.0-217.25
* Release Tracking Bug
  - LP: #1097912

[ Paolo Pisati ]

* rebased on Ubuntu-3.5.0-22.34

[ Ubuntu: 3.5.0-22.34 ]

* Release Tracking Bug
  - LP: #1097343
* Revert "SAUCE: fsnotify: don't put marks on temporary list when clearing
  marks by group"
  - LP: #1096137
* Revert "SAUCE: fsnotify: introduce locked versions of
  fsnotify_add_mark() and fsnotify_remove_mark()"
  - LP: #1096137
* Revert "SAUCE: fsnotify: pass group to fsnotify_destroy_mark()"
  - LP: #1096137
* Revert "SAUCE: fsnotify: use a mutex instead of a spinlock to protect a
  groups mark list"
  - LP: #1096137
* Revert "SAUCE: fanotify: add an extra flag to mark_remove_from_mask
  that indicates whether a mark should be destroyed"
  - LP: #1096137
* Revert "SAUCE: fsnotify: take groups mark_lock before mark lock"
  - LP: #1096137
* Revert "SAUCE: fsnotify: use reference counting for groups"
  - LP: #1096137
* Revert "SAUCE: fsnotify: introduce fsnotify_get_group()"
  - LP: #1096137
* fsnotify: introduce fsnotify_get_group()
  - LP: #1096137
* fsnotify: use reference counting for groups
  - LP: #1096137
* fsnotify: take groups mark_lock before mark lock
  - LP: #1096137
* fanotify: add an extra flag to mark_remove_from_mask that indicates
  whether a mark should be destroyed
  - LP: #1096137
* fsnotify: use a mutex instead of a spinlock to protect a groups mark
  list
  - LP: #1096137
* fsnotify: pass group to fsnotify_destroy_mark()
  - LP: #1096137
* fsnotify: introduce locked versions of fsnotify_add_mark() and
  fsnotify_remove_mark()
  - LP: #1096137
* fsnotify: don't put marks on temporary list when clearing marks by group
  - LP: #1096137
* fsnotify: change locking order
  - LP: #1096137

Show diffs side-by-side

added added

removed removed

Lines of Context:
485
485
         */
486
486
        one_write_done(r10_bio);
487
487
        if (dec_rdev)
488
 
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 
488
                rdev_dec_pending(rdev, conf->mddev);
489
489
}
490
490
 
491
491
/*
1267
1267
                        blocked_rdev = rrdev;
1268
1268
                        break;
1269
1269
                }
 
1270
                if (rdev && (test_bit(Faulty, &rdev->flags)
 
1271
                             || test_bit(Unmerged, &rdev->flags)))
 
1272
                        rdev = NULL;
1270
1273
                if (rrdev && (test_bit(Faulty, &rrdev->flags)
1271
1274
                              || test_bit(Unmerged, &rrdev->flags)))
1272
1275
                        rrdev = NULL;
1273
1276
 
1274
1277
                r10_bio->devs[i].bio = NULL;
1275
1278
                r10_bio->devs[i].repl_bio = NULL;
1276
 
                if (!rdev || test_bit(Faulty, &rdev->flags) ||
1277
 
                    test_bit(Unmerged, &rdev->flags)) {
 
1279
 
 
1280
                if (!rdev && !rrdev) {
1278
1281
                        set_bit(R10BIO_Degraded, &r10_bio->state);
1279
1282
                        continue;
1280
1283
                }
1281
 
                if (test_bit(WriteErrorSeen, &rdev->flags)) {
 
1284
                if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1282
1285
                        sector_t first_bad;
1283
1286
                        sector_t dev_sector = r10_bio->devs[i].addr;
1284
1287
                        int bad_sectors;
1320
1323
                                        max_sectors = good_sectors;
1321
1324
                        }
1322
1325
                }
1323
 
                r10_bio->devs[i].bio = bio;
1324
 
                atomic_inc(&rdev->nr_pending);
 
1326
                if (rdev) {
 
1327
                        r10_bio->devs[i].bio = bio;
 
1328
                        atomic_inc(&rdev->nr_pending);
 
1329
                }
1325
1330
                if (rrdev) {
1326
1331
                        r10_bio->devs[i].repl_bio = bio;
1327
1332
                        atomic_inc(&rrdev->nr_pending);
1377
1382
        for (i = 0; i < conf->copies; i++) {
1378
1383
                struct bio *mbio;
1379
1384
                int d = r10_bio->devs[i].devnum;
1380
 
                if (!r10_bio->devs[i].bio)
1381
 
                        continue;
1382
 
 
1383
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1384
 
                md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1385
 
                            max_sectors);
1386
 
                r10_bio->devs[i].bio = mbio;
1387
 
 
1388
 
                mbio->bi_sector = (r10_bio->devs[i].addr+
1389
 
                                   choose_data_offset(r10_bio,
1390
 
                                                      conf->mirrors[d].rdev));
1391
 
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1392
 
                mbio->bi_end_io = raid10_end_write_request;
1393
 
                mbio->bi_rw = WRITE | do_sync | do_fua;
1394
 
                mbio->bi_private = r10_bio;
1395
 
 
1396
 
                atomic_inc(&r10_bio->remaining);
1397
 
                spin_lock_irqsave(&conf->device_lock, flags);
1398
 
                bio_list_add(&conf->pending_bio_list, mbio);
1399
 
                conf->pending_count++;
1400
 
                spin_unlock_irqrestore(&conf->device_lock, flags);
1401
 
                if (!mddev_check_plugged(mddev))
1402
 
                        md_wakeup_thread(mddev->thread);
1403
 
 
1404
 
                if (!r10_bio->devs[i].repl_bio)
1405
 
                        continue;
1406
 
 
1407
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1408
 
                md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1409
 
                            max_sectors);
1410
 
                r10_bio->devs[i].repl_bio = mbio;
1411
 
 
1412
 
                /* We are actively writing to the original device
1413
 
                 * so it cannot disappear, so the replacement cannot
1414
 
                 * become NULL here
1415
 
                 */
1416
 
                mbio->bi_sector = (r10_bio->devs[i].addr +
1417
 
                                   choose_data_offset(
1418
 
                                           r10_bio,
1419
 
                                           conf->mirrors[d].replacement));
1420
 
                mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1421
 
                mbio->bi_end_io = raid10_end_write_request;
1422
 
                mbio->bi_rw = WRITE | do_sync | do_fua;
1423
 
                mbio->bi_private = r10_bio;
1424
 
 
1425
 
                atomic_inc(&r10_bio->remaining);
1426
 
                spin_lock_irqsave(&conf->device_lock, flags);
1427
 
                bio_list_add(&conf->pending_bio_list, mbio);
1428
 
                conf->pending_count++;
1429
 
                spin_unlock_irqrestore(&conf->device_lock, flags);
1430
 
                if (!mddev_check_plugged(mddev))
1431
 
                        md_wakeup_thread(mddev->thread);
 
1385
                if (r10_bio->devs[i].bio) {
 
1386
                        struct md_rdev *rdev = conf->mirrors[d].rdev;
 
1387
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 
1388
                        md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
 
1389
                                    max_sectors);
 
1390
                        r10_bio->devs[i].bio = mbio;
 
1391
 
 
1392
                        mbio->bi_sector = (r10_bio->devs[i].addr+
 
1393
                                           choose_data_offset(r10_bio,
 
1394
                                                              rdev));
 
1395
                        mbio->bi_bdev = rdev->bdev;
 
1396
                        mbio->bi_end_io = raid10_end_write_request;
 
1397
                        mbio->bi_rw = WRITE | do_sync | do_fua;
 
1398
                        mbio->bi_private = r10_bio;
 
1399
 
 
1400
                        atomic_inc(&r10_bio->remaining);
 
1401
                        spin_lock_irqsave(&conf->device_lock, flags);
 
1402
                        bio_list_add(&conf->pending_bio_list, mbio);
 
1403
                        conf->pending_count++;
 
1404
                        spin_unlock_irqrestore(&conf->device_lock, flags);
 
1405
                        if (!mddev_check_plugged(mddev))
 
1406
                                md_wakeup_thread(mddev->thread);
 
1407
                }
 
1408
 
 
1409
                if (r10_bio->devs[i].repl_bio) {
 
1410
                        struct md_rdev *rdev = conf->mirrors[d].replacement;
 
1411
                        if (rdev == NULL) {
 
1412
                                /* Replacement just got moved to main 'rdev' */
 
1413
                                smp_mb();
 
1414
                                rdev = conf->mirrors[d].rdev;
 
1415
                        }
 
1416
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 
1417
                        md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
 
1418
                                    max_sectors);
 
1419
                        r10_bio->devs[i].repl_bio = mbio;
 
1420
 
 
1421
                        mbio->bi_sector = (r10_bio->devs[i].addr +
 
1422
                                           choose_data_offset(
 
1423
                                                   r10_bio, rdev));
 
1424
                        mbio->bi_bdev = rdev->bdev;
 
1425
                        mbio->bi_end_io = raid10_end_write_request;
 
1426
                        mbio->bi_rw = WRITE | do_sync | do_fua;
 
1427
                        mbio->bi_private = r10_bio;
 
1428
 
 
1429
                        atomic_inc(&r10_bio->remaining);
 
1430
                        spin_lock_irqsave(&conf->device_lock, flags);
 
1431
                        bio_list_add(&conf->pending_bio_list, mbio);
 
1432
                        conf->pending_count++;
 
1433
                        spin_unlock_irqrestore(&conf->device_lock, flags);
 
1434
                        if (!mddev_check_plugged(mddev))
 
1435
                                md_wakeup_thread(mddev->thread);
 
1436
                }
1432
1437
        }
1433
1438
 
1434
1439
        /* Don't remove the bias on 'remaining' (one_write_done) until