~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise-security

« back to all changes in this revision

Viewing changes to drivers/md/raid0.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati
  • Date: 2011-12-06 15:56:07 UTC
  • Revision ID: package-import@ubuntu.com-20111206155607-pcf44kv5fmhk564f
Tags: 3.2.0-1401.1
[ Paolo Pisati ]

* Rebased on top of Ubuntu-3.2.0-3.8
* Tilt-tracking @ ef2487af4bb15bdd0689631774b5a5e3a59f74e2
* Delete debian.ti-omap4/control, it shouldn't be tracked
* Fix architecture spelling (s/armel/armhf/)
* [Config] Update configs following 3.2 import
* [Config] Fix compilation: disable CODA and ARCH_OMAP3
* [Config] Fix compilation: disable Ethernet Faraday
* Update series to precise

Show diffs side-by-side

added added

removed removed

Lines of Context:
20
20
 
21
21
#include <linux/blkdev.h>
22
22
#include <linux/seq_file.h>
 
23
#include <linux/module.h>
23
24
#include <linux/slab.h>
24
25
#include "md.h"
25
26
#include "raid0.h"
27
28
 
28
29
static int raid0_congested(void *data, int bits)
29
30
{
30
 
        mddev_t *mddev = data;
31
 
        raid0_conf_t *conf = mddev->private;
32
 
        mdk_rdev_t **devlist = conf->devlist;
 
31
        struct mddev *mddev = data;
 
32
        struct r0conf *conf = mddev->private;
 
33
        struct md_rdev **devlist = conf->devlist;
33
34
        int raid_disks = conf->strip_zone[0].nb_dev;
34
35
        int i, ret = 0;
35
36
 
47
48
/*
48
49
 * inform the user of the raid configuration
49
50
*/
50
 
static void dump_zones(mddev_t *mddev)
 
51
static void dump_zones(struct mddev *mddev)
51
52
{
52
 
        int j, k, h;
 
53
        int j, k;
53
54
        sector_t zone_size = 0;
54
55
        sector_t zone_start = 0;
55
56
        char b[BDEVNAME_SIZE];
56
 
        raid0_conf_t *conf = mddev->private;
 
57
        struct r0conf *conf = mddev->private;
57
58
        int raid_disks = conf->strip_zone[0].nb_dev;
58
 
        printk(KERN_INFO "******* %s configuration *********\n",
59
 
                mdname(mddev));
60
 
        h = 0;
 
59
        printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
 
60
               mdname(mddev),
 
61
               conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
61
62
        for (j = 0; j < conf->nr_strip_zones; j++) {
62
 
                printk(KERN_INFO "zone%d=[", j);
 
63
                printk(KERN_INFO "md: zone%d=[", j);
63
64
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
64
 
                        printk(KERN_CONT "%s/",
 
65
                        printk(KERN_CONT "%s%s", k?"/":"",
65
66
                        bdevname(conf->devlist[j*raid_disks
66
67
                                                + k]->bdev, b));
67
68
                printk(KERN_CONT "]\n");
68
69
 
69
70
                zone_size  = conf->strip_zone[j].zone_end - zone_start;
70
 
                printk(KERN_INFO "        zone offset=%llukb "
71
 
                                "device offset=%llukb size=%llukb\n",
 
71
                printk(KERN_INFO "      zone-offset=%10lluKB, "
 
72
                                "device-offset=%10lluKB, size=%10lluKB\n",
72
73
                        (unsigned long long)zone_start>>1,
73
74
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
74
75
                        (unsigned long long)zone_size>>1);
75
76
                zone_start = conf->strip_zone[j].zone_end;
76
77
        }
77
 
        printk(KERN_INFO "**********************************\n\n");
 
78
        printk(KERN_INFO "\n");
78
79
}
79
80
 
80
 
static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 
81
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
81
82
{
82
83
        int i, c, err;
83
84
        sector_t curr_zone_end, sectors;
84
 
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
 
85
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
85
86
        struct strip_zone *zone;
86
87
        int cnt;
87
88
        char b[BDEVNAME_SIZE];
88
 
        raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 
89
        char b2[BDEVNAME_SIZE];
 
90
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
89
91
 
90
92
        if (!conf)
91
93
                return -ENOMEM;
92
94
        list_for_each_entry(rdev1, &mddev->disks, same_set) {
93
 
                printk(KERN_INFO "md/raid0:%s: looking at %s\n",
94
 
                       mdname(mddev),
95
 
                       bdevname(rdev1->bdev, b));
 
95
                pr_debug("md/raid0:%s: looking at %s\n",
 
96
                         mdname(mddev),
 
97
                         bdevname(rdev1->bdev, b));
96
98
                c = 0;
97
99
 
98
100
                /* round size to chunk_size */
101
103
                rdev1->sectors = sectors * mddev->chunk_sectors;
102
104
 
103
105
                list_for_each_entry(rdev2, &mddev->disks, same_set) {
104
 
                        printk(KERN_INFO "md/raid0:%s:   comparing %s(%llu)",
105
 
                               mdname(mddev),
106
 
                               bdevname(rdev1->bdev,b),
107
 
                               (unsigned long long)rdev1->sectors);
108
 
                        printk(KERN_CONT " with %s(%llu)\n",
109
 
                               bdevname(rdev2->bdev,b),
110
 
                               (unsigned long long)rdev2->sectors);
 
106
                        pr_debug("md/raid0:%s:   comparing %s(%llu)"
 
107
                                 " with %s(%llu)\n",
 
108
                                 mdname(mddev),
 
109
                                 bdevname(rdev1->bdev,b),
 
110
                                 (unsigned long long)rdev1->sectors,
 
111
                                 bdevname(rdev2->bdev,b2),
 
112
                                 (unsigned long long)rdev2->sectors);
111
113
                        if (rdev2 == rdev1) {
112
 
                                printk(KERN_INFO "md/raid0:%s:   END\n",
113
 
                                       mdname(mddev));
 
114
                                pr_debug("md/raid0:%s:   END\n",
 
115
                                         mdname(mddev));
114
116
                                break;
115
117
                        }
116
118
                        if (rdev2->sectors == rdev1->sectors) {
118
120
                                 * Not unique, don't count it as a new
119
121
                                 * group
120
122
                                 */
121
 
                                printk(KERN_INFO "md/raid0:%s:   EQUAL\n",
122
 
                                       mdname(mddev));
 
123
                                pr_debug("md/raid0:%s:   EQUAL\n",
 
124
                                         mdname(mddev));
123
125
                                c = 1;
124
126
                                break;
125
127
                        }
126
 
                        printk(KERN_INFO "md/raid0:%s:   NOT EQUAL\n",
127
 
                               mdname(mddev));
 
128
                        pr_debug("md/raid0:%s:   NOT EQUAL\n",
 
129
                                 mdname(mddev));
128
130
                }
129
131
                if (!c) {
130
 
                        printk(KERN_INFO "md/raid0:%s:   ==> UNIQUE\n",
131
 
                               mdname(mddev));
 
132
                        pr_debug("md/raid0:%s:   ==> UNIQUE\n",
 
133
                                 mdname(mddev));
132
134
                        conf->nr_strip_zones++;
133
 
                        printk(KERN_INFO "md/raid0:%s: %d zones\n",
134
 
                               mdname(mddev), conf->nr_strip_zones);
 
135
                        pr_debug("md/raid0:%s: %d zones\n",
 
136
                                 mdname(mddev), conf->nr_strip_zones);
135
137
                }
136
138
        }
137
 
        printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
138
 
               mdname(mddev), conf->nr_strip_zones);
 
139
        pr_debug("md/raid0:%s: FINAL %d zones\n",
 
140
                 mdname(mddev), conf->nr_strip_zones);
139
141
        err = -ENOMEM;
140
142
        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
141
143
                                conf->nr_strip_zones, GFP_KERNEL);
142
144
        if (!conf->strip_zone)
143
145
                goto abort;
144
 
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
 
146
        conf->devlist = kzalloc(sizeof(struct md_rdev*)*
145
147
                                conf->nr_strip_zones*mddev->raid_disks,
146
148
                                GFP_KERNEL);
147
149
        if (!conf->devlist)
218
220
                zone = conf->strip_zone + i;
219
221
                dev = conf->devlist + i * mddev->raid_disks;
220
222
 
221
 
                printk(KERN_INFO "md/raid0:%s: zone %d\n",
222
 
                       mdname(mddev), i);
 
223
                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
223
224
                zone->dev_start = smallest->sectors;
224
225
                smallest = NULL;
225
226
                c = 0;
226
227
 
227
228
                for (j=0; j<cnt; j++) {
228
229
                        rdev = conf->devlist[j];
229
 
                        printk(KERN_INFO "md/raid0:%s: checking %s ...",
230
 
                               mdname(mddev),
231
 
                               bdevname(rdev->bdev, b));
232
230
                        if (rdev->sectors <= zone->dev_start) {
233
 
                                printk(KERN_CONT " nope.\n");
 
231
                                pr_debug("md/raid0:%s: checking %s ... nope\n",
 
232
                                         mdname(mddev),
 
233
                                         bdevname(rdev->bdev, b));
234
234
                                continue;
235
235
                        }
236
 
                        printk(KERN_CONT " contained as device %d\n", c);
 
236
                        pr_debug("md/raid0:%s: checking %s ..."
 
237
                                 " contained as device %d\n",
 
238
                                 mdname(mddev),
 
239
                                 bdevname(rdev->bdev, b), c);
237
240
                        dev[c] = rdev;
238
241
                        c++;
239
242
                        if (!smallest || rdev->sectors < smallest->sectors) {
240
243
                                smallest = rdev;
241
 
                                printk(KERN_INFO "md/raid0:%s:  (%llu) is smallest!.\n",
242
 
                                       mdname(mddev),
243
 
                                       (unsigned long long)rdev->sectors);
 
244
                                pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
 
245
                                         mdname(mddev),
 
246
                                         (unsigned long long)rdev->sectors);
244
247
                        }
245
248
                }
246
249
 
247
250
                zone->nb_dev = c;
248
251
                sectors = (smallest->sectors - zone->dev_start) * c;
249
 
                printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
250
 
                       mdname(mddev),
251
 
                       zone->nb_dev, (unsigned long long)sectors);
 
252
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
 
253
                         mdname(mddev),
 
254
                         zone->nb_dev, (unsigned long long)sectors);
252
255
 
253
256
                curr_zone_end += sectors;
254
257
                zone->zone_end = curr_zone_end;
255
258
 
256
 
                printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
257
 
                       mdname(mddev),
258
 
                       (unsigned long long)smallest->sectors);
 
259
                pr_debug("md/raid0:%s: current zone start: %llu\n",
 
260
                         mdname(mddev),
 
261
                         (unsigned long long)smallest->sectors);
259
262
        }
260
263
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
261
264
        mddev->queue->backing_dev_info.congested_data = mddev;
275
278
        blk_queue_io_opt(mddev->queue,
276
279
                         (mddev->chunk_sectors << 9) * mddev->raid_disks);
277
280
 
278
 
        printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
 
281
        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
279
282
        *private_conf = conf;
280
283
 
281
284
        return 0;
299
302
                                struct bvec_merge_data *bvm,
300
303
                                struct bio_vec *biovec)
301
304
{
302
 
        mddev_t *mddev = q->queuedata;
 
305
        struct mddev *mddev = q->queuedata;
303
306
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
304
307
        int max;
305
308
        unsigned int chunk_sectors = mddev->chunk_sectors;
318
321
                return max;
319
322
}
320
323
 
321
 
static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 
324
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
322
325
{
323
326
        sector_t array_sectors = 0;
324
 
        mdk_rdev_t *rdev;
 
327
        struct md_rdev *rdev;
325
328
 
326
329
        WARN_ONCE(sectors || raid_disks,
327
330
                  "%s does not support generic reshape\n", __func__);
332
335
        return array_sectors;
333
336
}
334
337
 
335
 
static int raid0_run(mddev_t *mddev)
 
338
static int raid0_run(struct mddev *mddev)
336
339
{
337
 
        raid0_conf_t *conf;
 
340
        struct r0conf *conf;
338
341
        int ret;
339
342
 
340
343
        if (mddev->chunk_sectors == 0) {
382
385
        return md_integrity_register(mddev);
383
386
}
384
387
 
385
 
static int raid0_stop(mddev_t *mddev)
 
388
static int raid0_stop(struct mddev *mddev)
386
389
{
387
 
        raid0_conf_t *conf = mddev->private;
 
390
        struct r0conf *conf = mddev->private;
388
391
 
389
392
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
390
393
        kfree(conf->strip_zone);
397
400
/* Find the zone which holds a particular offset
398
401
 * Update *sectorp to be an offset in that zone
399
402
 */
400
 
static struct strip_zone *find_zone(struct raid0_private_data *conf,
 
403
static struct strip_zone *find_zone(struct r0conf *conf,
401
404
                                    sector_t *sectorp)
402
405
{
403
406
        int i;
417
420
 * remaps the bio to the target device. we separate two flows.
418
421
 * power 2 flow and a general flow for the sake of perfromance
419
422
*/
420
 
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
 
423
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
421
424
                                sector_t sector, sector_t *sector_offset)
422
425
{
423
426
        unsigned int sect_in_chunk;
424
427
        sector_t chunk;
425
 
        raid0_conf_t *conf = mddev->private;
 
428
        struct r0conf *conf = mddev->private;
426
429
        int raid_disks = conf->strip_zone[0].nb_dev;
427
430
        unsigned int chunk_sects = mddev->chunk_sectors;
428
431
 
453
456
/*
454
457
 * Is io distribute over 1 or more chunks ?
455
458
*/
456
 
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 
459
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
457
460
                        unsigned int chunk_sects, struct bio *bio)
458
461
{
459
462
        if (likely(is_power_of_2(chunk_sects))) {
466
469
        }
467
470
}
468
471
 
469
 
static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 
472
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
470
473
{
471
474
        unsigned int chunk_sects;
472
475
        sector_t sector_offset;
473
476
        struct strip_zone *zone;
474
 
        mdk_rdev_t *tmp_dev;
 
477
        struct md_rdev *tmp_dev;
475
478
 
476
479
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
477
480
                md_flush_request(mddev, bio);
478
 
                return 0;
 
481
                return;
479
482
        }
480
483
 
481
484
        chunk_sects = mddev->chunk_sectors;
495
498
                else
496
499
                        bp = bio_split(bio, chunk_sects -
497
500
                                       sector_div(sector, chunk_sects));
498
 
                if (raid0_make_request(mddev, &bp->bio1))
499
 
                        generic_make_request(&bp->bio1);
500
 
                if (raid0_make_request(mddev, &bp->bio2))
501
 
                        generic_make_request(&bp->bio2);
502
 
 
 
501
                raid0_make_request(mddev, &bp->bio1);
 
502
                raid0_make_request(mddev, &bp->bio2);
503
503
                bio_pair_release(bp);
504
 
                return 0;
 
504
                return;
505
505
        }
506
506
 
507
507
        sector_offset = bio->bi_sector;
511
511
        bio->bi_bdev = tmp_dev->bdev;
512
512
        bio->bi_sector = sector_offset + zone->dev_start +
513
513
                tmp_dev->data_offset;
514
 
        /*
515
 
         * Let the main block layer submit the IO and resolve recursion:
516
 
         */
517
 
        return 1;
 
514
 
 
515
        generic_make_request(bio);
 
516
        return;
518
517
 
519
518
bad_map:
520
519
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
523
522
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
524
523
 
525
524
        bio_io_error(bio);
526
 
        return 0;
 
525
        return;
527
526
}
528
527
 
529
 
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
 
528
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
530
529
{
531
 
#undef MD_DEBUG
532
 
#ifdef MD_DEBUG
533
 
        int j, k, h;
534
 
        char b[BDEVNAME_SIZE];
535
 
        raid0_conf_t *conf = mddev->private;
536
 
        int raid_disks = conf->strip_zone[0].nb_dev;
537
 
 
538
 
        sector_t zone_size;
539
 
        sector_t zone_start = 0;
540
 
        h = 0;
541
 
 
542
 
        for (j = 0; j < conf->nr_strip_zones; j++) {
543
 
                seq_printf(seq, "      z%d", j);
544
 
                seq_printf(seq, "=[");
545
 
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
546
 
                        seq_printf(seq, "%s/", bdevname(
547
 
                                conf->devlist[j*raid_disks + k]
548
 
                                                ->bdev, b));
549
 
 
550
 
                zone_size  = conf->strip_zone[j].zone_end - zone_start;
551
 
                seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
552
 
                        (unsigned long long)zone_start>>1,
553
 
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
554
 
                        (unsigned long long)zone_size>>1);
555
 
                zone_start = conf->strip_zone[j].zone_end;
556
 
        }
557
 
#endif
558
530
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
559
531
        return;
560
532
}
561
533
 
562
 
static void *raid0_takeover_raid45(mddev_t *mddev)
 
534
static void *raid0_takeover_raid45(struct mddev *mddev)
563
535
{
564
 
        mdk_rdev_t *rdev;
565
 
        raid0_conf_t *priv_conf;
 
536
        struct md_rdev *rdev;
 
537
        struct r0conf *priv_conf;
566
538
 
567
539
        if (mddev->degraded != 1) {
568
540
                printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
593
565
        return priv_conf;
594
566
}
595
567
 
596
 
static void *raid0_takeover_raid10(mddev_t *mddev)
 
568
static void *raid0_takeover_raid10(struct mddev *mddev)
597
569
{
598
 
        raid0_conf_t *priv_conf;
 
570
        struct r0conf *priv_conf;
599
571
 
600
572
        /* Check layout:
601
573
         *  - far_copies must be 1
634
606
        return priv_conf;
635
607
}
636
608
 
637
 
static void *raid0_takeover_raid1(mddev_t *mddev)
 
609
static void *raid0_takeover_raid1(struct mddev *mddev)
638
610
{
639
 
        raid0_conf_t *priv_conf;
 
611
        struct r0conf *priv_conf;
640
612
 
641
613
        /* Check layout:
642
614
         *  - (N - 1) mirror drives must be already faulty
660
632
        return priv_conf;
661
633
}
662
634
 
663
 
static void *raid0_takeover(mddev_t *mddev)
 
635
static void *raid0_takeover(struct mddev *mddev)
664
636
{
665
637
        /* raid0 can take over:
666
638
         *  raid4 - if all data disks are active.
691
663
        return ERR_PTR(-EINVAL);
692
664
}
693
665
 
694
 
static void raid0_quiesce(mddev_t *mddev, int state)
 
666
static void raid0_quiesce(struct mddev *mddev, int state)
695
667
{
696
668
}
697
669
 
698
 
static struct mdk_personality raid0_personality=
 
670
static struct md_personality raid0_personality=
699
671
{
700
672
        .name           = "raid0",
701
673
        .level          = 0,