/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
static void autostart_arrays(int part);

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
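/*
 * A minimal tuning sketch (the numbers are illustrative, not
 * recommendations). The per-array sysfs knobs override the global
 * sysctls whenever they are non-zero, which is exactly what
 * speed_min()/speed_max() below implement:
 *
 *	# raise the guaranteed resync floor for all arrays
 *	echo 5000 > /proc/sys/dev/raid/speed_limit_min
 *	# cap resync bandwidth for md0 only
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 */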
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.child		= raid_dir_table,
	},
	{ }
};
static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * like bio_clone, but with a local bio set
 */
static void mddev_bio_destructor(struct bio *bio)
{
	struct mddev *mddev, **mddevp;

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);

	b->bi_destructor = mddev_bio_destructor;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);

	b->bi_destructor = mddev_bio_destructor;

	if (bio_integrity(bio)) {
		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
	}
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
void md_trim_bio(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	struct bio_vec *bvec;

	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;

	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero */
	memmove(bio->bi_io_vec, bio->bi_io_vec + bio->bi_idx,
		(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
	bio->bi_vcnt -= bio->bi_idx;

	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);
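/*
 * A minimal usage sketch (illustrative, mirroring how raid1 uses this
 * helper): clone an incoming bio and trim the clone so it covers only
 * the first max_sectors of the request; max_sectors is a hypothetical
 * caller-side limit.
 *
 *	struct bio *read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 *	md_trim_bio(read_bio, 0, max_sectors);
 */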
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 * start array, stop array, error, add device, remove device,
 * start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns
 * a reference to the current mddev and must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
		mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
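/*
 * A minimal usage sketch (the same pattern md_print_devices() uses
 * later in this file): _tmp is an opaque list cursor, and _mddev is a
 * valid, referenced mddev inside the loop body.
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("md: %s\n", mdname(mddev));
 */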
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL

	smp_rmb(); /* Ensure implications of 'active' are visible */

	if (mddev->suspended) {
		prepare_to_wait(&mddev->sb_wait, &__wait,
				TASK_UNINTERRUPTIBLE);
		if (!mddev->suspended)

		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
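/*
 * A minimal usage sketch (illustrative): callers quiesce the array
 * around a configuration change by pairing the two calls. No new IO
 * is admitted and in-flight IO is drained between them; the helper
 * named here is hypothetical.
 *
 *	mddev_suspend(mddev);
 *	reconfigure_internal_state(mddev);
 *	mddev_resume(mddev);
 */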
int mddev_congested(struct mddev *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);

	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);

			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);

			rdev_dec_pending(rdev, mddev);
		}
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */

	bio->bi_rw &= ~REQ_FLUSH;
	mddev->pers->make_request(mddev, bio);

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
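/*
 * A minimal usage sketch (mirroring how the raid0 personality handles
 * flushes): a make_request method hands REQ_FLUSH bios straight to
 * this helper and returns, leaving resubmission of the data portion
 * to md_submit_flush_data() above.
 *
 *	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 *		md_flush_request(mddev, bio);
 *		return;
 *	}
 */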
/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on. This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	struct mddev *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
}
/* Check that an unplug wakeup will come shortly.
 * If not, wakeup the md thread immediately
 */
int mddev_check_plugged(struct mddev *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
				list_move(&mdcb->cb.list, &plug->cb_list);
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);

	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
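/*
 * A minimal usage sketch (illustrative, following the raid5-style
 * pattern of this era): a personality's make_request path records
 * whether a plug is active and, if not, wakes the md thread itself so
 * queued work is not stranded waiting for an unplug that never comes.
 *
 *	int plugged = mddev_check_plugged(mddev);
 *	(queue the work...)
 *	if (!plugged)
 *		md_wakeup_thread(mddev->thread);
 */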
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up. Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		}
	}
	spin_unlock(&all_mddevs_lock);
}
void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

	spin_lock(&all_mddevs_lock);

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			spin_unlock(&all_mddevs_lock);

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	new->hold_active = UNTIL_IOCTL;

	/* find an unused unit number */
	static int next_minor = 512;
	int start = next_minor;

	dev = MKDEV(MD_MAJOR, next_minor);

	if (next_minor > MINORMASK)

	if (next_minor == start) {
		/* Oh dear, all in use. */
		spin_unlock(&all_mddevs_lock);
	}

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == dev) {

	new->md_minor = MINOR(dev);
	new->hold_active = UNTIL_STOP;
	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);

	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;
}
static inline int mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;
708
if (mddev->to_remove) {
709
/* These cannot be removed under reconfig_mutex as
710
* an access to the files will try to take reconfig_mutex
711
* while holding the file unremovable, which leads to
713
* So hold set sysfs_active while the remove in happeing,
714
* and anything else which might set ->to_remove or my
715
* otherwise change the sysfs namespace will fail with
716
* -EBUSY if sysfs_active is still set.
717
* We set sysfs_active under reconfig_mutex and elsewhere
718
* test it under the same mutex to ensure its correct value
721
struct attribute_group *to_remove = mddev->to_remove;
722
mddev->to_remove = NULL;
723
mddev->sysfs_active = 1;
724
mutex_unlock(&mddev->reconfig_mutex);
726
if (mddev->kobj.sd) {
727
if (to_remove != &md_redundancy_group)
728
sysfs_remove_group(&mddev->kobj, to_remove);
729
if (mddev->pers == NULL ||
730
mddev->pers->sync_request == NULL) {
731
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
732
if (mddev->sysfs_action)
733
sysfs_put(mddev->sysfs_action);
734
mddev->sysfs_action = NULL;
737
mddev->sysfs_active = 0;
739
mutex_unlock(&mddev->reconfig_mutex);
741
/* As we've dropped the mutex we need a spinlock to
742
* make sure the thread doesn't disappear
744
spin_lock(&pers_lock);
745
md_wakeup_thread(mddev->thread);
746
spin_unlock(&pers_lock);
static struct md_rdev *find_rdev_nr(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel) == 0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
	}
}

static void free_disk_sb(struct md_rdev *rdev)
{
	put_page(rdev->sb_page);

	rdev->sb_page = NULL;

	put_page(rdev->bb_page);
	rdev->bb_page = NULL;
}
static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */

	prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&mddev->pending_writes) == 0)

	finish_wait(&mddev->sb_wait, &wq);
}
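/*
 * A minimal usage sketch (the same pairing the rdev_size_change
 * methods below use): queue the superblock write, then wait for
 * pending_writes to drain before treating the metadata as stable.
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(mddev);
 */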
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;

	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))

	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev, b));
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1), GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2), GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
	}

	/*
	 * nr_disks is not constant
	 */
	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
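/*
 * Worked example (illustrative): folding 0x12345678 gives
 * 0x5678 + 0x1234 = 0x68ac after the first line, and the second fold
 * is then a no-op, so the function returns 0x68ac. The fold is applied
 * twice because the first addition can carry out of the low 16 bits
 * (e.g. 0xffff0001 -> 0x10000 -> 0x0001); the second fold absorbs it.
 */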
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u32 *sb32 = (u32 *)sb;

	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;

	for (i = 0; i < MD_SB_BYTES/4; i++)

	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in. Subsequent calls check that dev
 *      is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */
struct super_type {
	struct module *owner;
	int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev,
			  int minor_version);
	int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
					       sector_t num_sectors);
};
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
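/*
 * A minimal usage sketch (the raid0 run() method follows this shape):
 * refuse to start the array when a bitmap is configured on a
 * personality that cannot honour it.
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */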
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
	}

	if (sb->raid_disks <= 0)

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
	else
		rdev->desc_nr = sb->this_disk.number;

	{
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev, b2));
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
		}
		ev2 = md_event(refsb);
	}

	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that */
	if (rdev->sectors >= (2ULL << 32))
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12, &sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		if (sb->disks[rdev->desc_nr].state & (
			(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
		if (ev1 < mddev->events)
		/* just a hot-add of a new device, leave raid_disk at -1 */

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position. We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12, 4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;

	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;

	sb->recovery_cp = mddev->recovery_cp;
	sb->cp_events_hi = (mddev->events>>32);
	sb->cp_events_lo = (u32)mddev->events;
	if (mddev->recovery_cp == MaxSector)
		sb->state = (1 << MD_SB_CLEAN);

	sb->recovery_cp = 0;
1340
sb->chunk_size = mddev->chunk_sectors << 9;
1342
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1343
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1345
sb->disks[0].state = (1<<MD_DISK_REMOVED);
1346
list_for_each_entry(rdev2, &mddev->disks, same_set) {
1349
int is_active = test_bit(In_sync, &rdev2->flags);
1351
if (rdev2->raid_disk >= 0 &&
1352
sb->minor_version >= 91)
1353
/* we have nowhere to store the recovery_offset,
1354
* but if it is not below the reshape_position,
1355
* we can piggy-back on that.
1358
if (rdev2->raid_disk < 0 ||
1359
test_bit(Faulty, &rdev2->flags))
1362
desc_nr = rdev2->raid_disk;
1364
desc_nr = next_spare++;
1365
rdev2->desc_nr = desc_nr;
1366
d = &sb->disks[rdev2->desc_nr];
1368
d->number = rdev2->desc_nr;
1369
d->major = MAJOR(rdev2->bdev->bd_dev);
1370
d->minor = MINOR(rdev2->bdev->bd_dev);
1372
d->raid_disk = rdev2->raid_disk;
1374
d->raid_disk = rdev2->desc_nr; /* compatibility */
1375
if (test_bit(Faulty, &rdev2->flags))
1376
d->state = (1<<MD_DISK_FAULTY);
1377
else if (is_active) {
1378
d->state = (1<<MD_DISK_ACTIVE);
1379
if (test_bit(In_sync, &rdev2->flags))
1380
d->state |= (1<<MD_DISK_SYNC);
1388
if (test_bit(WriteMostly, &rdev2->flags))
1389
d->state |= (1<<MD_DISK_WRITEMOSTLY);
1391
/* now set the "removed" and "faulty" bits on any missing devices */
1392
for (i=0 ; i < mddev->raid_disks ; i++) {
1393
mdp_disk_t *d = &sb->disks[i];
1394
if (d->state == 0 && d->number == 0) {
1397
d->state = (1<<MD_DISK_REMOVED);
1398
d->state |= (1<<MD_DISK_FAULTY);
1402
sb->nr_disks = nr_disks;
1403
sb->active_disks = active;
1404
sb->working_disks = working;
1405
sb->failed_disks = failed;
1406
sb->spare_disks = spare;
1408
sb->this_disk = sb->disks[rdev->desc_nr];
1409
sb->sb_csum = calc_sb_csum(sb);
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32))
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32 *)sb;

	disk_csum = sb->sb_csum;

	for (i = 0; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	newcsum += le16_to_cpu(*(__le16 *)isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;

	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch (minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start &= ~(sector_t)(4*2-1);
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;
	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev, b));
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev, b));
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	    && rdev->data_offset < sb_start + (rdev->sb_size/512))

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))

		offset = le32_to_cpu(sb->bblog_offset);

		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))

		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0; i < (sectors << (9-3)); i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;

			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
		}
	} else if (sb->bblog_offset == 0)
		rdev->badblocks.shift = -1;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev, b),
			       bdevname(refdev->bdev, b2));
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
			le64_to_cpu(sb->data_offset);

		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))

	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}
	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
		if (ev1 < mddev->events)
		/* just a hot-add of a new device, leave raid_disk at -1 */

	if (mddev->level != LEVEL_MULTIPATH) {

		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
		}
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch (role) {
		case 0xffff: /* spare */
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;

	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;

	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);

	sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);

		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);

		seq = read_seqbegin(&bb->lock);

		memset(bbp, 0xff, PAGE_SIZE);

		for (i = 0; i < bb->count; i++) {
			u64 internal_bb = *p++;
			u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
					| BB_LEN(internal_bb));
			*bbp++ = cpu_to_le64(store_bb);
		}
		if (read_seqretry(&bb->lock, seq))

		bb->sector = (rdev->sb_start +
			      (int)le32_to_cpu(sb->bblog_offset));
		bb->size = le16_to_cpu(sb->bblog_size);
	}
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i = 0; i < max_dev; i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
	} else {
		/* minor version 0; superblock after data */
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}
static struct super_type super_types[] = {
	{
		.owner			= THIS_MODULE,
		.load_super		= super_90_load,
		.validate_super		= super_90_validate,
		.sync_super		= super_90_sync,
		.rdev_size_change	= super_90_rdev_size_change,
	},
	{
		.owner			= THIS_MODULE,
		.load_super		= super_1_load,
		.validate_super		= super_1_validate,
		.sync_super		= super_1_sync,
		.rdev_size_change	= super_1_rdev_size_change,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}
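/*
 * A minimal dispatch sketch (illustrative): loading a superblock goes
 * through the same table, indexed by the metadata major version, e.g.
 *
 *	err = super_types[super_format].load_super(rdev, refdev,
 *						   super_minor);
 *
 * so each on-disk format only has to supply the four methods declared
 * in struct super_type above.
 */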
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
			}
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
					  rdev->bdev->bd_disk) < 0)
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
	}
}
EXPORT_SYMBOL(md_integrity_register);
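/*
 * A minimal usage sketch (raid1-style, illustrative): a personality
 * enables end-to-end data integrity by finishing its run() method with
 *
 *	return md_integrity_register(mddev);
 *
 * and re-checks after hot-adding a device via md_integrity_add_rdev()
 * below, which disables integrity if the new disk cannot match.
 */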
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			      rdev->sectors < mddev->dev_sectors)) {
		/* Cannot change size, so fail
		 * If mddev->level <= 0, then we don't care
		 * about aligning sizes (e.g. linear)
		 */
		if (mddev->level > 0)

		mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
		rdev->desc_nr = choice;
	}
	if (find_rdev_nr(mddev, rdev->desc_nr))

	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
	}
	bdevname(rdev->bdev, b);
	while ((s = strchr(b, '/')) != NULL)

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev, b));

	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	kfree(rdev->badblocks.page);
	rdev->badblocks.count = 0;
	rdev->badblocks.page = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
		       __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
}

static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;

	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
	       bdevname(rdev->bdev, b));

	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);

	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
}

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
	       desc->major, desc->minor, desc->raid_disk, desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	printk(KERN_INFO
	       "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
	       sb->major_version, sb->minor_version, sb->patch_version,
	       sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
	       sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
	       sb->level, sb->size, sb->nr_disks, sb->raid_disks,
	       sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
	       " FD:%d SD:%d CSUM:%08x E:%08lx\n",
	       sb->utime, sb->state, sb->active_disks, sb->working_disks,
	       sb->failed_disks, sb->spare_disks,
	       sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
	       le32_to_cpu(sb->major_version),
	       le32_to_cpu(sb->feature_map),
	       (unsigned long long)le64_to_cpu(sb->ctime)
	       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
	       "md:     Dev:%08x UUID: %pU\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
	       le32_to_cpu(sb->level),
	       (unsigned long long)le64_to_cpu(sb->size),
	       le32_to_cpu(sb->raid_disks),
	       le32_to_cpu(sb->layout),
	       le32_to_cpu(sb->chunksize),
	       (unsigned long long)le64_to_cpu(sb->data_offset),
	       (unsigned long long)le64_to_cpu(sb->data_size),
	       (unsigned long long)le64_to_cpu(sb->super_offset),
	       (unsigned long long)le64_to_cpu(sb->recovery_offset),
	       le32_to_cpu(sb->dev_number),
	       (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
	       (unsigned long long)le64_to_cpu(sb->events),
	       (unsigned long long)le64_to_cpu(sb->resync_offset),
	       le32_to_cpu(sb->sb_csum),
	       le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(struct md_rdev *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
	       bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	       test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	       rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90(page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1(page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
2289
static void md_print_devices(void)
{
	struct list_head *tmp;
	struct md_rdev *rdev;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
	printk("\n");
}
static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

repeat:
	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external) {
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			list_for_each_entry(rdev, &mddev->disks, same_set) {
				if (rdev->badblocks.changed) {
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock_irq(&mddev->write_lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		mddev->can_decrease_events = nospares;
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}

	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->saved_raid_disk == -1) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}

		} else if (test_bit(Faulty, &rdev->flags))
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));
		else
			pr_debug("(skipping incremental s/r ");

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			md_ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either; for this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
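/*
 * Illustrative only (not part of the driver): cmd_match() accepts an
 * exact match with at most one trailing newline on cmd, so
 *
 *	cmd_match("idle\n", "idle")  -> 1  (newline from a sysfs write is OK)
 *	cmd_match("idle",   "idle")  -> 1
 *	cmd_match("idlers", "idle")  -> 0  (no prefix matching)
 */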
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct md_rdev *, char *);
	ssize_t (*store)(struct md_rdev *, const char *, size_t);
};
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
		len += sprintf(page+len, "%sfaulty", sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync", sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly", sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags) ||
	    (rdev->badblocks.unacked_exist
	     && !test_bit(Faulty, &rdev->flags))) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	if (test_bit(WriteErrorSeen, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_error", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flags
	 *  -blocked - clears the Blocked and possibly simulates an error
	 *  insync - sets Insync providing device isn't active
	 *  write_error - sets WriteErrorSeen
	 *  -write_error - clears WriteErrorSeen
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			err = 0;
		else
			err = -EBUSY;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			struct mddev *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->badblocks.unacked_exist) {
			/* metadata handler doesn't understand badblocks,
			 * so we need to fail the device
			 */
			md_error(rdev->mddev, rdev);
		}
		clear_bit(Blocked, &rdev->flags);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "write_error")) {
		set_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-write_error")) {
		clear_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	}
	if (!err)
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
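/*
 * Usage sketch (not from the driver): each member device appears under
 * the array's sysfs directory as md/dev-<name>, so, assuming an array
 * md0 with member sda1, its state can be inspected and poked with e.g.
 *
 *	cat  /sys/block/md0/md/dev-sda1/state
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 */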
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_remove_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sysfs_unlink_rdev(rdev->mddev, rdev);
		rdev->raid_disk = -1;
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		struct md_rdev *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		clear_bit(In_sync, &rdev->flags);
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		if (sysfs_link_rdev(rdev->mddev, rdev))
			/* failure here is OK */;
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
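/*
 * For illustration (not part of the driver): "blocks" here are 1K units
 * and sectors are 512 bytes, hence the "* 2".  A write of "1024"
 * (i.e. 1 MiB) yields *sectors == 2048; any value with the top bit set
 * would overflow the conversion and is rejected up front.
 */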
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->sectors, and if
		 * we have to change it back, we will have the lock again.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			mddev_lock(mddev);
			list_for_each_entry(rdev2, &mddev->disks, same_set)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (strict_strtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);


static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	struct mddev *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	ssize_t rv;
	struct mddev *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}
static void rdev_free(struct kobject *ko)
{
	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
	kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	rdev->last_read_error.tv_sec  = 0;
	rdev->last_read_error.tv_nsec = 0;
	rdev->sb_loaded = 0;
	rdev->bb_page = NULL;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	/* Add space to store bad block list.
	 * This reserves the space even on arrays where it cannot
	 * be used - I wonder if that matters
	 */
	rdev->badblocks.count = 0;
	rdev->badblocks.shift = 0;
	rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	seqlock_init(&rdev->badblocks.lock);
	if (rdev->badblocks.page == NULL)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	err = md_rdev_init(rdev);
	if (err)
		goto abort_free;
	err = alloc_disk_sb(rdev);
	if (err)
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	if (super_format == -1)
		/* hot-add for 0.90, or non-persistent: so no badblocks */
		rdev->badblocks.shift = -1;

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	free_disk_sb(rdev);
	kfree(rdev->badblocks.page);
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */
static void analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}
/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale'.
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			unsigned int value;
			value = *cp - '0';
			result = result * 10 + value;
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;
	if (decimals < 0)
		decimals = 0;
	while (decimals < scale) {
		result *= 10;
		decimals++;
	}
	*res = result;
	return 0;
}
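/*
 * Worked example (illustrative only): with scale == 3,
 *
 *	strict_strtoul_scaled("1.27",  &res, 3)  ->  res == 1270
 *	strict_strtoul_scaled("0.002", &res, 3)  ->  res == 2
 *	strict_strtoul_scaled("5",     &res, 3)  ->  res == 5000
 *
 * i.e. "seconds" written by userspace become milliseconds internally.
 */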
static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
	unsigned long msec;

	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
		return -EINVAL;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
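/*
 * Usage sketch (not from the driver): safe_mode_delay takes fractional
 * seconds, parsed by strict_strtoul_scaled() above, e.g.
 *
 *	echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * requests a ~200ms delay (rounded to jiffies, minimum one tick).
 */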
static ssize_t
level_show(struct mddev *mddev, char *page)
{
	struct md_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
	char clevel[16];
	ssize_t rv = len;
	struct md_personality *pers;
	long level;
	void *priv;
	struct md_rdev *rdev;

	if (mddev->pers == NULL) {
		if (len == 0)
			return 0;
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */

	if (mddev->sync_thread ||
	    mddev->reshape_position != MaxSector ||
	    mddev->sysfs_active)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(clevel))
		return -EINVAL;
	strncpy(clevel, buf, len);
	if (clevel[len-1] == '\n')
		len--;
	clevel[len] = 0;
	if (strict_strtol(clevel, 10, &level))
		level = LEVEL_NONE;

	if (request_module("md-%s", clevel) != 0)
		request_module("md-level-%s", clevel);
	spin_lock(&pers_lock);
	pers = find_pers(level, clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), clevel);
		return -EINVAL;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set)
		rdev->new_raid_disk = rdev->raid_disk;

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), clevel);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);

	if (mddev->pers->sync_request == NULL &&
	    pers->sync_request != NULL) {
		/* need to add the md_redundancy_group */
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
	}
	if (mddev->pers->sync_request != NULL &&
	    pers->sync_request == NULL) {
		/* need to remove the md_redundancy_group */
		if (mddev->to_remove == NULL)
			mddev->to_remove = &md_redundancy_group;
	}

	if (mddev->pers->sync_request == NULL &&
	    mddev->external) {
		/* We are converting from a no-redundancy array
		 * to a redundancy array and metadata is managed
		 * externally so we need to be sure that writes
		 * won't block due to a need to transition
		 *      clean->dirty
		 * until external management is started.
		 */
		mddev->in_sync = 0;
		mddev->safemode_delay = 0;
		mddev->safemode = 0;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk >= mddev->raid_disks)
			rdev->new_raid_disk = -1;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		sysfs_unlink_rdev(mddev, rdev);
	}
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		rdev->raid_disk = rdev->new_raid_disk;
		if (rdev->raid_disk < 0)
			clear_bit(In_sync, &rdev->flags);
		else {
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING "md: cannot register rd%d"
				       " for %s after level change\n",
				       rdev->raid_disk, mdname(mddev));
		}
	}

	module_put(mddev->pers->owner);
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
	mddev->degraded = 0;
	if (mddev->pers->sync_request == NULL) {
		/* this is now an array without redundancy, so
		 * it must always be in_sync
		 */
		mddev->in_sync = 1;
		del_timer_sync(&mddev->safemode_timer);
	}
	pers->run(mddev);
	mddev_resume(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify(&mddev->kobj, NULL, "level");
	md_new_event(mddev);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
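/*
 * Usage sketch (illustrative; only conversions for which the target
 * personality provides ->takeover will succeed), e.g. converting a
 * two-disk RAID1 array to RAID5:
 *
 *	echo raid5 > /sys/block/md0/md/level
 */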
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_layout = n;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_layout = mddev->layout;
			return err;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(struct mddev *mddev, int raid_disks);

static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_chunk_sectors = n >> 9;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_chunk_sectors = mddev->chunk_sectors;
			return err;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
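/*
 * Note (illustrative): chunk_size is presented in bytes but stored as
 * sectors (the "n >> 9" above), so writes should be a multiple of 512,
 * e.g.
 *
 *	echo 524288 > /sys/block/md0/md/chunk_size	# 512 KiB chunks
 */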
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return -EBUSY;
	if (cmd_match(buf, "none"))
		n = MaxSector;
	else if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev *mddev, int ro, int is_open);
static int md_set_readonly(struct mddev *mddev, int is_open);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = md_set_readonly(mddev, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = md_set_readonly(mddev, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		if (mddev->hold_active == UNTIL_IOCTL)
			mddev->hold_active = 0;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
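/*
 * Usage sketch (not from the driver): the same strings that
 * array_state_show() prints are accepted by array_state_store(), e.g.
 *
 *	cat  /sys/block/md0/md/array_state	# e.g. "clean"
 *	echo readonly > /sys/block/md0/md/array_state
 */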
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&mddev->max_corr_read_errors, n);
		return len;
	}
	return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
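/*
 * Usage sketch (illustrative device numbers): new_dev takes a
 * major:minor pair, so assuming /dev/sdb1 is 8:17,
 *
 *	echo 8:17 > /sys/block/md0/md/new_dev
 *
 * imports that device and binds it to the array.
 */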
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
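/*
 * Usage sketch (illustrative chunk numbers): bitmap_set_bits accepts
 * single chunks and ranges, e.g.
 *
 *	echo "100 200-300" > /sys/block/md0/md/bitmap_set_bits
 *
 * marks chunk 100 and chunks 200-300 dirty so they will be resynced.
 */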
static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
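/*
 * For illustration, the three accepted forms of metadata_version:
 *
 *	echo 1.2 > /sys/block/md0/md/metadata_version	  # internal v1.2
 *	echo external:imsm > /sys/block/md0/md/metadata_version
 *	echo none > /sys/block/md0/md/metadata_version
 *
 * ("imsm" is just an example of an externally managed format name.)
 */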
static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static void reap_sync_thread(struct mddev *mddev);

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);


static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
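/*
 * Usage sketch (not from the driver): a typical scrub cycle drives
 * sync_action and reads mismatch_cnt afterwards, e.g.
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	... wait until sync_action reads "idle" ...
 *	cat /sys/block/md0/md/mismatch_cnt
 */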
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
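/*
 * Usage sketch (illustrative values): these override the global
 * /proc/sys/dev/raid/speed_limit_{min,max} settings for one array, e.g.
 *
 *	echo 50000  > /sys/block/md0/md/sync_speed_min	# KB/sec
 *	echo system > /sys/block/md0/md/sync_speed_min	# back to global
 */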
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;
	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
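/*
 * For illustration: db above is a sector count and dt is in seconds,
 * so db/dt/2 converts 512-byte sectors per second into K/sec; e.g.
 * 4096 sectors resynced in one second reads back as 2048 (KB/sec).
 */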
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_lo;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_lo = new;
	if (new >= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_hi;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_hi = new;
	if (new <= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
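/*
 * Usage sketch (illustrative offsets): suspend_lo/suspend_hi define a
 * sector range [lo, hi) in which IO is blocked, e.g.
 *
 *	echo 0    > /sys/block/md0/md/suspend_lo
 *	echo 8192 > /sys/block/md0/md/suspend_hi
 *
 * suspends the first 4 MiB; writing lo >= hi empties the window.
 */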
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	if (mddev->pers) {
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	}
	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}
static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
4682
md_alloc(dev, NULL);
4686
static int add_named_array(const char *val, struct kernel_param *kp)
4688
/* val must be "md_*" where * is not all digits.
4689
* We allocate an array with a large free minor number, and
4690
* set the name to val. val must not already be an active name.
4692
int len = strlen(val);
4693
char buf[DISK_NAME_LEN];
4695
while (len && val[len-1] == '\n')
4697
if (len >= DISK_NAME_LEN)
4699
strlcpy(buf, val, len+1);
4700
if (strncmp(buf, "md_", 3) != 0)
4702
return md_alloc(0, buf);
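/*
 * Illustrative note (not in the original source): add_named_array is
 * typically wired up, elsewhere in this file, as the write handler for
 * the 'new_array' module parameter, so that e.g.
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 * creates a named array.  The exact module_param_call() wiring is an
 * assumption here.
 */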
static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}
4717
static int start_dirty_degraded;
4719
int md_run(struct mddev *mddev)
4722
struct md_rdev *rdev;
4723
struct md_personality *pers;
4725
if (list_empty(&mddev->disks))
4726
/* cannot run an array with no devices.. */
4731
/* Cannot run until previous stop completes properly */
4732
if (mddev->sysfs_active)
4736
* Analyze all RAID superblock(s)
4738
if (!mddev->raid_disks) {
4739
if (!mddev->persistent)
4744
if (mddev->level != LEVEL_NONE)
4745
request_module("md-level-%d", mddev->level);
4746
else if (mddev->clevel[0])
4747
request_module("md-%s", mddev->clevel);
4750
* Drop all container device buffers, from now on
4751
* the only valid external interface is through the md
4754
list_for_each_entry(rdev, &mddev->disks, same_set) {
4755
if (test_bit(Faulty, &rdev->flags))
4757
sync_blockdev(rdev->bdev);
4758
invalidate_bdev(rdev->bdev);
4760
/* perform some consistency tests on the device.
4761
* We don't want the data to overlap the metadata,
4762
* Internal Bitmap issues have been handled elsewhere.
4764
if (rdev->meta_bdev) {
4765
/* Nothing to check */;
4766
} else if (rdev->data_offset < rdev->sb_start) {
4767
if (mddev->dev_sectors &&
4768
rdev->data_offset + mddev->dev_sectors
4770
printk("md: %s: data overlaps metadata\n",
4775
if (rdev->sb_start + rdev->sb_size/512
4776
> rdev->data_offset) {
4777
printk("md: %s: metadata overlaps data\n",
4782
sysfs_notify_dirent_safe(rdev->sysfs_state);
4785
if (mddev->bio_set == NULL)
4786
mddev->bio_set = bioset_create(BIO_POOL_SIZE,
4787
sizeof(struct mddev *));
4789
spin_lock(&pers_lock);
4790
pers = find_pers(mddev->level, mddev->clevel);
4791
if (!pers || !try_module_get(pers->owner)) {
4792
spin_unlock(&pers_lock);
4793
if (mddev->level != LEVEL_NONE)
4794
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4797
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4802
spin_unlock(&pers_lock);
4803
if (mddev->level != pers->level) {
4804
mddev->level = pers->level;
4805
mddev->new_level = pers->level;
4807
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4809
if (mddev->reshape_position != MaxSector &&
4810
pers->start_reshape == NULL) {
4811
/* This personality cannot handle reshaping... */
4813
module_put(pers->owner);
4817
if (pers->sync_request) {
4818
/* Warn if this is a potentially silly
4821
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4822
struct md_rdev *rdev2;
4825
list_for_each_entry(rdev, &mddev->disks, same_set)
4826
list_for_each_entry(rdev2, &mddev->disks, same_set) {
4828
rdev->bdev->bd_contains ==
4829
rdev2->bdev->bd_contains) {
4831
"%s: WARNING: %s appears to be"
4832
" on the same physical disk as"
4835
bdevname(rdev->bdev,b),
4836
bdevname(rdev2->bdev,b2));
4843
"True protection against single-disk"
4844
" failure might be compromised.\n");
4847
mddev->recovery = 0;
4848
/* may be over-ridden by personality */
4849
mddev->resync_max_sectors = mddev->dev_sectors;
4851
mddev->ok_start_degraded = start_dirty_degraded;
4853
if (start_readonly && mddev->ro == 0)
4854
mddev->ro = 2; /* read-only, but switch on first write */
4856
err = mddev->pers->run(mddev);
4858
printk(KERN_ERR "md: pers->run() failed ...\n");
4859
else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4860
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4861
" but 'external_size' not in effect?\n", __func__);
4863
"md: invalid array_size %llu > default size %llu\n",
4864
(unsigned long long)mddev->array_sectors / 2,
4865
(unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4867
mddev->pers->stop(mddev);
4869
if (err == 0 && mddev->pers->sync_request) {
4870
err = bitmap_create(mddev);
4872
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4873
mdname(mddev), err);
4874
mddev->pers->stop(mddev);
4878
module_put(mddev->pers->owner);
4880
bitmap_destroy(mddev);
4883
if (mddev->pers->sync_request) {
4884
if (mddev->kobj.sd &&
4885
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4887
"md: cannot register extra attributes for %s\n",
4889
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
4890
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
4893
atomic_set(&mddev->writes_pending,0);
4894
atomic_set(&mddev->max_corr_read_errors,
4895
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4896
mddev->safemode = 0;
4897
mddev->safemode_timer.function = md_safemode_timeout;
4898
mddev->safemode_timer.data = (unsigned long) mddev;
4899
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4903
list_for_each_entry(rdev, &mddev->disks, same_set)
4904
if (rdev->raid_disk >= 0)
4905
if (sysfs_link_rdev(mddev, rdev))
4906
/* failure here is OK */;
4908
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4911
md_update_sb(mddev, 0);
4913
md_new_event(mddev);
4914
sysfs_notify_dirent_safe(mddev->sysfs_state);
4915
sysfs_notify_dirent_safe(mddev->sysfs_action);
4916
sysfs_notify(&mddev->kobj, NULL, "degraded");
4919
EXPORT_SYMBOL_GPL(md_run);
4921
static int do_md_run(struct mddev *mddev)
4925
err = md_run(mddev);
4928
err = bitmap_load(mddev);
4930
bitmap_destroy(mddev);
4934
md_wakeup_thread(mddev->thread);
4935
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4937
set_capacity(mddev->gendisk, mddev->array_sectors);
4938
revalidate_disk(mddev->gendisk);
4940
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4945
static int restart_array(struct mddev *mddev)
4947
struct gendisk *disk = mddev->gendisk;
4949
/* Complain if it has no devices */
4950
if (list_empty(&mddev->disks))
4956
mddev->safemode = 0;
4958
set_disk_ro(disk, 0);
4959
printk(KERN_INFO "md: %s switched to read-write mode.\n",
4961
/* Kick recovery or resync if necessary */
4962
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4963
md_wakeup_thread(mddev->thread);
4964
md_wakeup_thread(mddev->sync_thread);
4965
sysfs_notify_dirent_safe(mddev->sysfs_state);
4969
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
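/*
 * Illustrative sketch (not part of the driver): the pair above is a tiny
 * lock-out protocol on i_writecount.  A count of -1 means "writes
 * denied"; a count above 1 means another writer already holds the file,
 * so denial must fail.  A stand-alone model of that convention, using a
 * plain demo counter instead of the inode field:
 */
#if 0
#include <stdio.h>

static int writecount = 1;	/* our own reference, as in the md case */

static int demo_deny_write_access(void)
{
	if (writecount > 1)
		return -1;	/* someone else can write: refuse */
	writecount = -1;	/* negative count blocks new writers */
	return 0;
}

static void demo_restore_write_access(void)
{
	writecount = 1;		/* back to "one holder, writes allowed" */
}

int main(void)
{
	printf("deny: %d\n", demo_deny_write_access());
	demo_restore_write_access();
	return 0;
}
#endif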
4995
static void md_clean(struct mddev *mddev)
4997
mddev->array_sectors = 0;
4998
mddev->external_size = 0;
4999
mddev->dev_sectors = 0;
5000
mddev->raid_disks = 0;
5001
mddev->recovery_cp = 0;
5002
mddev->resync_min = 0;
5003
mddev->resync_max = MaxSector;
5004
mddev->reshape_position = MaxSector;
5005
mddev->external = 0;
5006
mddev->persistent = 0;
5007
mddev->level = LEVEL_NONE;
5008
mddev->clevel[0] = 0;
5011
mddev->metadata_type[0] = 0;
5012
mddev->chunk_sectors = 0;
5013
mddev->ctime = mddev->utime = 0;
5015
mddev->max_disks = 0;
5017
mddev->can_decrease_events = 0;
5018
mddev->delta_disks = 0;
5019
mddev->new_level = LEVEL_NONE;
5020
mddev->new_layout = 0;
5021
mddev->new_chunk_sectors = 0;
5022
mddev->curr_resync = 0;
5023
mddev->resync_mismatches = 0;
5024
mddev->suspend_lo = mddev->suspend_hi = 0;
5025
mddev->sync_speed_min = mddev->sync_speed_max = 0;
5026
mddev->recovery = 0;
5029
mddev->degraded = 0;
5030
mddev->safemode = 0;
5031
mddev->bitmap_info.offset = 0;
5032
mddev->bitmap_info.default_offset = 0;
5033
mddev->bitmap_info.chunksize = 0;
5034
mddev->bitmap_info.daemon_sleep = 0;
5035
mddev->bitmap_info.max_write_behind = 0;
5038
static void __md_stop_writes(struct mddev *mddev)
5040
if (mddev->sync_thread) {
5041
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5042
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5043
reap_sync_thread(mddev);
5046
del_timer_sync(&mddev->safemode_timer);
5048
bitmap_flush(mddev);
5049
md_super_wait(mddev);
5051
if (!mddev->in_sync || mddev->flags) {
5052
/* mark array as shutdown cleanly */
5054
md_update_sb(mddev, 1);
5058
void md_stop_writes(struct mddev *mddev)
5061
__md_stop_writes(mddev);
5062
mddev_unlock(mddev);
5064
EXPORT_SYMBOL_GPL(md_stop_writes);
5066
void md_stop(struct mddev *mddev)
5069
mddev->pers->stop(mddev);
5070
if (mddev->pers->sync_request && mddev->to_remove == NULL)
5071
mddev->to_remove = &md_redundancy_group;
5072
module_put(mddev->pers->owner);
5074
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5076
EXPORT_SYMBOL_GPL(md_stop);
5078
static int md_set_readonly(struct mddev *mddev, int is_open)
5081
mutex_lock(&mddev->open_mutex);
5082
if (atomic_read(&mddev->openers) > is_open) {
5083
printk("md: %s still in use.\n",mdname(mddev));
5088
__md_stop_writes(mddev);
5094
set_disk_ro(mddev->gendisk, 1);
5095
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5096
sysfs_notify_dirent_safe(mddev->sysfs_state);
5100
mutex_unlock(&mddev->open_mutex);
5105
* 0 - completely stop and dis-assemble array
5106
* 2 - stop but do not disassemble array
5108
static int do_md_stop(struct mddev * mddev, int mode, int is_open)
5110
struct gendisk *disk = mddev->gendisk;
5111
struct md_rdev *rdev;
5113
mutex_lock(&mddev->open_mutex);
5114
if (atomic_read(&mddev->openers) > is_open ||
5115
mddev->sysfs_active) {
5116
printk("md: %s still in use.\n",mdname(mddev));
5117
mutex_unlock(&mddev->open_mutex);
5123
set_disk_ro(disk, 0);
5125
__md_stop_writes(mddev);
5127
mddev->queue->merge_bvec_fn = NULL;
5128
mddev->queue->backing_dev_info.congested_fn = NULL;
5130
/* tell userspace to handle 'inactive' */
5131
sysfs_notify_dirent_safe(mddev->sysfs_state);
5133
list_for_each_entry(rdev, &mddev->disks, same_set)
5134
if (rdev->raid_disk >= 0)
5135
sysfs_unlink_rdev(mddev, rdev);
5137
set_capacity(disk, 0);
5138
mutex_unlock(&mddev->open_mutex);
5140
revalidate_disk(disk);
5145
mutex_unlock(&mddev->open_mutex);
5147
* Free resources if final stop
5150
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5152
bitmap_destroy(mddev);
5153
if (mddev->bitmap_info.file) {
5154
restore_bitmap_write_access(mddev->bitmap_info.file);
5155
fput(mddev->bitmap_info.file);
5156
mddev->bitmap_info.file = NULL;
5158
mddev->bitmap_info.offset = 0;
5160
export_array(mddev);
5163
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5164
if (mddev->hold_active == UNTIL_STOP)
5165
mddev->hold_active = 0;
5167
blk_integrity_unregister(disk);
5168
md_new_event(mddev);
5169
sysfs_notify_dirent_safe(mddev->sysfs_state);
5174
static void autorun_array(struct mddev *mddev)
5176
struct md_rdev *rdev;
5179
if (list_empty(&mddev->disks))
5182
printk(KERN_INFO "md: running: ");
5184
list_for_each_entry(rdev, &mddev->disks, same_set) {
5185
char b[BDEVNAME_SIZE];
5186
printk("<%s>", bdevname(rdev->bdev,b));
5190
err = do_md_run(mddev);
5192
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5193
do_md_stop(mddev, 0, 0);
5198
* lets try to run arrays based on all disks that have arrived
5199
* until now. (those are in pending_raid_disks)
5201
* the method: pick the first pending disk, collect all disks with
5202
* the same UUID, remove all from the pending list and put them into
5203
* the 'same_array' list. Then order this list based on superblock
5204
* update time (freshest comes first), kick out 'old' disks and
5205
* compare superblocks. If everything's fine then run it.
5207
* If "unit" is allocated, then bump its reference count
5209
static void autorun_devices(int part)
5211
struct md_rdev *rdev0, *rdev, *tmp;
5212
struct mddev *mddev;
5213
char b[BDEVNAME_SIZE];
5215
printk(KERN_INFO "md: autorun ...\n");
5216
while (!list_empty(&pending_raid_disks)) {
5219
LIST_HEAD(candidates);
5220
rdev0 = list_entry(pending_raid_disks.next,
5221
struct md_rdev, same_set);
5223
printk(KERN_INFO "md: considering %s ...\n",
5224
bdevname(rdev0->bdev,b));
5225
INIT_LIST_HEAD(&candidates);
5226
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5227
if (super_90_load(rdev, rdev0, 0) >= 0) {
5228
printk(KERN_INFO "md: adding %s ...\n",
5229
bdevname(rdev->bdev,b));
5230
list_move(&rdev->same_set, &candidates);
5233
* now we have a set of devices, with all of them having
5234
* mostly sane superblocks. It's time to allocate the
5238
dev = MKDEV(mdp_major,
5239
rdev0->preferred_minor << MdpMinorShift);
5240
unit = MINOR(dev) >> MdpMinorShift;
5242
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5245
if (rdev0->preferred_minor != unit) {
5246
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5247
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5251
md_probe(dev, NULL, NULL);
5252
mddev = mddev_find(dev);
5253
if (!mddev || !mddev->gendisk) {
5257
"md: cannot allocate memory for md drive.\n");
5260
if (mddev_lock(mddev))
5261
printk(KERN_WARNING "md: %s locked, cannot run\n",
5263
else if (mddev->raid_disks || mddev->major_version
5264
|| !list_empty(&mddev->disks)) {
5266
"md: %s already running, cannot run %s\n",
5267
mdname(mddev), bdevname(rdev0->bdev,b));
5268
mddev_unlock(mddev);
5270
printk(KERN_INFO "md: created %s\n", mdname(mddev));
5271
mddev->persistent = 1;
5272
rdev_for_each_list(rdev, tmp, &candidates) {
5273
list_del_init(&rdev->same_set);
5274
if (bind_rdev_to_array(rdev, mddev))
5277
autorun_array(mddev);
5278
mddev_unlock(mddev);
5280
/* on success, candidates will be empty, on error
5283
rdev_for_each_list(rdev, tmp, &candidates) {
5284
list_del_init(&rdev->same_set);
5289
printk(KERN_INFO "md: ... autorun DONE.\n");
5291
#endif /* !MODULE */
5293
static int get_version(void __user *arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}

static int get_array_info(struct mddev *mddev, void __user *arg)
{
	mdu_array_info_t info;
	int nr, working, insync, failed, spare;
	struct md_rdev *rdev;

	nr = working = insync = failed = spare = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent = !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = insync;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
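/*
 * Illustrative user-space sketch (not part of the driver): querying an
 * array through the GET_ARRAY_INFO ioctl serviced above.  "/dev/md0" is
 * an assumed device path.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		return 1;
	}
	printf("level=%d raid_disks=%d active=%d failed=%d spare=%d\n",
	       info.level, info.raid_disks, info.active_disks,
	       info.failed_disks, info.spare_disks);
	close(fd);
	return 0;
}
#endif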
5360
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
5362
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5363
char *ptr, *buf = NULL;
5366
if (md_allow_write(mddev))
5367
file = kmalloc(sizeof(*file), GFP_NOIO);
5369
file = kmalloc(sizeof(*file), GFP_KERNEL);
5374
/* bitmap disabled, zero the first byte and copy out */
5375
if (!mddev->bitmap || !mddev->bitmap->file) {
5376
file->pathname[0] = '\0';
5380
buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
5384
ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
5388
strcpy(file->pathname, ptr);
5392
if (copy_to_user(arg, file, sizeof(*file)))
5400
static int get_disk_info(struct mddev * mddev, void __user * arg)
5402
mdu_disk_info_t info;
5403
struct md_rdev *rdev;
5405
if (copy_from_user(&info, arg, sizeof(info)))
5408
rdev = find_rdev_nr(mddev, info.number);
5410
info.major = MAJOR(rdev->bdev->bd_dev);
5411
info.minor = MINOR(rdev->bdev->bd_dev);
5412
info.raid_disk = rdev->raid_disk;
5414
if (test_bit(Faulty, &rdev->flags))
5415
info.state |= (1<<MD_DISK_FAULTY);
5416
else if (test_bit(In_sync, &rdev->flags)) {
5417
info.state |= (1<<MD_DISK_ACTIVE);
5418
info.state |= (1<<MD_DISK_SYNC);
5420
if (test_bit(WriteMostly, &rdev->flags))
5421
info.state |= (1<<MD_DISK_WRITEMOSTLY);
5423
info.major = info.minor = 0;
5424
info.raid_disk = -1;
5425
info.state = (1<<MD_DISK_REMOVED);
5428
if (copy_to_user(arg, &info, sizeof(info)))
5434
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
5436
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5437
struct md_rdev *rdev;
5438
dev_t dev = MKDEV(info->major,info->minor);
5440
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5443
if (!mddev->raid_disks) {
5445
/* expecting a device which has a superblock */
5446
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5449
"md: md_import_device returned %ld\n",
5451
return PTR_ERR(rdev);
5453
if (!list_empty(&mddev->disks)) {
5454
struct md_rdev *rdev0
5455
= list_entry(mddev->disks.next,
5456
struct md_rdev, same_set);
5457
err = super_types[mddev->major_version]
5458
.load_super(rdev, rdev0, mddev->minor_version);
5461
"md: %s has different UUID to %s\n",
5462
bdevname(rdev->bdev,b),
5463
bdevname(rdev0->bdev,b2));
5468
err = bind_rdev_to_array(rdev, mddev);
5475
* add_new_disk can be used once the array is assembled
5476
* to add "hot spares". They must already have a superblock
5481
if (!mddev->pers->hot_add_disk) {
5483
"%s: personality does not support diskops!\n",
5487
if (mddev->persistent)
5488
rdev = md_import_device(dev, mddev->major_version,
5489
mddev->minor_version);
5491
rdev = md_import_device(dev, -1, -1);
5494
"md: md_import_device returned %ld\n",
5496
return PTR_ERR(rdev);
5498
/* set saved_raid_disk if appropriate */
5499
if (!mddev->persistent) {
5500
if (info->state & (1<<MD_DISK_SYNC) &&
5501
info->raid_disk < mddev->raid_disks) {
5502
rdev->raid_disk = info->raid_disk;
5503
set_bit(In_sync, &rdev->flags);
5505
rdev->raid_disk = -1;
5507
super_types[mddev->major_version].
5508
validate_super(mddev, rdev);
5509
if ((info->state & (1<<MD_DISK_SYNC)) &&
5510
(!test_bit(In_sync, &rdev->flags) ||
5511
rdev->raid_disk != info->raid_disk)) {
5512
			/* This was a hot-add request, but the event
			 * counts don't match, so reject it.
5519
if (test_bit(In_sync, &rdev->flags))
5520
rdev->saved_raid_disk = rdev->raid_disk;
5522
rdev->saved_raid_disk = -1;
5524
clear_bit(In_sync, &rdev->flags); /* just to be sure */
5525
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5526
set_bit(WriteMostly, &rdev->flags);
5528
clear_bit(WriteMostly, &rdev->flags);
5530
rdev->raid_disk = -1;
5531
err = bind_rdev_to_array(rdev, mddev);
5532
if (!err && !mddev->pers->hot_remove_disk) {
5533
		/* If there is hot_add_disk but no hot_remove_disk,
		 * then newly added disks are for geometry changes
		 * and should be added immediately.
		 */
5537
super_types[mddev->major_version].
5538
validate_super(mddev, rdev);
5539
err = mddev->pers->hot_add_disk(mddev, rdev);
5541
unbind_rdev_from_array(rdev);
5546
sysfs_notify_dirent_safe(rdev->sysfs_state);
5548
md_update_sb(mddev, 1);
5549
if (mddev->degraded)
5550
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5551
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5553
md_new_event(mddev);
5554
md_wakeup_thread(mddev->thread);
5558
/* otherwise, add_new_disk is only allowed
5559
* for major_version==0 superblocks
5561
if (mddev->major_version != 0) {
5562
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5567
if (!(info->state & (1<<MD_DISK_FAULTY))) {
5569
rdev = md_import_device(dev, -1, 0);
5572
"md: error, md_import_device() returned %ld\n",
5574
return PTR_ERR(rdev);
5576
rdev->desc_nr = info->number;
5577
if (info->raid_disk < mddev->raid_disks)
5578
rdev->raid_disk = info->raid_disk;
5580
rdev->raid_disk = -1;
5582
if (rdev->raid_disk < mddev->raid_disks)
5583
if (info->state & (1<<MD_DISK_SYNC))
5584
set_bit(In_sync, &rdev->flags);
5586
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5587
set_bit(WriteMostly, &rdev->flags);
5589
if (!mddev->persistent) {
5590
printk(KERN_INFO "md: nonpersistent superblock ...\n");
5591
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5593
rdev->sb_start = calc_dev_sboffset(rdev);
5594
rdev->sectors = rdev->sb_start;
5596
err = bind_rdev_to_array(rdev, mddev);
5606
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
5608
char b[BDEVNAME_SIZE];
5609
struct md_rdev *rdev;
5611
rdev = find_rdev(mddev, dev);
5615
if (rdev->raid_disk >= 0)
5618
kick_rdev_from_array(rdev);
5619
md_update_sb(mddev, 1);
5620
md_new_event(mddev);
5624
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5625
bdevname(rdev->bdev,b), mdname(mddev));
5629
static int hot_add_disk(struct mddev * mddev, dev_t dev)
5631
char b[BDEVNAME_SIZE];
5633
struct md_rdev *rdev;
5638
if (mddev->major_version != 0) {
5639
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5640
" version-0 superblocks.\n",
5644
if (!mddev->pers->hot_add_disk) {
5646
"%s: personality does not support diskops!\n",
5651
rdev = md_import_device(dev, -1, 0);
5654
"md: error, md_import_device() returned %ld\n",
5659
if (mddev->persistent)
5660
rdev->sb_start = calc_dev_sboffset(rdev);
5662
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5664
rdev->sectors = rdev->sb_start;
5666
if (test_bit(Faulty, &rdev->flags)) {
5668
"md: can not hot-add faulty %s disk to %s!\n",
5669
bdevname(rdev->bdev,b), mdname(mddev));
5673
clear_bit(In_sync, &rdev->flags);
5675
rdev->saved_raid_disk = -1;
5676
err = bind_rdev_to_array(rdev, mddev);
5681
* The rest should better be atomic, we can have disk failures
5682
* noticed in interrupt contexts ...
5685
rdev->raid_disk = -1;
5687
md_update_sb(mddev, 1);
5690
* Kick recovery, maybe this spare has to be added to the
5691
* array immediately.
5693
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5694
md_wakeup_thread(mddev->thread);
5695
md_new_event(mddev);
5703
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_info.file = fget(fd);

		if (mddev->bitmap_info.file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_info.file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
			return err;
		}
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
		}
		mddev->bitmap_info.file = NULL;
	}

	return err;
}
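/*
 * Illustrative user-space sketch (not part of the driver): attaching an
 * external bitmap with SET_BITMAP_FILE.  The ioctl argument is the file
 * descriptor of the bitmap file (or -1 to remove a file bitmap); the
 * paths are assumptions for the example.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	int md = open("/dev/md0", O_RDONLY);
	int bm = open("/var/lib/md0.bitmap", O_RDWR);

	if (md < 0 || bm < 0)
		return 1;
	if (ioctl(md, SET_BITMAP_FILE, bm) < 0)
		perror("SET_BITMAP_FILE");
	close(bm);
	close(md);
	return 0;
}
#endif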
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime         = get_seconds();
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = !info->not_persistent;
	mddev->external      = 0;

	mddev->layout        = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_info.offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
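/*
 * Illustrative user-space sketch (not part of the driver): the "new
 * array" usage of SET_ARRAY_INFO described above, with raid_disks
 * non-zero so that level/size/layout/chunk_size define the shape.  All
 * concrete values are assumptions for the example.
 */
#if 0
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	info.level = 1;			/* RAID1 */
	info.raid_disks = 2;
	info.size = 0;			/* 0: let the driver pick a default */
	info.not_persistent = 0;	/* write 0.90 superblocks */
	if (ioctl(fd, SET_ARRAY_INFO, &info) < 0)
		return 1;
	close(fd);
	return 0;
}
#endif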
5846
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
5848
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5850
if (mddev->external_size)
5853
mddev->array_sectors = array_sectors;
5855
EXPORT_SYMBOL(md_set_array_sectors);
5857
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
	struct md_rdev *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is < data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
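	/*
	 * Illustrative sketch (not part of the driver): the num_sectors
	 * selection described above, in isolation.  With num_sectors == 0
	 * we shrink to the largest size every device can hold; otherwise
	 * every device must be able to hold the requested size.
	 */
#if 0
#include <stdint.h>

/* pick the per-device size, given each device's available sectors */
static uint64_t pick_size(const uint64_t *avail, int n, uint64_t num_sectors)
{
	int fit = (num_sectors == 0);
	int i;

	for (i = 0; i < n; i++) {
		if (fit && (num_sectors == 0 || num_sectors > avail[i]))
			num_sectors = avail[i];	/* shrink to what fits */
		if (avail[i] < num_sectors)
			return 0;		/* requested size too big */
	}
	return num_sectors;
}
#endif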
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv)
		revalidate_disk(mddev->gendisk);
	return rv;
}
5895
static int update_raid_disks(struct mddev *mddev, int raid_disks)
5898
/* change the number of raid disks */
5899
if (mddev->pers->check_reshape == NULL)
5901
if (raid_disks <= 0 ||
5902
(mddev->max_disks && raid_disks >= mddev->max_disks))
5904
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5906
mddev->delta_disks = raid_disks - mddev->raid_disks;
5908
rv = mddev->pers->check_reshape(mddev);
5910
mddev->delta_disks = 0;
5916
* update_array_info is used to change the configuration of an
5918
* The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
5919
* fields in the info are checked against the array.
5920
* Any differences that cannot be handled will cause an error.
5921
* Normally, only one change can be managed at a time.
5923
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
5929
/* calculate expected state,ignoring low bits */
5930
if (mddev->bitmap && mddev->bitmap_info.offset)
5931
state |= (1 << MD_SB_BITMAP_PRESENT);
5933
if (mddev->major_version != info->major_version ||
5934
mddev->minor_version != info->minor_version ||
5935
/* mddev->patch_version != info->patch_version || */
5936
mddev->ctime != info->ctime ||
5937
mddev->level != info->level ||
5938
/* mddev->layout != info->layout || */
5939
!mddev->persistent != info->not_persistent||
5940
mddev->chunk_sectors != info->chunk_size >> 9 ||
5941
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5942
((state^info->state) & 0xfffffe00)
5945
/* Check there is only one change */
5946
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5948
if (mddev->raid_disks != info->raid_disks)
5950
if (mddev->layout != info->layout)
5952
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5959
if (mddev->layout != info->layout) {
5961
* we don't need to do anything at the md level, the
5962
* personality will take care of it all.
5964
if (mddev->pers->check_reshape == NULL)
5967
mddev->new_layout = info->layout;
5968
rv = mddev->pers->check_reshape(mddev);
5970
mddev->new_layout = mddev->layout;
5974
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5975
rv = update_size(mddev, (sector_t)info->size * 2);
5977
if (mddev->raid_disks != info->raid_disks)
5978
rv = update_raid_disks(mddev, info->raid_disks);
5980
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5981
if (mddev->pers->quiesce == NULL)
5983
if (mddev->recovery || mddev->sync_thread)
5985
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5986
/* add the bitmap */
5989
if (mddev->bitmap_info.default_offset == 0)
5991
mddev->bitmap_info.offset =
5992
mddev->bitmap_info.default_offset;
5993
mddev->pers->quiesce(mddev, 1);
5994
rv = bitmap_create(mddev);
5996
rv = bitmap_load(mddev);
5998
bitmap_destroy(mddev);
5999
mddev->pers->quiesce(mddev, 0);
6001
/* remove the bitmap */
6004
if (mddev->bitmap->file)
6006
mddev->pers->quiesce(mddev, 1);
6007
bitmap_destroy(mddev);
6008
mddev->pers->quiesce(mddev, 0);
6009
mddev->bitmap_info.offset = 0;
6012
md_update_sb(mddev, 1);
6016
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6018
struct md_rdev *rdev;
6020
if (mddev->pers == NULL)
6023
rdev = find_rdev(mddev, dev);
6027
md_error(mddev, rdev);
6028
if (!test_bit(Faulty, &rdev->flags))
6034
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mddev *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = mddev->array_sectors / 8;
	return 0;
}
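/*
 * Illustrative user-space sketch (not part of the driver): reading the
 * synthetic 2-head/4-sector geometry back with HDIO_GETGEO.  With
 * heads = 2 and sectors = 4, cylinders works out to capacity / 8.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	printf("heads=%u sectors=%u cylinders=%u\n",
	       geo.heads, geo.sectors, geo.cylinders);
	close(fd);
	return 0;
}
#endif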
6049
static int md_ioctl(struct block_device *bdev, fmode_t mode,
6050
unsigned int cmd, unsigned long arg)
6053
void __user *argp = (void __user *)arg;
6054
struct mddev *mddev = NULL;
6057
if (!capable(CAP_SYS_ADMIN))
6061
* Commands dealing with the RAID driver but not any
6067
err = get_version(argp);
6070
case PRINT_RAID_DEBUG:
6078
autostart_arrays(arg);
6085
* Commands creating/starting a new array:
6088
mddev = bdev->bd_disk->private_data;
6095
err = mddev_lock(mddev);
6098
"md: ioctl lock interrupted, reason %d, cmd %d\n",
6105
case SET_ARRAY_INFO:
6107
mdu_array_info_t info;
6109
memset(&info, 0, sizeof(info));
6110
else if (copy_from_user(&info, argp, sizeof(info))) {
6115
err = update_array_info(mddev, &info);
6117
printk(KERN_WARNING "md: couldn't update"
6118
" array info. %d\n", err);
6123
if (!list_empty(&mddev->disks)) {
6125
"md: array %s already has disks!\n",
6130
if (mddev->raid_disks) {
6132
"md: array %s already initialised!\n",
6137
err = set_array_info(mddev, &info);
6139
printk(KERN_WARNING "md: couldn't set"
6140
" array info. %d\n", err);
6150
* Commands querying/configuring an existing array:
6152
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6153
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6154
if ((!mddev->raid_disks && !mddev->external)
6155
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6156
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6157
&& cmd != GET_BITMAP_FILE) {
6163
* Commands even a read-only array can execute:
6167
case GET_ARRAY_INFO:
6168
err = get_array_info(mddev, argp);
6171
case GET_BITMAP_FILE:
6172
err = get_bitmap_file(mddev, argp);
6176
err = get_disk_info(mddev, argp);
6179
case RESTART_ARRAY_RW:
6180
err = restart_array(mddev);
6184
err = do_md_stop(mddev, 0, 1);
6188
err = md_set_readonly(mddev, 1);
6192
if (get_user(ro, (int __user *)(arg))) {
6198
/* if the bdev is going readonly the value of mddev->ro
6199
* does not matter, no writes are coming
6204
/* are we already prepared for writes? */
6208
/* transitioning to readauto need only happen for
6209
* arrays that call md_write_start
6212
err = restart_array(mddev);
6215
set_disk_ro(mddev->gendisk, 0);
6222
* The remaining ioctls are changing the state of the
6223
* superblock, so we do not allow them on read-only arrays.
6224
* However non-MD ioctls (e.g. get-size) will still come through
6225
* here and hit the 'default' below, so only disallow
6226
* 'md' ioctls, and switch to rw mode if started auto-readonly.
6228
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
6229
if (mddev->ro == 2) {
6231
sysfs_notify_dirent_safe(mddev->sysfs_state);
6232
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6233
md_wakeup_thread(mddev->thread);
6244
mdu_disk_info_t info;
6245
if (copy_from_user(&info, argp, sizeof(info)))
6248
err = add_new_disk(mddev, &info);
6252
case HOT_REMOVE_DISK:
6253
err = hot_remove_disk(mddev, new_decode_dev(arg));
6257
err = hot_add_disk(mddev, new_decode_dev(arg));
6260
case SET_DISK_FAULTY:
6261
err = set_disk_faulty(mddev, new_decode_dev(arg));
6265
err = do_md_run(mddev);
6268
case SET_BITMAP_FILE:
6269
err = set_bitmap_file(mddev, (int)arg);
6279
if (mddev->hold_active == UNTIL_IOCTL &&
6281
mddev->hold_active = 0;
6282
mddev_unlock(mddev);
6291
#ifdef CONFIG_COMPAT
6292
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
6293
unsigned int cmd, unsigned long arg)
6296
case HOT_REMOVE_DISK:
6298
case SET_DISK_FAULTY:
6299
case SET_BITMAP_FILE:
6300
/* These take in integer arg, do not convert */
6303
arg = (unsigned long)compat_ptr(arg);
6307
return md_ioctl(bdev, mode, cmd, arg);
6309
#endif /* CONFIG_COMPAT */
6311
static int md_open(struct block_device *bdev, fmode_t mode)
6314
* Succeed if we can lock the mddev, which confirms that
6315
* it isn't being stopped right now.
6317
struct mddev *mddev = mddev_find(bdev->bd_dev);
6320
if (mddev->gendisk != bdev->bd_disk) {
6321
/* we are racing with mddev_put which is discarding this
6325
/* Wait until bdev->bd_disk is definitely gone */
6326
flush_workqueue(md_misc_wq);
6327
/* Then retry the open from the top */
6328
return -ERESTARTSYS;
6330
BUG_ON(mddev != bdev->bd_disk->private_data);
6332
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6336
atomic_inc(&mddev->openers);
6337
mutex_unlock(&mddev->open_mutex);
6339
check_disk_change(bdev);
6344
static int md_release(struct gendisk *disk, fmode_t mode)
6346
struct mddev *mddev = disk->private_data;
6349
atomic_dec(&mddev->openers);
6355
static int md_media_changed(struct gendisk *disk)
6357
struct mddev *mddev = disk->private_data;
6359
return mddev->changed;
6362
static int md_revalidate(struct gendisk *disk)
6364
struct mddev *mddev = disk->private_data;
6369
static const struct block_device_operations md_fops =
6371
.owner = THIS_MODULE,
6373
.release = md_release,
6375
#ifdef CONFIG_COMPAT
6376
.compat_ioctl = md_compat_ioctl,
6378
.getgeo = md_getgeo,
6379
.media_changed = md_media_changed,
6380
.revalidate_disk= md_revalidate,
6383
static int md_thread(void * arg)
6385
struct md_thread *thread = arg;
6388
* md_thread is a 'system-thread', its priority should be very
6389
* high. We avoid resource deadlocks individually in each
6390
* raid personality. (RAID5 does preallocation) We also use RR and
6391
* the very same RT priority as kswapd, thus we will never get
6392
* into a priority inversion deadlock.
6394
* we definitely have to have equal or higher priority than
6395
* bdflush, otherwise bdflush will deadlock if there are too
6396
* many dirty RAID5 blocks.
6399
allow_signal(SIGKILL);
6400
while (!kthread_should_stop()) {
6402
/* We need to wait INTERRUPTIBLE so that
6403
* we don't add to the load-average.
6404
* That means we need to be sure no signals are
6407
if (signal_pending(current))
6408
flush_signals(current);
6410
wait_event_interruptible_timeout
6412
test_bit(THREAD_WAKEUP, &thread->flags)
6413
|| kthread_should_stop(),
6416
clear_bit(THREAD_WAKEUP, &thread->flags);
6417
if (!kthread_should_stop())
6418
thread->run(thread->mddev);
6424
void md_wakeup_thread(struct md_thread *thread)
6427
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
6428
set_bit(THREAD_WAKEUP, &thread->flags);
6429
wake_up(&thread->wqueue);
6433
struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev,
6436
struct md_thread *thread;
6438
thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
6442
init_waitqueue_head(&thread->wqueue);
6445
thread->mddev = mddev;
6446
thread->timeout = MAX_SCHEDULE_TIMEOUT;
6447
thread->tsk = kthread_run(md_thread, thread,
6449
mdname(thread->mddev),
6450
name ?: mddev->pers->name);
6451
if (IS_ERR(thread->tsk)) {
6458
void md_unregister_thread(struct md_thread **threadp)
6460
struct md_thread *thread = *threadp;
6463
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6464
/* Locking ensures that mddev_unlock does not wake_up a
6465
* non-existent thread
6467
spin_lock(&pers_lock);
6469
spin_unlock(&pers_lock);
6471
kthread_stop(thread->tsk);
6475
void md_error(struct mddev *mddev, struct md_rdev *rdev)
6482
if (!rdev || test_bit(Faulty, &rdev->flags))
6485
if (!mddev->pers || !mddev->pers->error_handler)
6487
mddev->pers->error_handler(mddev,rdev);
6488
if (mddev->degraded)
6489
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6490
sysfs_notify_dirent_safe(rdev->sysfs_state);
6491
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6492
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6493
md_wakeup_thread(mddev->thread);
6494
if (mddev->event_work.func)
6495
queue_work(md_misc_wq, &mddev->event_work);
6496
md_new_event_inintr(mddev);
6499
/* seq_file implementation /proc/mdstat */
6501
static void status_unused(struct seq_file *seq)
6504
struct md_rdev *rdev;
6506
seq_printf(seq, "unused devices: ");
6508
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6509
char b[BDEVNAME_SIZE];
6511
seq_printf(seq, "%s ",
6512
bdevname(rdev->bdev,b));
6515
seq_printf(seq, "<none>");
6517
seq_printf(seq, "\n");
6521
static void status_resync(struct seq_file *seq, struct mddev * mddev)
6523
sector_t max_sectors, resync, res;
6524
unsigned long dt, db;
6527
unsigned int per_milli;
6529
resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
6531
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6532
max_sectors = mddev->resync_max_sectors;
6534
max_sectors = mddev->dev_sectors;
6537
* Should not happen.
6543
/* Pick 'scale' such that (resync>>scale)*1000 will fit
6544
* in a sector_t, and (max_sectors>>scale) will fit in a
6545
* u32, as those are the requirements for sector_div.
6546
* Thus 'scale' must be at least 10
6549
if (sizeof(sector_t) > sizeof(unsigned long)) {
6550
while ( max_sectors/2 > (1ULL<<(scale+32)))
6553
res = (resync>>scale)*1000;
6554
sector_div(res, (u32)((max_sectors>>scale)+1));
6558
int i, x = per_milli/50, y = 20-x;
6559
seq_printf(seq, "[");
6560
for (i = 0; i < x; i++)
6561
seq_printf(seq, "=");
6562
seq_printf(seq, ">");
6563
for (i = 0; i < y; i++)
6564
seq_printf(seq, ".");
6565
seq_printf(seq, "] ");
6567
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6568
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6570
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6572
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6573
"resync" : "recovery"))),
6574
per_milli/10, per_milli % 10,
6575
(unsigned long long) resync/2,
6576
(unsigned long long) max_sectors/2);
6579
	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to the 'max' sectors.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt)
		dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
6608
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
6610
struct list_head *tmp;
6612
struct mddev *mddev;
6620
spin_lock(&all_mddevs_lock);
6621
list_for_each(tmp,&all_mddevs)
6623
mddev = list_entry(tmp, struct mddev, all_mddevs);
6625
spin_unlock(&all_mddevs_lock);
6628
spin_unlock(&all_mddevs_lock);
6630
return (void*)2;/* tail */
6634
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
6636
struct list_head *tmp;
6637
struct mddev *next_mddev, *mddev = v;
6643
spin_lock(&all_mddevs_lock);
6645
tmp = all_mddevs.next;
6647
tmp = mddev->all_mddevs.next;
6648
if (tmp != &all_mddevs)
6649
next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
6651
next_mddev = (void*)2;
6654
spin_unlock(&all_mddevs_lock);
6662
static void md_seq_stop(struct seq_file *seq, void *v)
6664
struct mddev *mddev = v;
6666
if (mddev && v != (void*)1 && v != (void*)2)
6670
static int md_seq_show(struct seq_file *seq, void *v)
6672
struct mddev *mddev = v;
6674
struct md_rdev *rdev;
6675
struct bitmap *bitmap;
6677
if (v == (void*)1) {
6678
struct md_personality *pers;
6679
seq_printf(seq, "Personalities : ");
6680
spin_lock(&pers_lock);
6681
list_for_each_entry(pers, &pers_list, list)
6682
seq_printf(seq, "[%s] ", pers->name);
6684
spin_unlock(&pers_lock);
6685
seq_printf(seq, "\n");
6686
seq->poll_event = atomic_read(&md_event_count);
6689
if (v == (void*)2) {
6694
if (mddev_lock(mddev) < 0)
6697
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6698
seq_printf(seq, "%s : %sactive", mdname(mddev),
6699
mddev->pers ? "" : "in");
6702
seq_printf(seq, " (read-only)");
6704
seq_printf(seq, " (auto-read-only)");
6705
seq_printf(seq, " %s", mddev->pers->name);
6709
list_for_each_entry(rdev, &mddev->disks, same_set) {
6710
char b[BDEVNAME_SIZE];
6711
seq_printf(seq, " %s[%d]",
6712
bdevname(rdev->bdev,b), rdev->desc_nr);
6713
if (test_bit(WriteMostly, &rdev->flags))
6714
seq_printf(seq, "(W)");
6715
if (test_bit(Faulty, &rdev->flags)) {
6716
seq_printf(seq, "(F)");
6718
} else if (rdev->raid_disk < 0)
6719
seq_printf(seq, "(S)"); /* spare */
6720
sectors += rdev->sectors;
6723
if (!list_empty(&mddev->disks)) {
6725
seq_printf(seq, "\n %llu blocks",
6726
(unsigned long long)
6727
mddev->array_sectors / 2);
6729
seq_printf(seq, "\n %llu blocks",
6730
(unsigned long long)sectors / 2);
6732
if (mddev->persistent) {
6733
if (mddev->major_version != 0 ||
6734
mddev->minor_version != 90) {
6735
seq_printf(seq," super %d.%d",
6736
mddev->major_version,
6737
mddev->minor_version);
6739
} else if (mddev->external)
6740
seq_printf(seq, " super external:%s",
6741
mddev->metadata_type);
6743
seq_printf(seq, " super non-persistent");
6746
mddev->pers->status(seq, mddev);
6747
seq_printf(seq, "\n ");
6748
if (mddev->pers->sync_request) {
6749
if (mddev->curr_resync > 2) {
6750
status_resync(seq, mddev);
6751
seq_printf(seq, "\n ");
6752
} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
6753
seq_printf(seq, "\tresync=DELAYED\n ");
6754
else if (mddev->recovery_cp < MaxSector)
6755
seq_printf(seq, "\tresync=PENDING\n ");
6758
seq_printf(seq, "\n ");
6760
if ((bitmap = mddev->bitmap)) {
6761
unsigned long chunk_kb;
6762
unsigned long flags;
6763
spin_lock_irqsave(&bitmap->lock, flags);
6764
chunk_kb = mddev->bitmap_info.chunksize >> 10;
6765
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
6767
bitmap->pages - bitmap->missing_pages,
6769
(bitmap->pages - bitmap->missing_pages)
6770
<< (PAGE_SHIFT - 10),
6771
chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
6772
chunk_kb ? "KB" : "B");
6774
seq_printf(seq, ", file: ");
6775
seq_path(seq, &bitmap->file->f_path, " \t\n");
6778
seq_printf(seq, "\n");
6779
spin_unlock_irqrestore(&bitmap->lock, flags);
6782
seq_printf(seq, "\n");
6784
mddev_unlock(mddev);
6789
static const struct seq_operations md_seq_ops = {
6790
.start = md_seq_start,
6791
.next = md_seq_next,
6792
.stop = md_seq_stop,
6793
.show = md_seq_show,
6796
static int md_seq_open(struct inode *inode, struct file *file)
6798
struct seq_file *seq;
6801
error = seq_open(file, &md_seq_ops);
6805
seq = file->private_data;
6806
seq->poll_event = atomic_read(&md_event_count);
6810
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *seq = filp->private_data;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (seq->poll_event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= md_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
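/*
 * Illustrative user-space sketch (not part of the driver): waiting for
 * md events by polling /proc/mdstat.  A new event is reported as
 * POLLERR | POLLPRI; re-reading the file from offset 0 refreshes
 * poll_event and rearms the notification.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	struct pollfd pfd;
	char buf[4096];

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI;
	for (;;) {
		read(pfd.fd, buf, sizeof(buf));	/* consume and rearm */
		lseek(pfd.fd, 0, SEEK_SET);
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLPRI)
			printf("mdstat changed\n");
	}
	close(pfd.fd);
	return 0;
}
#endif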
int register_md_personality(struct md_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct md_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
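/*
 * Illustrative sketch (not part of the driver): the idle test above in
 * isolation.  "curr" is total sectors transferred minus sectors issued
 * by resync itself, so the array only counts as busy once non-resync
 * I/O pushes the delta well past the last snapshot.
 */
#if 0
static long last_events;

static int demo_is_idle(long disk_sectors, long sync_io, int init)
{
	long curr = disk_sectors - sync_io;
	int idle = 1;

	if (init || curr - last_events > 64) {
		last_events = curr;	/* take a new snapshot */
		idle = 0;
	}
	return idle;
}
#endif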
6896
void md_done_sync(struct mddev *mddev, int blocks, int ok)
6898
/* another "blocks" (512byte) blocks have been synced */
6899
atomic_sub(blocks, &mddev->recovery_active);
6900
wake_up(&mddev->recovery_wait);
6902
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6903
md_wakeup_thread(mddev->thread);
6904
// stop recovery, signal do_sync ....
6909
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}

void md_write_end(struct mddev *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
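/*
 * Illustrative sketch (not part of the driver): a personality is
 * expected to bracket array writes with the pair above so that the
 * superblock 'active' flag and the safemode timer stay accurate.  In
 * the real personalities md_write_end() runs from the bio completion
 * path; the inline pairing below is a simplification.
 */
#if 0
static void personality_make_request(struct mddev *mddev, struct bio *bi)
{
	md_write_start(mddev, bi);	/* may block while the sb is updated */
	/* ... issue the actual member-device I/O here ... */
	md_write_end(mddev);		/* last writer arms the safemode timer */
}
#endif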
6959
/* md_allow_write(mddev)
6960
* Calling this ensures that the array is marked 'active' so that writes
6961
* may proceed without blocking. It is important to call this before
6962
* attempting a GFP_KERNEL allocation while holding the mddev lock.
6963
* Must be called with mddev_lock held.
6965
* In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
6966
* is dropped, so return -EAGAIN after notifying userspace.
6968
int md_allow_write(struct mddev *mddev)
6974
if (!mddev->pers->sync_request)
6977
spin_lock_irq(&mddev->write_lock);
6978
if (mddev->in_sync) {
6980
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6981
set_bit(MD_CHANGE_PENDING, &mddev->flags);
6982
if (mddev->safemode_delay &&
6983
mddev->safemode == 0)
6984
mddev->safemode = 1;
6985
spin_unlock_irq(&mddev->write_lock);
6986
md_update_sb(mddev, 0);
6987
sysfs_notify_dirent_safe(mddev->sysfs_state);
6989
spin_unlock_irq(&mddev->write_lock);
6991
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6996
EXPORT_SYMBOL_GPL(md_allow_write);
6998
#define SYNC_MARKS 10
6999
#define SYNC_MARK_STEP (3*HZ)
7000
void md_do_sync(struct mddev *mddev)
7002
struct mddev *mddev2;
7003
unsigned int currspeed = 0,
7005
sector_t max_sectors,j, io_sectors;
7006
unsigned long mark[SYNC_MARKS];
7007
sector_t mark_cnt[SYNC_MARKS];
7009
struct list_head *tmp;
7010
sector_t last_check;
7012
struct md_rdev *rdev;
7015
	/* just in case the thread restarts... */
7016
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7018
if (mddev->ro) /* never try to sync a read-only array */
7021
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7022
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
7023
desc = "data-check";
7024
else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7025
desc = "requested-resync";
7028
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7033
	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */
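	/*
	 * Illustrative sketch (not part of the driver): the address-based
	 * yield described above, reduced to two contenders.  The
	 * lower-address mddev backs off to 1 so the other side can win,
	 * which is what keeps two arrays sharing a disk from deadlocking
	 * on each other.
	 */
#if 0
static void demo_arbitrate(struct mddev *a, struct mddev *b)
{
	if (a < b && a->curr_resync == 2)
		a->curr_resync = 1;	/* arbitrarily yield */
	/* 'a' then waits until b->curr_resync drops below its own */
}
#endif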
7050
mddev->curr_resync = 2;
7053
if (kthread_should_stop())
7054
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7056
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7058
for_each_mddev(mddev2, tmp) {
7059
if (mddev2 == mddev)
7061
if (!mddev->parallel_resync
7062
&& mddev2->curr_resync
7063
&& match_mddev_units(mddev, mddev2)) {
7065
if (mddev < mddev2 && mddev->curr_resync == 2) {
7066
/* arbitrarily yield */
7067
mddev->curr_resync = 1;
7068
wake_up(&resync_wait);
7070
if (mddev > mddev2 && mddev->curr_resync == 1)
7071
/* no need to wait here, we can wait the next
7072
* time 'round when curr_resync == 2
7075
/* We need to wait 'interruptible' so as not to
7076
* contribute to the load average, and not to
7077
* be caught by 'softlockup'
7079
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7080
if (!kthread_should_stop() &&
7081
mddev2->curr_resync >= mddev->curr_resync) {
7082
printk(KERN_INFO "md: delaying %s of %s"
7083
" until %s has finished (they"
7084
" share one or more physical units)\n",
7085
desc, mdname(mddev), mdname(mddev2));
7087
if (signal_pending(current))
7088
flush_signals(current);
7090
finish_wait(&resync_wait, &wq);
7093
finish_wait(&resync_wait, &wq);
7096
} while (mddev->curr_resync < 2);
7099
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7100
/* resync follows the size requested by the personality,
7101
* which defaults to physical size, but can be virtual size
7103
max_sectors = mddev->resync_max_sectors;
7104
mddev->resync_mismatches = 0;
7105
/* we don't use the checkpoint if there's a bitmap */
7106
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7107
j = mddev->resync_min;
7108
else if (!mddev->bitmap)
7109
j = mddev->recovery_cp;
7111
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7112
max_sectors = mddev->dev_sectors;
7114
/* recovery follows the physical size of devices */
7115
max_sectors = mddev->dev_sectors;
7118
list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
7119
if (rdev->raid_disk >= 0 &&
7120
!test_bit(Faulty, &rdev->flags) &&
7121
!test_bit(In_sync, &rdev->flags) &&
7122
rdev->recovery_offset < j)
7123
j = rdev->recovery_offset;
7127
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
7128
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
7129
" %d KB/sec/disk.\n", speed_min(mddev));
7130
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
7131
"(but not more than %d KB/sec) for %s.\n",
7132
speed_max(mddev), desc);
7134
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7137
for (m = 0; m < SYNC_MARKS; m++) {
7139
mark_cnt[m] = io_sectors;
7142
mddev->resync_mark = mark[last_mark];
7143
mddev->resync_mark_cnt = mark_cnt[last_mark];
7146
* Tune reconstruction:
7148
window = 32*(PAGE_SIZE/512);
7149
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
7150
window/2, (unsigned long long)max_sectors/2);
7152
atomic_set(&mddev->recovery_active, 0);
7157
"md: resuming %s of %s from checkpoint.\n",
7158
desc, mdname(mddev));
7159
mddev->curr_resync = j;
7161
mddev->curr_resync_completed = j;
7163
while (j < max_sectors) {
7168
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7169
((mddev->curr_resync > mddev->curr_resync_completed &&
7170
(mddev->curr_resync - mddev->curr_resync_completed)
7171
> (max_sectors >> 4)) ||
7172
(j - mddev->curr_resync_completed)*2
7173
>= mddev->resync_max - mddev->curr_resync_completed
7175
/* time to update curr_resync_completed */
7176
wait_event(mddev->recovery_wait,
7177
atomic_read(&mddev->recovery_active) == 0);
7178
mddev->curr_resync_completed = j;
7179
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7180
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7183
while (j >= mddev->resync_max && !kthread_should_stop()) {
7184
/* As this condition is controlled by user-space,
7185
* we can block indefinitely, so use '_interruptible'
7186
* to avoid triggering warnings.
7188
flush_signals(current); /* just in case */
7189
wait_event_interruptible(mddev->recovery_wait,
7190
mddev->resync_max > j
7191
|| kthread_should_stop());
7194
if (kthread_should_stop())
7197
sectors = mddev->pers->sync_request(mddev, j, &skipped,
7198
currspeed < speed_min(mddev));
7200
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7204
if (!skipped) { /* actual IO requested */
7205
io_sectors += sectors;
7206
atomic_add(sectors, &mddev->recovery_active);
7209
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7213
		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop())
			goto interrupted;

		/*
		 * this loop exits only if we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
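	/*
	 * Units note for the throttle loop above: io_sectors and
	 * resync_mark_cnt count 512-byte sectors, so halving their
	 * difference gives KB, and dividing by the seconds elapsed
	 * since the last mark (the +1 terms guard against a zero
	 * divisor) yields the KB/sec figure that is compared against
	 * speed_min()/speed_max().
	 */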
printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
7265
* this also signals 'finished resyncing' to md_stop
7268
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
7270
/* tell personality that we are finished */
7271
mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
7273
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
7274
mddev->curr_resync > 2) {
7275
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7276
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7277
if (mddev->curr_resync >= mddev->recovery_cp) {
7279
"md: checkpointing %s of %s.\n",
7280
desc, mdname(mddev));
7281
mddev->recovery_cp = mddev->curr_resync;
7284
mddev->recovery_cp = MaxSector;
7286
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7287
mddev->curr_resync = MaxSector;
7289
list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
7290
if (rdev->raid_disk >= 0 &&
7291
mddev->delta_disks >= 0 &&
7292
!test_bit(Faulty, &rdev->flags) &&
7293
!test_bit(In_sync, &rdev->flags) &&
7294
rdev->recovery_offset < mddev->curr_resync)
7295
rdev->recovery_offset = mddev->curr_resync;
7299
set_bit(MD_CHANGE_DEVS, &mddev->flags);
7302
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7303
/* We completed so min/max setting can be forgotten if used. */
7304
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7305
mddev->resync_min = 0;
7306
mddev->resync_max = MaxSector;
7307
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7308
mddev->resync_min = mddev->curr_resync_completed;
7309
mddev->curr_resync = 0;
7310
wake_up(&resync_wait);
7311
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
7312
md_wakeup_thread(mddev->thread);
7317
* got a signal, exit.
7320
"md: md_do_sync() got signal ... exiting\n");
7321
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7325
EXPORT_SYMBOL_GPL(md_do_sync);
7327
static int remove_and_add_spares(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					if (sysfs_link_rdev(mddev, rdev))
						/* failure here is OK */;
					spares++;
					md_new_event(mddev);
					set_bit(MD_CHANGE_DEVS, &mddev->flags);
				}
			}
		}
	}
	return spares;
}
static void reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success... */
		/* activate any spares */
		if (mddev->pers->spare_active(mddev))
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If array is no-longer degraded, then any saved_raid_disk
	 * information must be scrapped.  Also if any device is now
	 * In_sync we must scrape the saved_raid_disk for that device
	 * so that the superblock for an incrementally recovered device
	 * is written out.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (!mddev->degraded ||
		    test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
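/*
 * A minimal sketch (not part of the original source) of how code elsewhere
 * in this file kicks this machinery: md_error(), for instance, sets
 * MD_RECOVERY_NEEDED and wakes the per-array thread, whose loop then calls
 * md_check_recovery() below.  The helper name here is illustrative only.
 */
static inline void example_request_recovery(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);	/* thread runs md_check_recovery() */
}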
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			struct md_rdev *rdev;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Blocked, &rdev->flags) &&
				    test_bit(Faulty, &rdev->flags) &&
				    atomic_read(&rdev->nr_pending)==0) {
					if (mddev->pers->hot_remove_disk(
						    mddev, rdev->raid_disk)==0) {
						sysfs_unlink_rdev(mddev, rdev);
						rdev->raid_disk = -1;
					}
				}
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
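/*
 * Usage sketch (illustrative, not from the original file): a personality
 * that finds the Blocked flag set must take a reference before waiting,
 * because md_wait_for_blocked_rdev() drops it via rdev_dec_pending():
 *
 *	if (test_bit(Blocked, &rdev->flags)) {
 *		atomic_inc(&rdev->nr_pending);
 *		md_wait_for_blocked_rdev(rdev, mddev);
 *	}
 */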
/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
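/*
 * For reference, the 64-bit entry layout described above packs as shown
 * below.  This block is an illustrative sketch kept compiled-out; the
 * authoritative BB_* macros used by the code that follows live in md.h.
 */
#if 0
#define BB_LEN_MASK	(0x00000000000001FFULL)	/* bits 0-8: length - 1 */
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)	/* bits 9-62: start sector */
#define BB_ACK_MASK	(0x8000000000000000ULL)	/* bit 63: acknowledged */
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
#endif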
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo = 0;
	u64 *p = bb->page;
	int rv = 0;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);

	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
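/*
 * Illustrative caller (hypothetical helper, not in the original file):
 * checking whether an rdev-relative range overlaps any recorded bad
 * blocks.  Note the s + rdev->data_offset translation, matching the
 * convention used by rdev_set_badblocks()/rdev_clear_badblocks() below.
 */
static inline int example_range_check(struct md_rdev *rdev,
				      sector_t s, int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	/* returns 0: clean, 1: acknowledged bad, -1: unacknowledged bad */
	return md_is_badblock(&rdev->badblocks, s + rdev->data_offset,
			      sectors, &first_bad, &bad_sectors);
}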
/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* merge all of this range */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
			hi++;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irq(&bb->lock);

	return rv;
}
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int acknowledged)
{
	int rv = md_set_badblocks(&rdev->badblocks,
				  s + rdev->data_offset, sectors, acknowledged);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
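/*
 * Typical use (a sketch, based on the pattern in the raid personalities):
 * on a failed write, record the range and only fail the whole device if
 * the bad-block table itself is full.  Surrounding error-path detail is
 * omitted, so this fragment is kept compiled-out.
 */
#if 0
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(mddev, rdev);	/* could not record the bad range */
#endif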
/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now lo doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors)
{
	return md_clear_badblocks(&rdev->badblocks,
				  s + rdev->data_offset,
				  sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
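/*
 * Example interaction (a sketch; it assumes an array md0 with member
 * device sda1):
 *
 *	# record a 16-sector bad range starting at sector 1000
 *	echo "1000 16" > /sys/block/md0/md/dev-sda1/bad_blocks
 *	# list the known-bad ranges
 *	cat /sys/block/md0/md/dev-sda1/bad_blocks
 *
 * The attribute names ('bad_blocks', 'unacknowledged_bad_blocks') follow
 * the rdev sysfs entries defined elsewhere in this file.
 */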
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp) {
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
		}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}
static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);