2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 This program is free software; you can redistribute it and/or modify
23 it under the terms of the GNU General Public License as published by
24 the Free Software Foundation; either version 2, or (at your option)
27 You should have received a copy of the GNU General Public License
28 (for example /usr/src/linux/COPYING); if not, write to the Free
29 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include <linux/module.h>
33 #include <linux/config.h>
34 #include <linux/linkage.h>
35 #include <linux/raid/md.h>
36 #include <linux/sysctl.h>
37 #include <linux/devfs_fs_kernel.h>
38 #include <linux/buffer_head.h> /* for invalidate_bdev */
39 #include <linux/suspend.h>
41 #include <linux/init.h>
44 #include <linux/kmod.h>
47 #include <asm/unaligned.h>
49 #define MAJOR_NR MD_MAJOR
52 /* 63 partitions with the alternate major number (mdp) */
53 #define MdpMinorShift 6
56 #define dprintk(x...) ((void)(DEBUG && printk(x)))
60 static void autostart_arrays (int part);
63 static mdk_personality_t *pers[MAX_PERSONALITY];
64 static DEFINE_SPINLOCK(pers_lock);
67 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
68 * is 1000 KB/sec, so the extra system load does not show up that much.
69 * Increase it if you want to have more _guaranteed_ speed. Note that
70 * the RAID driver will use the maximum available bandwidth if the IO
71 * subsystem is idle. There is also an 'absolute maximum' reconstruction
72 * speed limit - in case reconstruction slows down your system despite
75 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
78 static int sysctl_speed_limit_min = 1000;
79 static int sysctl_speed_limit_max = 200000;
81 static struct ctl_table_header *raid_table_header;
83 static ctl_table raid_table[] = {
85 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
86 .procname = "speed_limit_min",
87 .data = &sysctl_speed_limit_min,
88 .maxlen = sizeof(int),
90 .proc_handler = &proc_dointvec,
93 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
94 .procname = "speed_limit_max",
95 .data = &sysctl_speed_limit_max,
96 .maxlen = sizeof(int),
98 .proc_handler = &proc_dointvec,
103 static ctl_table raid_dir_table[] = {
105 .ctl_name = DEV_RAID,
114 static ctl_table raid_root_table[] = {
120 .child = raid_dir_table,
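/*
 * The three tables above nest to publish /proc/sys/dev/raid/speed_limit_min
 * and /proc/sys/dev/raid/speed_limit_max. For example, from a shell
 * (values are KB/sec, as noted above):
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /proc/sys/dev/raid/speed_limit_max
 */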
125 static struct block_device_operations md_fops;
128 * Lets us iterate over all existing md arrays;
129 * all_mddevs_lock protects this list.
131 static LIST_HEAD(all_mddevs);
132 static DEFINE_SPINLOCK(all_mddevs_lock);
136 * iterates through all used mddevs in the system.
137 * We take care to grab the all_mddevs_lock whenever navigating
138 * the list, and to always hold a refcount when unlocked.
139 * Any code which breaks out of this loop while owning
140 * a reference to the current mddev must mddev_put it.
142 #define ITERATE_MDDEV(mddev,tmp) \
144 for (({ spin_lock(&all_mddevs_lock); \
145 tmp = all_mddevs.next; \
147 ({ if (tmp != &all_mddevs) \
148 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
149 spin_unlock(&all_mddevs_lock); \
150 if (mddev) mddev_put(mddev); \
151 mddev = list_entry(tmp, mddev_t, all_mddevs); \
152 tmp != &all_mddevs;}); \
153 ({ spin_lock(&all_mddevs_lock); \
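/*
 * A minimal usage sketch of the iterator above; note the mddev_put()
 * that the comment demands when breaking out while still holding a
 * reference:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		if (mddev->raid_disks == 0) {
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */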
158 static int md_fail_request (request_queue_t *q, struct bio *bio)
160 bio_io_error(bio, bio->bi_size);
164 static inline mddev_t *mddev_get(mddev_t *mddev)
166 atomic_inc(&mddev->active);
170 static void mddev_put(mddev_t *mddev)
172 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
174 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
175 list_del(&mddev->all_mddevs);
176 blk_put_queue(mddev->queue);
179 spin_unlock(&all_mddevs_lock);
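/*
 * Note the shape of mddev_put(): atomic_dec_and_lock() only returns
 * true when the decrement took ->active to zero, and in that case it
 * returns with all_mddevs_lock already held, so the last holder can
 * unlink and free the mddev without racing against mddev_find().
 */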
182 static mddev_t * mddev_find(dev_t unit)
184 mddev_t *mddev, *new = NULL;
187 spin_lock(&all_mddevs_lock);
188 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
189 if (mddev->unit == unit) {
191 spin_unlock(&all_mddevs_lock);
198 list_add(&new->all_mddevs, &all_mddevs);
199 spin_unlock(&all_mddevs_lock);
202 spin_unlock(&all_mddevs_lock);
204 new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
208 memset(new, 0, sizeof(*new));
211 if (MAJOR(unit) == MD_MAJOR)
212 new->md_minor = MINOR(unit);
214 new->md_minor = MINOR(unit) >> MdpMinorShift;
216 init_MUTEX(&new->reconfig_sem);
217 INIT_LIST_HEAD(&new->disks);
218 INIT_LIST_HEAD(&new->all_mddevs);
219 init_timer(&new->safemode_timer);
220 atomic_set(&new->active, 1);
222 new->queue = blk_alloc_queue(GFP_KERNEL);
228 blk_queue_make_request(new->queue, md_fail_request);
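/*
 * mddev_find() above follows the usual lock/drop/alloc/retry shape:
 * search the list under all_mddevs_lock, drop the lock to kmalloc()
 * a new mddev (a sleeping allocation), then retake the lock and search
 * again before inserting, in case another thread created the same
 * unit while the lock was dropped.
 */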
233 static inline int mddev_lock(mddev_t * mddev)
235 return down_interruptible(&mddev->reconfig_sem);
238 static inline void mddev_lock_uninterruptible(mddev_t * mddev)
240 down(&mddev->reconfig_sem);
243 static inline int mddev_trylock(mddev_t * mddev)
245 return down_trylock(&mddev->reconfig_sem);
248 static inline void mddev_unlock(mddev_t * mddev)
250 up(&mddev->reconfig_sem);
253 md_wakeup_thread(mddev->thread);
256 mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
259 struct list_head *tmp;
261 ITERATE_RDEV(mddev,rdev,tmp) {
262 if (rdev->desc_nr == nr)
268 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
270 struct list_head *tmp;
273 ITERATE_RDEV(mddev,rdev,tmp) {
274 if (rdev->bdev->bd_dev == dev)
280 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
282 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
283 return MD_NEW_SIZE_BLOCKS(size);
286 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
290 size = rdev->sb_offset;
293 size &= ~((sector_t)chunk_size/1024 - 1);
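/*
 * Worked example for the 0.90 layout (MD_RESERVED_BLOCKS is 64 1K
 * blocks in this tree): a 10000K device is rounded down to a 64K
 * boundary (9984K) and the superblock sits in the reserved 64K below
 * that, so calc_dev_sboffset() returns 9920; calc_dev_size() then
 * truncates the usable size to a whole number of chunks.
 */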
297 static int alloc_disk_sb(mdk_rdev_t * rdev)
302 rdev->sb_page = alloc_page(GFP_KERNEL);
303 if (!rdev->sb_page) {
304 printk(KERN_ALERT "md: out of memory.\n");
311 static void free_disk_sb(mdk_rdev_t * rdev)
314 page_cache_release(rdev->sb_page);
316 rdev->sb_page = NULL;
323 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
328 complete((struct completion*)bio->bi_private);
332 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
333 struct page *page, int rw)
335 struct bio *bio = bio_alloc(GFP_KERNEL, 1);
336 struct completion event;
339 rw |= (1 << BIO_RW_SYNC);
342 bio->bi_sector = sector;
343 bio_add_page(bio, page, size, 0);
344 init_completion(&event);
345 bio->bi_private = &event;
346 bio->bi_end_io = bi_complete;
348 wait_for_completion(&event);
350 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
355 static int read_disk_sb(mdk_rdev_t * rdev)
357 char b[BDEVNAME_SIZE];
358 if (!rdev->sb_page) {
366 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
372 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
373 bdevname(rdev->bdev,b));
377 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
379 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
380 (sb1->set_uuid1 == sb2->set_uuid1) &&
381 (sb1->set_uuid2 == sb2->set_uuid2) &&
382 (sb1->set_uuid3 == sb2->set_uuid3))
390 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
393 mdp_super_t *tmp1, *tmp2;
395 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
396 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
398 if (!tmp1 || !tmp2) {
400 printk(KERN_INFO "md.c: out of memory, cannot compare superblocks!\n");
408 * nr_disks is not constant
413 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
427 static unsigned int calc_sb_csum(mdp_super_t * sb)
429 unsigned int disk_csum, csum;
431 disk_csum = sb->sb_csum;
432 sb->sb_csum = 0; /* the csum field itself must be zero while summing */
433 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
434 sb->sb_csum = disk_csum;
440 * Handle superblock details.
441 * We want to be able to handle multiple superblock formats
442 * so we have a common interface to them all, and an array of
443 * different handlers.
444 * We rely on user-space to write the initial superblock, and support
445 * reading and updating of superblocks.
446 * Interface methods are:
447 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
448 * loads and validates a superblock on dev.
449 * if refdev != NULL, compare superblocks on both devices
451 * 0 - dev has a superblock that is compatible with refdev
452 * 1 - dev has a superblock that is compatible and newer than refdev
453 * so dev should be used as the refdev in future
454 * -EINVAL superblock incompatible or invalid
455 * -othererror e.g. -EIO
457 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
458 * Verify that dev is acceptable into mddev.
459 * The first time, mddev->raid_disks will be 0, and data from
460 * dev should be merged in. Subsequent calls check that dev
461 * is new enough. Return 0 or -EINVAL
463 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
464 * Update the superblock for rdev with data in mddev
465 * This does not write to disc.
471 struct module *owner;
472 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
473 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
474 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
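/*
 * Handlers are never called directly; callers index super_types[] by
 * mddev->major_version. Roughly the shape used by analyze_sbs() and
 * add_new_disk():
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *	if (err >= 0)
 *		err = super_types[mddev->major_version].
 *			validate_super(mddev, rdev);
 */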
478 * load_super for 0.90.0
480 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
482 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
488 * Calculate the position of the superblock,
489 * it's at the end of the disk.
491 * It also happens to be a multiple of 4Kb.
493 sb_offset = calc_dev_sboffset(rdev->bdev);
494 rdev->sb_offset = sb_offset;
496 ret = read_disk_sb(rdev);
501 bdevname(rdev->bdev, b);
502 sb = (mdp_super_t*)page_address(rdev->sb_page);
504 if (sb->md_magic != MD_SB_MAGIC) {
505 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
510 if (sb->major_version != 0 ||
511 sb->minor_version != 90) {
512 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
513 sb->major_version, sb->minor_version,
518 if (sb->raid_disks <= 0)
521 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
522 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
527 rdev->preferred_minor = sb->md_minor;
528 rdev->data_offset = 0;
530 if (sb->level == MULTIPATH)
533 rdev->desc_nr = sb->this_disk.number;
539 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
540 if (!uuid_equal(refsb, sb)) {
541 printk(KERN_WARNING "md: %s has different UUID to %s\n",
542 b, bdevname(refdev->bdev,b2));
545 if (!sb_equal(refsb, sb)) {
546 printk(KERN_WARNING "md: %s has same UUID"
547 " but different superblock to %s\n",
548 b, bdevname(refdev->bdev, b2));
552 ev2 = md_event(refsb);
558 rdev->size = calc_dev_size(rdev, sb->chunk_size);
565 * validate_super for 0.90.0
567 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
570 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
572 if (mddev->raid_disks == 0) {
573 mddev->major_version = 0;
574 mddev->minor_version = sb->minor_version;
575 mddev->patch_version = sb->patch_version;
576 mddev->persistent = ! sb->not_persistent;
577 mddev->chunk_size = sb->chunk_size;
578 mddev->ctime = sb->ctime;
579 mddev->utime = sb->utime;
580 mddev->level = sb->level;
581 mddev->layout = sb->layout;
582 mddev->raid_disks = sb->raid_disks;
583 mddev->size = sb->size;
584 mddev->events = md_event(sb);
586 if (sb->state & (1<<MD_SB_CLEAN))
587 mddev->recovery_cp = MaxSector;
589 if (sb->events_hi == sb->cp_events_hi &&
590 sb->events_lo == sb->cp_events_lo) {
591 mddev->recovery_cp = sb->recovery_cp;
593 mddev->recovery_cp = 0;
596 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
597 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
598 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
599 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
601 mddev->max_disks = MD_SB_DISKS;
606 if (ev1 < mddev->events)
609 if (mddev->level != LEVEL_MULTIPATH) {
610 rdev->raid_disk = -1;
611 rdev->in_sync = rdev->faulty = 0;
612 desc = sb->disks + rdev->desc_nr;
614 if (desc->state & (1<<MD_DISK_FAULTY))
616 else if (desc->state & (1<<MD_DISK_SYNC) &&
617 desc->raid_disk < mddev->raid_disks) {
619 rdev->raid_disk = desc->raid_disk;
626 * sync_super for 0.90.0
628 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
631 struct list_head *tmp;
633 int next_spare = mddev->raid_disks;
635 /* make rdev->sb match mddev data..
638 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
639 * 3/ any empty disks < next_spare become removed
641 * disks[0] gets initialised to REMOVED because
642 * we cannot be sure from other fields if it has
643 * been initialised or not.
646 int active=0, working=0,failed=0,spare=0,nr_disks=0;
648 sb = (mdp_super_t*)page_address(rdev->sb_page);
650 memset(sb, 0, sizeof(*sb));
652 sb->md_magic = MD_SB_MAGIC;
653 sb->major_version = mddev->major_version;
654 sb->minor_version = mddev->minor_version;
655 sb->patch_version = mddev->patch_version;
656 sb->gvalid_words = 0; /* ignored */
657 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
658 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
659 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
660 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
662 sb->ctime = mddev->ctime;
663 sb->level = mddev->level;
664 sb->size = mddev->size;
665 sb->raid_disks = mddev->raid_disks;
666 sb->md_minor = mddev->md_minor;
667 sb->not_persistent = !mddev->persistent;
668 sb->utime = mddev->utime;
670 sb->events_hi = (mddev->events>>32);
671 sb->events_lo = (u32)mddev->events;
675 sb->recovery_cp = mddev->recovery_cp;
676 sb->cp_events_hi = (mddev->events>>32);
677 sb->cp_events_lo = (u32)mddev->events;
678 if (mddev->recovery_cp == MaxSector)
679 sb->state = (1<< MD_SB_CLEAN);
683 sb->layout = mddev->layout;
684 sb->chunk_size = mddev->chunk_size;
686 sb->disks[0].state = (1<<MD_DISK_REMOVED);
687 ITERATE_RDEV(mddev,rdev2,tmp) {
689 if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
690 rdev2->desc_nr = rdev2->raid_disk;
692 rdev2->desc_nr = next_spare++;
693 d = &sb->disks[rdev2->desc_nr];
695 d->number = rdev2->desc_nr;
696 d->major = MAJOR(rdev2->bdev->bd_dev);
697 d->minor = MINOR(rdev2->bdev->bd_dev);
698 if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
699 d->raid_disk = rdev2->raid_disk;
701 d->raid_disk = rdev2->desc_nr; /* compatibility */
703 d->state = (1<<MD_DISK_FAULTY);
705 } else if (rdev2->in_sync) {
706 d->state = (1<<MD_DISK_ACTIVE);
707 d->state |= (1<<MD_DISK_SYNC);
717 /* now set the "removed" and "faulty" bits on any missing devices */
718 for (i=0 ; i < mddev->raid_disks ; i++) {
719 mdp_disk_t *d = &sb->disks[i];
720 if (d->state == 0 && d->number == 0) {
723 d->state = (1<<MD_DISK_REMOVED);
724 d->state |= (1<<MD_DISK_FAULTY);
728 sb->nr_disks = nr_disks;
729 sb->active_disks = active;
730 sb->working_disks = working;
731 sb->failed_disks = failed;
732 sb->spare_disks = spare;
734 sb->this_disk = sb->disks[rdev->desc_nr];
735 sb->sb_csum = calc_sb_csum(sb);
739 * version 1 superblock
742 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
744 unsigned int disk_csum, csum;
745 unsigned long long newcsum;
746 int size = 256 + le32_to_cpu(sb->max_dev)*2;
747 unsigned int *isuper = (unsigned int*)sb;
750 disk_csum = sb->sb_csum;
753 for (i=0; size>=4; size -= 4 )
754 newcsum += le32_to_cpu(*isuper++);
757 newcsum += le16_to_cpu(*(unsigned short*) isuper);
759 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
760 sb->sb_csum = disk_csum;
761 return cpu_to_le32(csum);
764 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
766 struct mdp_superblock_1 *sb;
769 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
772 * Calculate the position of the superblock.
773 * It is always aligned to a 4K boundary and
774 * depending on minor_version, it can be:
775 * 0: At least 8K, but less than 12K, from end of device
776 * 1: At start of device
777 * 2: 4K from start of device.
779 switch(minor_version) {
781 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
783 sb_offset &= ~(4*2-1);
784 /* convert from sectors to K */
796 rdev->sb_offset = sb_offset;
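/*
 * Example for minor_version 0 on a 1000000-sector device: subtract 16
 * sectors (8K) and round down to a 4K (8-sector) boundary, leaving the
 * superblock at least 8K but less than 12K from the end of the device.
 * minor_version 1 puts it at sector 0 and minor_version 2 at sector 8
 * (4K from the start); in all cases rdev->sb_offset is kept in K units.
 */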
798 ret = read_disk_sb(rdev);
802 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
804 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
805 sb->major_version != cpu_to_le32(1) ||
806 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
807 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
808 sb->feature_map != 0)
811 if (calc_sb_1_csum(sb) != sb->sb_csum) {
812 printk("md: invalid superblock checksum on %s\n",
813 bdevname(rdev->bdev,b));
816 if (le64_to_cpu(sb->data_size) < 10) {
817 printk("md: data_size too small on %s\n",
818 bdevname(rdev->bdev,b));
821 rdev->preferred_minor = 0xffff;
822 rdev->data_offset = le64_to_cpu(sb->data_offset);
828 struct mdp_superblock_1 *refsb =
829 (struct mdp_superblock_1*)page_address(refdev->sb_page);
831 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
832 sb->level != refsb->level ||
833 sb->layout != refsb->layout ||
834 sb->chunksize != refsb->chunksize) {
835 printk(KERN_WARNING "md: %s has strangely different"
836 " superblock to %s\n",
837 bdevname(rdev->bdev,b),
838 bdevname(refdev->bdev,b2));
841 ev1 = le64_to_cpu(sb->events);
842 ev2 = le64_to_cpu(refsb->events);
848 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
850 rdev->size = rdev->sb_offset;
851 if (rdev->size < le64_to_cpu(sb->data_size)/2)
853 rdev->size = le64_to_cpu(sb->data_size)/2;
854 if (le32_to_cpu(sb->chunksize))
855 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
859 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
861 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
863 if (mddev->raid_disks == 0) {
864 mddev->major_version = 1;
865 mddev->patch_version = 0;
866 mddev->persistent = 1;
867 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
868 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
869 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
870 mddev->level = le32_to_cpu(sb->level);
871 mddev->layout = le32_to_cpu(sb->layout);
872 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
873 mddev->size = le64_to_cpu(sb->size)/2;
874 mddev->events = le64_to_cpu(sb->events);
876 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
877 memcpy(mddev->uuid, sb->set_uuid, 16);
879 mddev->max_disks = (4096-256)/2;
882 ev1 = le64_to_cpu(sb->events);
884 if (ev1 < mddev->events)
888 if (mddev->level != LEVEL_MULTIPATH) {
890 rdev->desc_nr = le32_to_cpu(sb->dev_number);
891 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
893 case 0xffff: /* spare */
896 rdev->raid_disk = -1;
898 case 0xfffe: /* faulty */
901 rdev->raid_disk = -1;
906 rdev->raid_disk = role;
913 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
915 struct mdp_superblock_1 *sb;
916 struct list_head *tmp;
919 /* make rdev->sb match mddev and rdev data. */
921 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
925 memset(sb->pad1, 0, sizeof(sb->pad1));
926 memset(sb->pad2, 0, sizeof(sb->pad2));
927 memset(sb->pad3, 0, sizeof(sb->pad3));
929 sb->utime = cpu_to_le64((__u64)mddev->utime);
930 sb->events = cpu_to_le64(mddev->events);
932 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
934 sb->resync_offset = cpu_to_le64(0);
937 ITERATE_RDEV(mddev,rdev2,tmp)
938 if (rdev2->desc_nr+1 > max_dev)
939 max_dev = rdev2->desc_nr+1;
941 sb->max_dev = cpu_to_le32(max_dev);
942 for (i=0; i<max_dev;i++)
943 sb->dev_roles[i] = cpu_to_le16(0xfffe);
945 ITERATE_RDEV(mddev,rdev2,tmp) {
948 sb->dev_roles[i] = cpu_to_le16(0xfffe);
949 else if (rdev2->in_sync)
950 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
952 sb->dev_roles[i] = cpu_to_le16(0xffff);
955 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
956 sb->sb_csum = calc_sb_1_csum(sb);
960 struct super_type super_types[] = {
963 .owner = THIS_MODULE,
964 .load_super = super_90_load,
965 .validate_super = super_90_validate,
966 .sync_super = super_90_sync,
970 .owner = THIS_MODULE,
971 .load_super = super_1_load,
972 .validate_super = super_1_validate,
973 .sync_super = super_1_sync,
977 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
979 struct list_head *tmp;
982 ITERATE_RDEV(mddev,rdev,tmp)
983 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
989 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
991 struct list_head *tmp;
994 ITERATE_RDEV(mddev1,rdev,tmp)
995 if (match_dev_unit(mddev2, rdev))
1001 static LIST_HEAD(pending_raid_disks);
1003 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1005 mdk_rdev_t *same_pdev;
1006 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1012 same_pdev = match_dev_unit(mddev, rdev);
1015 "%s: WARNING: %s appears to be on the same physical"
1016 " disk as %s.\n True protection against single-disk"
1017 " failure might be compromised.\n",
1018 mdname(mddev), bdevname(rdev->bdev,b),
1019 bdevname(same_pdev->bdev,b2));
1021 /* Verify rdev->desc_nr is unique.
1022 * If it is -1, assign a free number, else
1023 * check number is not in use
1025 if (rdev->desc_nr < 0) {
1027 if (mddev->pers) choice = mddev->raid_disks;
1028 while (find_rdev_nr(mddev, choice))
1030 rdev->desc_nr = choice;
1032 if (find_rdev_nr(mddev, rdev->desc_nr))
1036 list_add(&rdev->same_set, &mddev->disks);
1037 rdev->mddev = mddev;
1038 printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
1042 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1044 char b[BDEVNAME_SIZE];
1049 list_del_init(&rdev->same_set);
1050 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1055 * prevent the device from being mounted, repartitioned or
1056 * otherwise reused by a RAID array (or any other kernel
1057 * subsystem), by bd_claiming the device.
1059 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1062 struct block_device *bdev;
1063 char b[BDEVNAME_SIZE];
1065 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1067 printk(KERN_ERR "md: could not open %s.\n",
1068 __bdevname(dev, b));
1069 return PTR_ERR(bdev);
1071 err = bd_claim(bdev, rdev);
1073 printk(KERN_ERR "md: could not bd_claim %s.\n",
1082 static void unlock_rdev(mdk_rdev_t *rdev)
1084 struct block_device *bdev = rdev->bdev;
1092 void md_autodetect_dev(dev_t dev);
1094 static void export_rdev(mdk_rdev_t * rdev)
1096 char b[BDEVNAME_SIZE];
1097 printk(KERN_INFO "md: export_rdev(%s)\n",
1098 bdevname(rdev->bdev,b));
1102 list_del_init(&rdev->same_set);
1104 md_autodetect_dev(rdev->bdev->bd_dev);
1110 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1112 unbind_rdev_from_array(rdev);
1116 static void export_array(mddev_t *mddev)
1118 struct list_head *tmp;
1121 ITERATE_RDEV(mddev,rdev,tmp) {
1126 kick_rdev_from_array(rdev);
1128 if (!list_empty(&mddev->disks))
1130 mddev->raid_disks = 0;
1131 mddev->major_version = 0;
1134 static void print_desc(mdp_disk_t *desc)
1136 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1137 desc->major,desc->minor,desc->raid_disk,desc->state);
1140 static void print_sb(mdp_super_t *sb)
1145 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1146 sb->major_version, sb->minor_version, sb->patch_version,
1147 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1149 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1150 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1151 sb->md_minor, sb->layout, sb->chunk_size);
1152 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1153 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1154 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1155 sb->failed_disks, sb->spare_disks,
1156 sb->sb_csum, (unsigned long)sb->events_lo);
1159 for (i = 0; i < MD_SB_DISKS; i++) {
1162 desc = sb->disks + i;
1163 if (desc->number || desc->major || desc->minor ||
1164 desc->raid_disk || (desc->state && (desc->state != 4))) {
1165 printk(" D %2d: ", i);
1169 printk(KERN_INFO "md: THIS: ");
1170 print_desc(&sb->this_disk);
1174 static void print_rdev(mdk_rdev_t *rdev)
1176 char b[BDEVNAME_SIZE];
1177 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1178 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1179 rdev->faulty, rdev->in_sync, rdev->desc_nr);
1180 if (rdev->sb_loaded) {
1181 printk(KERN_INFO "md: rdev superblock:\n");
1182 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1184 printk(KERN_INFO "md: no rdev superblock!\n");
1187 void md_print_devices(void)
1189 struct list_head *tmp, *tmp2;
1192 char b[BDEVNAME_SIZE];
1195 printk("md: **********************************\n");
1196 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1197 printk("md: **********************************\n");
1198 ITERATE_MDDEV(mddev,tmp) {
1199 printk("%s: ", mdname(mddev));
1201 ITERATE_RDEV(mddev,rdev,tmp2)
1202 printk("<%s>", bdevname(rdev->bdev,b));
1205 ITERATE_RDEV(mddev,rdev,tmp2)
1208 printk("md: **********************************\n");
1213 static int write_disk_sb(mdk_rdev_t * rdev)
1215 char b[BDEVNAME_SIZE];
1216 if (!rdev->sb_loaded) {
1225 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1226 bdevname(rdev->bdev,b),
1227 (unsigned long long)rdev->sb_offset);
1229 if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
1232 printk("md: write_disk_sb failed for device %s\n",
1233 bdevname(rdev->bdev,b));
1237 static void sync_sbs(mddev_t * mddev)
1240 struct list_head *tmp;
1242 ITERATE_RDEV(mddev,rdev,tmp) {
1243 super_types[mddev->major_version].
1244 sync_super(mddev, rdev);
1245 rdev->sb_loaded = 1;
1249 static void md_update_sb(mddev_t * mddev)
1251 int err, count = 100;
1252 struct list_head *tmp;
1255 mddev->sb_dirty = 0;
1257 mddev->utime = get_seconds();
1260 if (!mddev->events) {
1262 * oops, this 64-bit counter should never wrap.
1263 * Either we are in around ~1 trillion A.C., assuming
1264 * 1 reboot per second, or we have a bug:
1272 * do not write anything to disk if using
1273 * nonpersistent superblocks
1275 if (!mddev->persistent)
1279 "md: updating %s RAID superblock on device (in sync %d)\n",
1280 mdname(mddev),mddev->in_sync);
1283 ITERATE_RDEV(mddev,rdev,tmp) {
1284 char b[BDEVNAME_SIZE];
1285 dprintk(KERN_INFO "md: ");
1287 dprintk("(skipping faulty ");
1289 dprintk("%s ", bdevname(rdev->bdev,b));
1290 if (!rdev->faulty) {
1291 err += write_disk_sb(rdev);
1294 if (!err && mddev->level == LEVEL_MULTIPATH)
1295 /* only need to write one superblock... */
1300 printk(KERN_ERR "md: errors occurred during superblock"
1301 " update, repeating\n");
1305 "md: excessive errors occurred during superblock update, exiting\n");
1310 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1312 * mark the device faulty if:
1314 * - the device is nonexistent (zero size)
1315 * - the device has no valid superblock
1317 * a faulty rdev _never_ has rdev->sb set.
1319 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1321 char b[BDEVNAME_SIZE];
1326 rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
1328 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1329 return ERR_PTR(-ENOMEM);
1331 memset(rdev, 0, sizeof(*rdev));
1333 if ((err = alloc_disk_sb(rdev)))
1336 err = lock_rdev(rdev, newdev);
1343 rdev->data_offset = 0;
1344 atomic_set(&rdev->nr_pending, 0);
1346 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1349 "md: %s has zero or unknown size, marking faulty!\n",
1350 bdevname(rdev->bdev,b));
1355 if (super_format >= 0) {
1356 err = super_types[super_format].
1357 load_super(rdev, NULL, super_minor);
1358 if (err == -EINVAL) {
1360 "md: %s has invalid sb, not importing!\n",
1361 bdevname(rdev->bdev,b));
1366 "md: could not read %s's sb, not importing!\n",
1367 bdevname(rdev->bdev,b));
1371 INIT_LIST_HEAD(&rdev->same_set);
1376 if (rdev->sb_page) {
1382 return ERR_PTR(err);
1386 * Check a full RAID array for plausibility
1390 static int analyze_sbs(mddev_t * mddev)
1393 struct list_head *tmp;
1394 mdk_rdev_t *rdev, *freshest;
1395 char b[BDEVNAME_SIZE];
1398 ITERATE_RDEV(mddev,rdev,tmp)
1399 switch (super_types[mddev->major_version].
1400 load_super(rdev, freshest, mddev->minor_version)) {
1408 "md: fatal superblock inconsistency in %s"
1409 " -- removing from array\n",
1410 bdevname(rdev->bdev,b));
1411 kick_rdev_from_array(rdev);
1415 super_types[mddev->major_version].
1416 validate_super(mddev, freshest);
1419 ITERATE_RDEV(mddev,rdev,tmp) {
1420 if (rdev != freshest)
1421 if (super_types[mddev->major_version].
1422 validate_super(mddev, rdev)) {
1423 printk(KERN_WARNING "md: kicking non-fresh %s"
1425 bdevname(rdev->bdev,b));
1426 kick_rdev_from_array(rdev);
1429 if (mddev->level == LEVEL_MULTIPATH) {
1430 rdev->desc_nr = i++;
1431 rdev->raid_disk = rdev->desc_nr;
1438 if ((mddev->recovery_cp != MaxSector) &&
1439 ((mddev->level == 1) ||
1440 ((mddev->level >= 4) && (mddev->level <= 6))))
1441 printk(KERN_ERR "md: %s: raid array is not clean"
1442 " -- starting background reconstruction\n",
1450 static struct kobject *md_probe(dev_t dev, int *part, void *data)
1452 static DECLARE_MUTEX(disks_sem);
1453 mddev_t *mddev = mddev_find(dev);
1454 struct gendisk *disk;
1455 int partitioned = (MAJOR(dev) != MD_MAJOR);
1456 int shift = partitioned ? MdpMinorShift : 0;
1457 int unit = MINOR(dev) >> shift;
1463 if (mddev->gendisk) {
1468 disk = alloc_disk(1 << shift);
1474 disk->major = MAJOR(dev);
1475 disk->first_minor = unit << shift;
1477 sprintf(disk->disk_name, "md_d%d", unit);
1478 sprintf(disk->devfs_name, "md/d%d", unit);
1480 sprintf(disk->disk_name, "md%d", unit);
1481 sprintf(disk->devfs_name, "md/%d", unit);
1483 disk->fops = &md_fops;
1484 disk->private_data = mddev;
1485 disk->queue = mddev->queue;
1487 mddev->gendisk = disk;
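/*
 * Minor numbering: arrays on MD_MAJOR get one minor each (md0, md1,
 * ...), while the partitionable mdp major shifts the unit number by
 * MdpMinorShift. With MdpMinorShift == 6, unit k owns minors k*64 to
 * k*64+63: the first is the whole device md_d<k>, the other 63 are
 * its partitions.
 */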
1492 void md_wakeup_thread(mdk_thread_t *thread);
1494 static void md_safemode_timeout(unsigned long data)
1496 mddev_t *mddev = (mddev_t *) data;
1498 mddev->safemode = 1;
1499 md_wakeup_thread(mddev->thread);
1503 static int do_md_run(mddev_t * mddev)
1507 struct list_head *tmp;
1509 struct gendisk *disk;
1510 char b[BDEVNAME_SIZE];
1512 if (list_empty(&mddev->disks)) {
1521 * Analyze all RAID superblock(s)
1523 if (!mddev->raid_disks && analyze_sbs(mddev)) {
1528 chunk_size = mddev->chunk_size;
1529 pnum = level_to_pers(mddev->level);
1531 if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1534 * 'default chunksize' in the old md code used to
1535 * be PAGE_SIZE, baaad.
1536 * we abort here to be on the safe side. We don't
1537 * want to continue the bad practice.
1540 "no chunksize specified, see 'man raidtab'\n");
1543 if (chunk_size > MAX_CHUNK_SIZE) {
1544 printk(KERN_ERR "too big chunk_size: %d > %d\n",
1545 chunk_size, MAX_CHUNK_SIZE);
1549 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
1551 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1555 if (chunk_size < PAGE_SIZE) {
1556 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1557 chunk_size, PAGE_SIZE);
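/*
 * The power-of-two check above works because ffz(~chunk_size) yields
 * the index of the lowest set bit in chunk_size, and 1 << that index
 * equals chunk_size only if no other bit is set. E.g. 65536 (64K)
 * passes, while 98304 (96K = bits 15 and 16) is rejected.
 */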
1561 /* devices must have minimum size of one chunk */
1562 ITERATE_RDEV(mddev,rdev,tmp) {
1565 if (rdev->size < chunk_size / 1024) {
1567 "md: Dev %s smaller than chunk_size:"
1569 bdevname(rdev->bdev,b),
1570 (unsigned long long)rdev->size,
1577 if (pnum >= MAX_PERSONALITY) {
1585 request_module("md-personality-%d", pnum);
1590 * Drop all container device buffers, from now on
1591 * the only valid external interface is through the md
1593 * Also find largest hardsector size
1595 ITERATE_RDEV(mddev,rdev,tmp) {
1598 sync_blockdev(rdev->bdev);
1599 invalidate_bdev(rdev->bdev, 0);
1602 md_probe(mddev->unit, NULL, NULL);
1603 disk = mddev->gendisk;
1607 spin_lock(&pers_lock);
1608 if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1609 spin_unlock(&pers_lock);
1610 printk(KERN_WARNING "md: personality %d is not loaded!\n",
1615 mddev->pers = pers[pnum];
1616 spin_unlock(&pers_lock);
1618 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
1620 err = mddev->pers->run(mddev);
1622 printk(KERN_ERR "md: pers->run() failed ...\n");
1623 module_put(mddev->pers->owner);
1627 atomic_set(&mddev->writes_pending,0);
1628 mddev->safemode = 0;
1629 mddev->safemode_timer.function = md_safemode_timeout;
1630 mddev->safemode_timer.data = (unsigned long) mddev;
1631 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
1634 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1636 if (mddev->sb_dirty)
1637 md_update_sb(mddev);
1639 set_capacity(disk, mddev->array_size<<1);
1641 /* If we call blk_queue_make_request here, it will
1642 * re-initialise max_sectors etc which may have been
1643 * refined inside ->run(). So just set the bits we need to set.
1644 * Most initialisation happened when we called
1645 * blk_queue_make_request(..., md_fail_request)
1648 mddev->queue->queuedata = mddev;
1649 mddev->queue->make_request_fn = mddev->pers->make_request;
1655 static int restart_array(mddev_t *mddev)
1657 struct gendisk *disk = mddev->gendisk;
1661 * Complain if it has no devices
1664 if (list_empty(&mddev->disks))
1672 mddev->safemode = 0;
1674 set_disk_ro(disk, 0);
1676 printk(KERN_INFO "md: %s switched to read-write mode.\n",
1679 * Kick recovery or resync if necessary
1681 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1682 md_wakeup_thread(mddev->thread);
1685 printk(KERN_ERR "md: %s has no personality assigned.\n",
1694 static int do_md_stop(mddev_t * mddev, int ro)
1697 struct gendisk *disk = mddev->gendisk;
1700 if (atomic_read(&mddev->active)>2) {
1701 printk("md: %s still in use.\n",mdname(mddev));
1705 if (mddev->sync_thread) {
1706 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1707 md_unregister_thread(mddev->sync_thread);
1708 mddev->sync_thread = NULL;
1711 del_timer_sync(&mddev->safemode_timer);
1713 invalidate_partition(disk, 0);
1722 set_disk_ro(disk, 0);
1723 blk_queue_make_request(mddev->queue, md_fail_request);
1724 mddev->pers->stop(mddev);
1725 module_put(mddev->pers->owner);
1730 if (!mddev->in_sync) {
1731 /* mark array as shutdown cleanly */
1733 md_update_sb(mddev);
1736 set_disk_ro(disk, 1);
1739 * Free resources if final stop
1742 struct gendisk *disk;
1743 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
1745 export_array(mddev);
1747 mddev->array_size = 0;
1748 disk = mddev->gendisk;
1750 set_capacity(disk, 0);
1753 printk(KERN_INFO "md: %s switched to read-only mode.\n",
1760 static void autorun_array(mddev_t *mddev)
1763 struct list_head *tmp;
1766 if (list_empty(&mddev->disks)) {
1771 printk(KERN_INFO "md: running: ");
1773 ITERATE_RDEV(mddev,rdev,tmp) {
1774 char b[BDEVNAME_SIZE];
1775 printk("<%s>", bdevname(rdev->bdev,b));
1779 err = do_md_run (mddev);
1781 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1782 do_md_stop (mddev, 0);
1787 * let's try to run arrays based on all disks that have arrived
1788 * until now. (those are in pending_raid_disks)
1790 * the method: pick the first pending disk, collect all disks with
1791 * the same UUID, remove all from the pending list and put them into
1792 * the 'same_array' list. Then order this list based on superblock
1793 * update time (freshest comes first), kick out 'old' disks and
1794 * compare superblocks. If everything's fine then run it.
1796 * If "unit" is allocated, then bump its reference count
1798 static void autorun_devices(int part)
1800 struct list_head candidates;
1801 struct list_head *tmp;
1802 mdk_rdev_t *rdev0, *rdev;
1804 char b[BDEVNAME_SIZE];
1806 printk(KERN_INFO "md: autorun ...\n");
1807 while (!list_empty(&pending_raid_disks)) {
1809 rdev0 = list_entry(pending_raid_disks.next,
1810 mdk_rdev_t, same_set);
1812 printk(KERN_INFO "md: considering %s ...\n",
1813 bdevname(rdev0->bdev,b));
1814 INIT_LIST_HEAD(&candidates);
1815 ITERATE_RDEV_PENDING(rdev,tmp)
1816 if (super_90_load(rdev, rdev0, 0) >= 0) {
1817 printk(KERN_INFO "md: adding %s ...\n",
1818 bdevname(rdev->bdev,b));
1819 list_move(&rdev->same_set, &candidates);
1822 * now we have a set of devices, with all of them having
1823 * mostly sane superblocks. It's time to allocate the
1826 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
1827 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
1828 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
1832 dev = MKDEV(mdp_major,
1833 rdev0->preferred_minor << MdpMinorShift);
1835 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
1837 md_probe(dev, NULL, NULL);
1838 mddev = mddev_find(dev);
1841 "md: cannot allocate memory for md drive.\n");
1844 if (mddev_lock(mddev))
1845 printk(KERN_WARNING "md: %s locked, cannot run\n",
1847 else if (mddev->raid_disks || mddev->major_version
1848 || !list_empty(&mddev->disks)) {
1850 "md: %s already running, cannot run %s\n",
1851 mdname(mddev), bdevname(rdev0->bdev,b));
1852 mddev_unlock(mddev);
1854 printk(KERN_INFO "md: created %s\n", mdname(mddev));
1855 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1856 list_del_init(&rdev->same_set);
1857 if (bind_rdev_to_array(rdev, mddev))
1860 autorun_array(mddev);
1861 mddev_unlock(mddev);
1863 /* on success, candidates will be empty, on error
1866 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1870 printk(KERN_INFO "md: ... autorun DONE.\n");
1874 * import RAID devices based on one partition
1875 * if possible, the array gets run as well.
1878 static int autostart_array(dev_t startdev)
1880 char b[BDEVNAME_SIZE];
1881 int err = -EINVAL, i;
1882 mdp_super_t *sb = NULL;
1883 mdk_rdev_t *start_rdev = NULL, *rdev;
1885 start_rdev = md_import_device(startdev, 0, 0);
1886 if (IS_ERR(start_rdev))
1890 /* NOTE: this can only work for 0.90.0 superblocks */
1891 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
1892 if (sb->major_version != 0 ||
1893 sb->minor_version != 90) {
1894 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
1895 export_rdev(start_rdev);
1899 if (start_rdev->faulty) {
1901 "md: can not autostart based on faulty %s!\n",
1902 bdevname(start_rdev->bdev,b));
1903 export_rdev(start_rdev);
1906 list_add(&start_rdev->same_set, &pending_raid_disks);
1908 for (i = 0; i < MD_SB_DISKS; i++) {
1909 mdp_disk_t *desc = sb->disks + i;
1910 dev_t dev = MKDEV(desc->major, desc->minor);
1914 if (dev == startdev)
1916 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
1918 rdev = md_import_device(dev, 0, 0);
1922 list_add(&rdev->same_set, &pending_raid_disks);
1926 * possibly return error codes
1934 static int get_version(void __user * arg)
1938 ver.major = MD_MAJOR_VERSION;
1939 ver.minor = MD_MINOR_VERSION;
1940 ver.patchlevel = MD_PATCHLEVEL_VERSION;
1942 if (copy_to_user(arg, &ver, sizeof(ver)))
1948 static int get_array_info(mddev_t * mddev, void __user * arg)
1950 mdu_array_info_t info;
1951 int nr,working,active,failed,spare;
1953 struct list_head *tmp;
1955 nr=working=active=failed=spare=0;
1956 ITERATE_RDEV(mddev,rdev,tmp) {
1969 info.major_version = mddev->major_version;
1970 info.minor_version = mddev->minor_version;
1971 info.patch_version = MD_PATCHLEVEL_VERSION;
1972 info.ctime = mddev->ctime;
1973 info.level = mddev->level;
1974 info.size = mddev->size;
1976 info.raid_disks = mddev->raid_disks;
1977 info.md_minor = mddev->md_minor;
1978 info.not_persistent= !mddev->persistent;
1980 info.utime = mddev->utime;
1983 info.state = (1<<MD_SB_CLEAN);
1984 info.active_disks = active;
1985 info.working_disks = working;
1986 info.failed_disks = failed;
1987 info.spare_disks = spare;
1989 info.layout = mddev->layout;
1990 info.chunk_size = mddev->chunk_size;
1992 if (copy_to_user(arg, &info, sizeof(info)))
1998 static int get_disk_info(mddev_t * mddev, void __user * arg)
2000 mdu_disk_info_t info;
2004 if (copy_from_user(&info, arg, sizeof(info)))
2009 rdev = find_rdev_nr(mddev, nr);
2011 info.major = MAJOR(rdev->bdev->bd_dev);
2012 info.minor = MINOR(rdev->bdev->bd_dev);
2013 info.raid_disk = rdev->raid_disk;
2016 info.state |= (1<<MD_DISK_FAULTY);
2017 else if (rdev->in_sync) {
2018 info.state |= (1<<MD_DISK_ACTIVE);
2019 info.state |= (1<<MD_DISK_SYNC);
2022 info.major = info.minor = 0;
2023 info.raid_disk = -1;
2024 info.state = (1<<MD_DISK_REMOVED);
2027 if (copy_to_user(arg, &info, sizeof(info)))
2033 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2035 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2037 dev_t dev = MKDEV(info->major,info->minor);
2039 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2042 if (!mddev->raid_disks) {
2044 /* expecting a device which has a superblock */
2045 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2048 "md: md_import_device returned %ld\n",
2050 return PTR_ERR(rdev);
2052 if (!list_empty(&mddev->disks)) {
2053 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2054 mdk_rdev_t, same_set);
2055 int err = super_types[mddev->major_version]
2056 .load_super(rdev, rdev0, mddev->minor_version);
2059 "md: %s has different UUID to %s\n",
2060 bdevname(rdev->bdev,b),
2061 bdevname(rdev0->bdev,b2));
2066 err = bind_rdev_to_array(rdev, mddev);
2073 * add_new_disk can be used once the array is assembled
2074 * to add "hot spares". They must already have a superblock
2079 if (!mddev->pers->hot_add_disk) {
2081 "%s: personality does not support diskops!\n",
2085 rdev = md_import_device(dev, mddev->major_version,
2086 mddev->minor_version);
2089 "md: md_import_device returned %ld\n",
2091 return PTR_ERR(rdev);
2093 rdev->in_sync = 0; /* just to be sure */
2094 rdev->raid_disk = -1;
2095 err = bind_rdev_to_array(rdev, mddev);
2099 md_wakeup_thread(mddev->thread);
2103 /* otherwise, add_new_disk is only allowed
2104 * for major_version==0 superblocks
2106 if (mddev->major_version != 0) {
2107 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2112 if (!(info->state & (1<<MD_DISK_FAULTY))) {
2114 rdev = md_import_device (dev, -1, 0);
2117 "md: error, md_import_device() returned %ld\n",
2119 return PTR_ERR(rdev);
2121 rdev->desc_nr = info->number;
2122 if (info->raid_disk < mddev->raid_disks)
2123 rdev->raid_disk = info->raid_disk;
2125 rdev->raid_disk = -1;
2128 if (rdev->raid_disk < mddev->raid_disks)
2129 rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2133 err = bind_rdev_to_array(rdev, mddev);
2139 if (!mddev->persistent) {
2140 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2141 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2143 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2144 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2146 if (!mddev->size || (mddev->size > rdev->size))
2147 mddev->size = rdev->size;
2153 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2155 char b[BDEVNAME_SIZE];
2161 rdev = find_rdev(mddev, dev);
2165 if (rdev->raid_disk >= 0)
2168 kick_rdev_from_array(rdev);
2169 md_update_sb(mddev);
2173 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
2174 bdevname(rdev->bdev,b), mdname(mddev));
2178 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2180 char b[BDEVNAME_SIZE];
2188 if (mddev->major_version != 0) {
2189 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2190 " version-0 superblocks.\n",
2194 if (!mddev->pers->hot_add_disk) {
2196 "%s: personality does not support diskops!\n",
2201 rdev = md_import_device (dev, -1, 0);
2204 "md: error, md_import_device() returned %ld\n",
2209 if (mddev->persistent)
2210 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2213 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2215 size = calc_dev_size(rdev, mddev->chunk_size);
2218 if (size < mddev->size) {
2220 "%s: disk size %llu blocks < array size %llu\n",
2221 mdname(mddev), (unsigned long long)size,
2222 (unsigned long long)mddev->size);
2229 "md: can not hot-add faulty %s disk to %s!\n",
2230 bdevname(rdev->bdev,b), mdname(mddev));
2236 bind_rdev_to_array(rdev, mddev);
2239 * The rest should better be atomic, we can have disk failures
2240 * noticed in interrupt contexts ...
2243 if (rdev->desc_nr == mddev->max_disks) {
2244 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2247 goto abort_unbind_export;
2250 rdev->raid_disk = -1;
2252 md_update_sb(mddev);
2255 * Kick recovery, maybe this spare has to be added to the
2256 * array immediately.
2258 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2259 md_wakeup_thread(mddev->thread);
2263 abort_unbind_export:
2264 unbind_rdev_from_array(rdev);
2272 * set_array_info is used two different ways
2273 * The original usage is when creating a new array.
2274 * In this usage, raid_disks is > 0 and it together with
2275 * level, size, not_persistent,layout,chunksize determine the
2276 * shape of the array.
2277 * This will always create an array with a type-0.90.0 superblock.
2278 * The newer usage is when assembling an array.
2279 * In this case raid_disks will be 0, and the major_version field is
2280 * used to determine which style super-blocks are to be found on the devices.
2281 * The minor and patch _version numbers are also kept in case the
2282 * super_block handler wishes to interpret them.
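/*
 * A minimal user-space sketch of the assembly usage (raid_disks == 0),
 * assuming fd is an open descriptor on an unused /dev/md0 and the
 * members carry 0.90.0 superblocks:
 *
 *	mdu_array_info_t info;
 *	memset(&info, 0, sizeof(info));
 *	info.major_version = 0;
 *	if (ioctl(fd, SET_ARRAY_INFO, &info) < 0)
 *		perror("SET_ARRAY_INFO");
 *	// ...then ADD_NEW_DISK for each member and finally RUN_ARRAY
 */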
2284 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2287 if (info->raid_disks == 0) {
2288 /* just setting version number for superblock loading */
2289 if (info->major_version < 0 ||
2290 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2291 super_types[info->major_version].name == NULL) {
2292 /* maybe try to auto-load a module? */
2294 "md: superblock version %d not known\n",
2295 info->major_version);
2298 mddev->major_version = info->major_version;
2299 mddev->minor_version = info->minor_version;
2300 mddev->patch_version = info->patch_version;
2303 mddev->major_version = MD_MAJOR_VERSION;
2304 mddev->minor_version = MD_MINOR_VERSION;
2305 mddev->patch_version = MD_PATCHLEVEL_VERSION;
2306 mddev->ctime = get_seconds();
2308 mddev->level = info->level;
2309 mddev->size = info->size;
2310 mddev->raid_disks = info->raid_disks;
2311 /* don't set md_minor, it is determined by which /dev/md* was
2314 if (info->state & (1<<MD_SB_CLEAN))
2315 mddev->recovery_cp = MaxSector;
2317 mddev->recovery_cp = 0;
2318 mddev->persistent = ! info->not_persistent;
2320 mddev->layout = info->layout;
2321 mddev->chunk_size = info->chunk_size;
2323 mddev->max_disks = MD_SB_DISKS;
2325 mddev->sb_dirty = 1;
2328 * Generate a 128 bit UUID
2330 get_random_bytes(mddev->uuid, 16);
2336 * update_array_info is used to change the configuration of an
2338 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
2339 * fields in the info are checked against the array.
2340 * Any differences that cannot be handled will cause an error.
2341 * Normally, only one change can be managed at a time.
2343 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2348 if (mddev->major_version != info->major_version ||
2349 mddev->minor_version != info->minor_version ||
2350 /* mddev->patch_version != info->patch_version || */
2351 mddev->ctime != info->ctime ||
2352 mddev->level != info->level ||
2353 /* mddev->layout != info->layout || */
2354 !mddev->persistent != info->not_persistent||
2355 mddev->chunk_size != info->chunk_size )
2357 /* Check there is only one change */
2358 if (mddev->size != info->size) cnt++;
2359 if (mddev->raid_disks != info->raid_disks) cnt++;
2360 if (mddev->layout != info->layout) cnt++;
2361 if (cnt == 0) return 0;
2362 if (cnt > 1) return -EINVAL;
2364 if (mddev->layout != info->layout) {
2366 * we don't need to do anything at the md level, the
2367 * personality will take care of it all.
2369 if (mddev->pers->reconfig == NULL)
2372 return mddev->pers->reconfig(mddev, info->layout, -1);
2374 if (mddev->size != info->size) {
2376 struct list_head *tmp;
2377 if (mddev->pers->resize == NULL)
2379 /* The "size" is the amount of each device that is used.
2380 * This can only make sense for arrays with redundancy.
2381 * linear and raid0 always use whatever space is available
2382 * We can only consider changing the size if no resync
2383 * or reconstruction is happening, and if the new size
2384 * is acceptable. It must fit before the sb_offset or,
2385 * if that is <data_offset, it must fit before the
2386 * size of each device.
2387 * If size is zero, we find the largest size that fits.
2389 if (mddev->sync_thread)
2391 ITERATE_RDEV(mddev,rdev,tmp) {
2393 int fit = (info->size == 0);
2394 if (rdev->sb_offset > rdev->data_offset)
2395 avail = (rdev->sb_offset*2) - rdev->data_offset;
2397 avail = get_capacity(rdev->bdev->bd_disk)
2398 - rdev->data_offset;
2399 if (fit && (info->size == 0 || info->size > avail/2))
2400 info->size = avail/2;
2401 if (avail < ((sector_t)info->size << 1))
2404 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
2406 struct block_device *bdev;
2408 bdev = bdget_disk(mddev->gendisk, 0);
2410 down(&bdev->bd_inode->i_sem);
2411 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2412 up(&bdev->bd_inode->i_sem);
2417 if (mddev->raid_disks != info->raid_disks) {
2418 /* change the number of raid disks */
2419 if (mddev->pers->reshape == NULL)
2421 if (info->raid_disks <= 0 ||
2422 info->raid_disks >= mddev->max_disks)
2424 if (mddev->sync_thread)
2426 rv = mddev->pers->reshape(mddev, info->raid_disks);
2428 struct block_device *bdev;
2430 bdev = bdget_disk(mddev->gendisk, 0);
2432 down(&bdev->bd_inode->i_sem);
2433 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2434 up(&bdev->bd_inode->i_sem);
2439 md_update_sb(mddev);
2443 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2447 if (mddev->pers == NULL)
2450 rdev = find_rdev(mddev, dev);
2454 md_error(mddev, rdev);
2458 static int md_ioctl(struct inode *inode, struct file *file,
2459 unsigned int cmd, unsigned long arg)
2462 void __user *argp = (void __user *)arg;
2463 struct hd_geometry __user *loc = argp;
2464 mddev_t *mddev = NULL;
2466 if (!capable(CAP_SYS_ADMIN))
2470 * Commands dealing with the RAID driver but not any
2476 err = get_version(argp);
2479 case PRINT_RAID_DEBUG:
2487 autostart_arrays(arg);
2494 * Commands creating/starting a new array:
2497 mddev = inode->i_bdev->bd_disk->private_data;
2505 if (cmd == START_ARRAY) {
2506 /* START_ARRAY doesn't need to lock the array as autostart_array
2507 * does the locking, and it could even be a different array
2512 "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
2513 "This will not be supported beyond 2.6\n",
2514 current->comm, current->pid);
2517 err = autostart_array(new_decode_dev(arg));
2519 printk(KERN_WARNING "md: autostart failed!\n");
2525 err = mddev_lock(mddev);
2528 "md: ioctl lock interrupted, reason %d, cmd %d\n",
2535 case SET_ARRAY_INFO:
2537 mdu_array_info_t info;
2538 if (!arg)
2539 memset(&info, 0, sizeof(info));
2540 else if (copy_from_user(&info, argp, sizeof(info))) {
2545 err = update_array_info(mddev, &info);
2547 printk(KERN_WARNING "md: couldn't update"
2548 " array info. %d\n", err);
2553 if (!list_empty(&mddev->disks)) {
2555 "md: array %s already has disks!\n",
2560 if (mddev->raid_disks) {
2562 "md: array %s already initialised!\n",
2567 err = set_array_info(mddev, &info);
2569 printk(KERN_WARNING "md: couldn't set"
2570 " array info. %d\n", err);
2580 * Commands querying/configuring an existing array:
2582 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY or RUN_ARRAY is allowed */
2583 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
2589 * Commands even a read-only array can execute:
2593 case GET_ARRAY_INFO:
2594 err = get_array_info(mddev, argp);
2598 err = get_disk_info(mddev, argp);
2601 case RESTART_ARRAY_RW:
2602 err = restart_array(mddev);
2606 err = do_md_stop (mddev, 0);
2610 err = do_md_stop (mddev, 1);
2614 * We have a problem here: there is no easy way to give a CHS
2615 * virtual geometry. We currently pretend that we have 2 heads and
2616 * 4 sectors (with a BIG number of cylinders...). This drives
2617 * dosfs just mad... ;-)
2624 err = put_user (2, (char __user *) &loc->heads);
2627 err = put_user (4, (char __user *) &loc->sectors);
2630 err = put_user(get_capacity(mddev->gendisk)/8,
2631 (short __user *) &loc->cylinders);
2634 err = put_user (get_start_sect(inode->i_bdev),
2635 (long __user *) &loc->start);
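/*
 * With 2 heads and 4 sectors per track a "cylinder" is 8 sectors,
 * hence the capacity/8 above: a 1GiB array (2097152 sectors) reports
 * 262144 cylinders.
 */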
2640 * The remaining ioctls are changing the state of the
2641 * superblock, so we do not allow read-only arrays
2653 mdu_disk_info_t info;
2654 if (copy_from_user(&info, argp, sizeof(info)))
2657 err = add_new_disk(mddev, &info);
2661 case HOT_REMOVE_DISK:
2662 err = hot_remove_disk(mddev, new_decode_dev(arg));
2666 err = hot_add_disk(mddev, new_decode_dev(arg));
2669 case SET_DISK_FAULTY:
2670 err = set_disk_faulty(mddev, new_decode_dev(arg));
2674 err = do_md_run (mddev);
2678 if (_IOC_TYPE(cmd) == MD_MAJOR)
2679 printk(KERN_WARNING "md: %s(pid %d) used"
2680 " obsolete MD ioctl, upgrade your"
2681 " software to use new ioctls.\n",
2682 current->comm, current->pid);
2689 mddev_unlock(mddev);
2699 static int md_open(struct inode *inode, struct file *file)
2702 * Succeed if we can lock the mddev, which confirms that
2703 * it isn't being stopped right now.
2705 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2708 if ((err = mddev_lock(mddev)))
2713 mddev_unlock(mddev);
2715 check_disk_change(inode->i_bdev);
2720 static int md_release(struct inode *inode, struct file * file)
2722 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2731 static int md_media_changed(struct gendisk *disk)
2733 mddev_t *mddev = disk->private_data;
2735 return mddev->changed;
2738 static int md_revalidate(struct gendisk *disk)
2740 mddev_t *mddev = disk->private_data;
2745 static struct block_device_operations md_fops =
2747 .owner = THIS_MODULE,
2749 .release = md_release,
2751 .media_changed = md_media_changed,
2752 .revalidate_disk= md_revalidate,
2755 int md_thread(void * arg)
2757 mdk_thread_t *thread = arg;
2765 daemonize(thread->name, mdname(thread->mddev));
2767 current->exit_signal = SIGCHLD;
2768 allow_signal(SIGKILL);
2769 thread->tsk = current;
2772 * md_thread is a 'system-thread', its priority should be very
2773 * high. We avoid resource deadlocks individually in each
2774 * raid personality. (RAID5 does preallocation) We also use RR and
2775 * the very same RT priority as kswapd, thus we will never get
2776 * into a priority inversion deadlock.
2778 * we definitely have to have equal or higher priority than
2779 * bdflush, otherwise bdflush will deadlock if there are too
2780 * many dirty RAID5 blocks.
2784 complete(thread->event);
2785 while (thread->run) {
2786 void (*run)(mddev_t *);
2788 wait_event_interruptible(thread->wqueue,
2789 test_bit(THREAD_WAKEUP, &thread->flags));
2790 if (current->flags & PF_FREEZE)
2791 refrigerator(PF_FREEZE);
2793 clear_bit(THREAD_WAKEUP, &thread->flags);
2799 if (signal_pending(current))
2800 flush_signals(current);
2802 complete(thread->event);
2806 void md_wakeup_thread(mdk_thread_t *thread)
2809 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
2810 set_bit(THREAD_WAKEUP, &thread->flags);
2811 wake_up(&thread->wqueue);
2815 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
2818 mdk_thread_t *thread;
2820 struct completion event;
2822 thread = (mdk_thread_t *) kmalloc
2823 (sizeof(mdk_thread_t), GFP_KERNEL);
2827 memset(thread, 0, sizeof(mdk_thread_t));
2828 init_waitqueue_head(&thread->wqueue);
2830 init_completion(&event);
2831 thread->event = &event;
2833 thread->mddev = mddev;
2834 thread->name = name;
2835 ret = kernel_thread(md_thread, thread, 0);
2840 wait_for_completion(&event);
2844 static void md_interrupt_thread(mdk_thread_t *thread)
2850 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
2851 send_sig(SIGKILL, thread->tsk, 1);
2854 void md_unregister_thread(mdk_thread_t *thread)
2856 struct completion event;
2858 init_completion(&event);
2860 thread->event = &event;
2862 thread->name = NULL;
2863 md_interrupt_thread(thread);
2864 wait_for_completion(&event);
2868 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
2875 if (!rdev || rdev->faulty)
2878 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
2880 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
2881 __builtin_return_address(0),__builtin_return_address(1),
2882 __builtin_return_address(2),__builtin_return_address(3));
2884 if (!mddev->pers->error_handler)
2886 mddev->pers->error_handler(mddev,rdev);
2887 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2888 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2889 md_wakeup_thread(mddev->thread);
2892 /* seq_file implementation /proc/mdstat */
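/*
 * The iterator below encodes two pseudo-positions as magic pointers:
 * (void*)1 is the header ("Personalities : ..." line) and (void*)2 the
 * tail ("unused devices: ..." line); any other value is a real mddev_t*
 * with a reference held across the unlock and dropped in md_seq_stop().
 */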
static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	seq_printf(seq, "unused devices: ");

	ITERATE_RDEV_PENDING(rdev,tmp) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	unsigned long max_blocks, resync, res, dt, db, rt;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors >> 1;
	else
		max_blocks = mddev->size;

	/*
	 * Should not happen.
	 */
	if (!max_blocks) {
		MD_BUG();
		return;
	}
	res = (resync/1024)*1000/(max_blocks/1024 + 1);
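	/*
	 * 'res' is the completed fraction in tenths of a percent
	 * (0..1000), and the bar below maps it onto 20 cells: e.g.
	 * res == 473 (printed as 47.3%) gives x = 473/50 = 9 '=' cells,
	 * one '>', and y = 20-9 = 11 '.' cells.
	 */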
	{
		int i, x = res/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
		      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		       "resync" : "recovery"),
		      res/10, res % 10, resync, max_blocks);
	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
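	/*
	 * Worked example with illustrative numbers: max_blocks = 1000000,
	 * resync = 250000, dt = 100 and db = 50000 gives
	 * rt = (100 * (750000 / (50000/100 + 1))) / 100 = 1497 seconds,
	 * printed below as "finish=24.9min".
	 */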
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt/2);
	rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;

	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/dt);
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t size;
	struct list_head *tmp2;
	mdk_rdev_t *rdev;
	int i;

	if (v == (void*)1) {
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		for (i = 0; i < MAX_PERSONALITY; i++)
			if (pers[i])
				seq_printf(seq, "[%s] ", pers[i]->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev)!=0)
		return -EINTR;
	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro)
				seq_printf(seq, " (read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		size = 0;
		ITERATE_RDEV(mddev,rdev,tmp2) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (rdev->faulty) {
				seq_printf(seq, "(F)");
				continue;
			}
			size += rdev->size;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					(unsigned long long)mddev->array_size);
			else
				seq_printf(seq, "\n      %llu blocks",
					(unsigned long long)size);
		}

		if (mddev->pers) {
			mddev->pers->status (seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->curr_resync > 2)
				status_resync (seq, mddev);
			else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
				seq_printf(seq, " resync=DELAYED");
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}
static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;

	error = seq_open(file, &md_seq_ops);
	return error;
}
static struct file_operations md_seq_fops = {
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release,
};
int register_md_personality(int pnum, mdk_personality_t *p)
{
	if (pnum >= MAX_PERSONALITY) {
		printk(KERN_ERR
		       "md: tried to install personality %s as nr %d, but max is %lu\n",
		       p->name, pnum, MAX_PERSONALITY-1);
		return -EINVAL;
	}

	spin_lock(&pers_lock);
	if (pers[pnum]) {
		spin_unlock(&pers_lock);
		return -EBUSY;
	}

	pers[pnum] = p;
	printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
	spin_unlock(&pers_lock);
	return 0;
}
int unregister_md_personality(int pnum)
{
	if (pnum >= MAX_PERSONALITY) {
		MD_BUG();
		return -EINVAL;
	}

	printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
	spin_lock(&pers_lock);
	pers[pnum] = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;
	int idle;
	unsigned long curr_events;

	idle = 1;
	ITERATE_RDEV(mddev,rdev,tmp) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, read_sectors) +
				disk_stat_read(disk, write_sectors) -
				atomic_read(&disk->sync_io);
		/* Allow some slack between the value of curr_events and
		 * last_events, as there are some uninteresting races.
		 * Note: the following is an unsigned comparison.
		 */
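		/*
		 * Concretely, "(curr_events - last_events + 32) > 64" is
		 * false exactly when the drift lies in [-32, +32]: e.g. a
		 * drift of -5 wraps the subtraction to a huge unsigned
		 * value, but adding 32 brings it back to 27 <= 64, so the
		 * disk still counts as idle.
		 */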
		if ((curr_events - rdev->last_events + 32) > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512-byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
void md_write_start(mddev_t *mddev)
{
	if (!atomic_read(&mddev->writes_pending)) {
		mddev_lock_uninterruptible(mddev);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			del_timer(&mddev->safemode_timer);
			md_update_sb(mddev);
		}
		atomic_inc(&mddev->writes_pending);
		mddev_unlock(mddev);
	} else
		atomic_inc(&mddev->writes_pending);
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
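/*
 * A minimal sketch of the intended calling convention (personality
 * code varies; this is not lifted from any specific driver):
 *
 *	md_write_start(mddev);		before issuing a write
 *	... submit the write ...
 *	md_write_end(mddev);		once the write completes
 *
 * writes_pending is therefore non-zero exactly while writes are in
 * flight, which is what the safemode logic below keys off.
 */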
static inline void md_enter_safemode(mddev_t *mddev)
{
	if (!mddev->safemode) return;
	if (mddev->safemode == 2 &&
	    (atomic_read(&mddev->writes_pending) || mddev->in_sync ||
		    mddev->recovery_cp != MaxSector))
		return; /* avoid the lock */
	mddev_lock_uninterruptible(mddev);
	if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
	    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
		mddev->in_sync = 1;
		md_update_sb(mddev);
	}
	mddev_unlock(mddev);

	if (mddev->safemode == 1)
		mddev->safemode = 0;
}
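/*
 * As used in this file, the safemode values distinguish two flavours:
 *   1 - timed: the safemode_timer armed in md_write_end() fired after
 *       safemode_delay with no writes; cleared again above.
 *   2 - immediate: set by md_handle_safemode() on a pending signal, so
 *       the array is marked in_sync as soon as writes drain.
 */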
void md_handle_safemode(mddev_t *mddev)
{
	if (signal_pending(current)) {
		printk(KERN_INFO "md: %s in immediate safe mode\n",
			mdname(mddev));
		mddev->safemode = 2;
		flush_signals(current);
	}
	md_enter_safemode(mddev);
}
DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
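/*
 * With 10 marks taken every 3 seconds, the resync speed reported in
 * /proc/mdstat is in effect averaged over roughly the last 30 seconds
 * of activity.
 */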
static void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow the conflicting resync to
	 *      commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours. When we find one that is the same or higher
	 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */
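	/*
	 * Example of the yielding rule: if md0 and md1 are built from
	 * partitions of the same physical disks and both want to resync,
	 * each first sets curr_resync to 2; the mddev with the lower
	 * address drops to 1 and sleeps on resync_wait, while the other
	 * keeps 2 and proceeds.
	 */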
	do {
		mddev->curr_resync = 2;

	try_again:
		if (signal_pending(current)) {
			flush_signals(current);
			goto skip;
		}
		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!signal_pending(current)
				    && mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying resync of %s"
					       " until %s has finished resync (they"
					       " share one or more physical units)\n",
					       mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		/* resync follows the size requested by the personality,
		 * which defaults to the physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
	else
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;
	printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
		" %d KB/sec/disc.\n", sysctl_speed_limit_min);
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
		"(but not more than %d KB/sec) for reconstruction.\n",
	       sysctl_speed_limit_max);
	is_mddev_idle(mddev); /* this also initializes IO event counters */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = j;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];
	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j > 2) {
		printk(KERN_INFO
		       "md: resuming recovery of %s from checkpoint.\n",
		       mdname(mddev));
		mddev->curr_resync = j;
	}
	while (j < max_sectors) {
		int sectors;

		sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
		if (sectors < 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}
		atomic_add(sectors, &mddev->recovery_active);
		j += sectors;
		if (j>1) mddev->curr_resync = j;

		if (last_check + window > j || j == max_sectors)
			continue;

		last_check = j;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}
		if (signal_pending(current)) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			flush_signals(current);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}
		/*
		 * this loop exits only when we are slower than the
		 * 'hard' speed limit, or when the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();
		currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
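		/*
		 * Units check: j and resync_mark_cnt count 512-byte
		 * sectors, so halving the delta gives KB; dividing by the
		 * elapsed seconds (+1 against division by zero) yields
		 * KB/sec, directly comparable to the sysctl speed limits.
		 */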
		if (currspeed > sysctl_speed_limit_min) {
			if ((currspeed > sysctl_speed_limit_max) ||
					!is_mddev_idle(mddev)) {
				msleep_interruptible(250);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, 1);
	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync >= mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
			       "md: checkpointing recovery of %s.\n",
			       mdname(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

	md_enter_safemode(mddev);
 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;

	dprintk(KERN_INFO "md: recovery thread got woken up ...\n");

	if (mddev->ro)
		return;
	if ( ! (
		mddev->sb_dirty ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery)
		))
		return;
	if (mddev_trylock(mddev)==0) {
		int spares = 0;

		if (mddev->sb_dirty)
			md_update_sb(mddev);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...
				 * activate any spares
				 */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);
			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->recovery)
			/* probably just the RECOVERY_NEEDED flag */
			mddev->recovery = 0;

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible
		 */
		ITERATE_RDEV(mddev,rdev,rtmp) {
			if (rdev->raid_disk >= 0 &&
			    rdev->faulty &&
			    atomic_read(&rdev->nr_pending)==0) {
				if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
					rdev->raid_disk = -1;
			}
			if (!rdev->faulty && rdev->raid_disk >= 0 && !rdev->in_sync)
				spares++;
		}
		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !rdev->faulty) {
					if (mddev->pers->hot_add_disk(mddev,rdev))
						spares++;
					else
						break;
				}
		}

		if (!spares && (mddev->recovery_cp == MaxSector )) {
			/* nothing we can do ... */
			goto unlock;
		}
		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (!spares)
				set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
int md_notify_reboot(struct notifier_block *this,
		     unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)==0)
				do_md_stop (mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}
int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
			" MD_SB_DISKS=%d\n",
			MD_MAJOR_VERSION, MD_MINOR_VERSION,
			MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
				md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
			    md_probe, NULL, NULL);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
				S_IFBLK|S_IRUSR|S_IWUSR,
				"md/%d", minor);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/d%d", minor);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return (0);
}
/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}
static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev,0, 0);
		if (IS_ERR(rdev))
			continue;

		if (rdev->faulty) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices(part);
}
static __exit void md_exit(void)
{
	int i;
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
	blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/d%d", i);

	devfs_remove("md");

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}
module_init(md_init)
module_exit(md_exit)

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_handle_safemode);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");