diff --git a/drivers/md/md.c b/drivers/md/md.c
index 77cd6e9..af1f876 100644
@@ -19,6 +19,9 @@
 
      Neil Brown <neilb@cse.unsw.edu.au>.
 
+   - persistent bitmap code
+     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2, or (at your option)
 */
 
 #include <linux/module.h>
-#include <linux/config.h>
+#include <linux/kthread.h>
 #include <linux/linkage.h>
 #include <linux/raid/md.h>
+#include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include <linux/init.h>
 
+#include <linux/file.h>
+
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
 static void autostart_arrays (int part);
 #endif
 
-static mdk_personality_t *pers[MAX_PERSONALITY];
-static spinlock_t pers_lock = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(pers_list);
+static DEFINE_SPINLOCK(pers_lock);
+
+static void md_print_devices(void);
+
+#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
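/* (note: the x... arguments MD_BUG() accepts are ignored by the body above) */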
 
 /*
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwith if the IO
+ * the RAID driver will use the maximum available bandwidth if the IO
  * subsystem is idle. There is also an 'absolute maximum' reconstruction
  * speed limit - in case reconstruction slows down your system despite
  * idle IO detection.
  *
  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
+ * or /sys/block/mdX/md/sync_speed_{min,max}
  */
 
 static int sysctl_speed_limit_min = 1000;
 static int sysctl_speed_limit_max = 200000;
+static inline int speed_min(mddev_t *mddev)
+{
+       return mddev->sync_speed_min ?
+               mddev->sync_speed_min : sysctl_speed_limit_min;
+}
+
+static inline int speed_max(mddev_t *mddev)
+{
+       return mddev->sync_speed_max ?
+               mddev->sync_speed_max : sysctl_speed_limit_max;
+}
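/* a per-array sync_speed_{min,max} of 0 means "unset", so the helpers
 * above fall back to the global /proc/sys/dev/raid limits */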
 
 static struct ctl_table_header *raid_table_header;
 
@@ -86,7 +110,7 @@ static ctl_table raid_table[] = {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
-               .mode           = 0644,
+               .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        {
@@ -94,7 +118,7 @@ static ctl_table raid_table[] = {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
-               .mode           = 0644,
+               .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
@@ -105,7 +129,7 @@ static ctl_table raid_dir_table[] = {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
-               .mode           = 0555,
+               .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
@@ -124,12 +148,43 @@ static ctl_table raid_root_table[] = {
 
 static struct block_device_operations md_fops;
 
+static int start_readonly;
+
+/*
+ * We have a system-wide 'event count' that is incremented
+ * on any 'interesting' event, and readers of /proc/mdstat
+ * can use 'poll' or 'select' to find out when the event
+ * count increases.
+ *
+ * Events are:
+ *  start array, stop array, error, add device, remove device,
+ *  start build, activate spare
+ */
+static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+static atomic_t md_event_count;
+void md_new_event(mddev_t *mddev)
+{
+       atomic_inc(&md_event_count);
+       wake_up(&md_event_waiters);
+       sysfs_notify(&mddev->kobj, NULL, "sync_action");
+}
+EXPORT_SYMBOL_GPL(md_new_event);
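A minimal user-space consumer of this contract might look like the following
sketch (illustrative only; mdadm's monitor mode does the equivalent, and the
helper name is hypothetical):

    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>

    /* block until md bumps md_event_count; the /proc/mdstat poll
     * handler reports POLLERR|POLLPRI once the count has moved */
    static int wait_for_md_event(void)
    {
            char buf[4096];
            struct pollfd pfd = { .events = POLLPRI };

            pfd.fd = open("/proc/mdstat", O_RDONLY);
            if (pfd.fd < 0)
                    return -1;
            (void)read(pfd.fd, buf, sizeof(buf)); /* prime the seq_file */
            poll(&pfd, 1, -1);
            close(pfd.fd);
            return 0;
    }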
+
+/* Alternate version that can be called from interrupts
+ * when calling sysfs_notify isn't needed.
+ */
+static void md_new_event_inintr(mddev_t *mddev)
+{
+       atomic_inc(&md_event_count);
+       wake_up(&md_event_waiters);
+}
+
 /*
  * Enables iteration over all existing md arrays.
  * all_mddevs_lock protects this list.
  */
 static LIST_HEAD(all_mddevs);
-static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(all_mddevs_lock);
 
 
 /*
@@ -154,6 +209,7 @@ static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;
                tmp = tmp->next;})                                      \
                )
 
+
 static int md_fail_request (request_queue_t *q, struct bio *bio)
 {
        bio_io_error(bio, bio->bi_size);
@@ -172,10 +228,11 @@ static void mddev_put(mddev_t *mddev)
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
-               blk_put_queue(mddev->queue);
-               kfree(mddev);
-       }
-       spin_unlock(&all_mddevs_lock);
+               spin_unlock(&all_mddevs_lock);
+               blk_cleanup_queue(mddev->queue);
+               kobject_unregister(&mddev->kobj);
+       } else
+               spin_unlock(&all_mddevs_lock);
 }
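/* the unlock now happens before blk_cleanup_queue() and
 * kobject_unregister(), both of which may sleep and so must not be
 * called under all_mddevs_lock */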
 
 static mddev_t * mddev_find(dev_t unit)
@@ -188,8 +245,7 @@ static mddev_t * mddev_find(dev_t unit)
                if (mddev->unit == unit) {
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
-                       if (new)
-                               kfree(new);
+                       kfree(new);
                        return mddev;
                }
 
@@ -200,29 +256,30 @@ static mddev_t * mddev_find(dev_t unit)
        }
        spin_unlock(&all_mddevs_lock);
 
-       new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;
 
-       memset(new, 0, sizeof(*new));
-
        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;
 
-       init_MUTEX(&new->reconfig_sem);
+       mutex_init(&new->reconfig_mutex);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
+       spin_lock_init(&new->write_lock);
+       init_waitqueue_head(&new->sb_wait);
 
        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }
+       set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
 
        blk_queue_make_request(new->queue, md_fail_request);
 
@@ -231,28 +288,22 @@ static mddev_t * mddev_find(dev_t unit)
 
 static inline int mddev_lock(mddev_t * mddev)
 {
-       return down_interruptible(&mddev->reconfig_sem);
-}
-
-static inline void mddev_lock_uninterruptible(mddev_t * mddev)
-{
-       down(&mddev->reconfig_sem);
+       return mutex_lock_interruptible(&mddev->reconfig_mutex);
 }
 
 static inline int mddev_trylock(mddev_t * mddev)
 {
-       return down_trylock(&mddev->reconfig_sem);
+       return mutex_trylock(&mddev->reconfig_mutex);
 }
 
 static inline void mddev_unlock(mddev_t * mddev)
 {
-       up(&mddev->reconfig_sem);
+       mutex_unlock(&mddev->reconfig_mutex);
 
-       if (mddev->thread)
-               md_wakeup_thread(mddev->thread);
+       md_wakeup_thread(mddev->thread);
 }
 
-mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
+static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
 {
        mdk_rdev_t * rdev;
        struct list_head *tmp;
@@ -276,7 +327,19 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
        return NULL;
 }
 
-inline static sector_t calc_dev_sboffset(struct block_device *bdev)
+static struct mdk_personality *find_pers(int level, char *clevel)
+{
+       struct mdk_personality *pers;
+       list_for_each_entry(pers, &pers_list, list) {
+               if (level != LEVEL_NONE && pers->level == level)
+                       return pers;
+               if (strcmp(pers->name, clevel)==0)
+                       return pers;
+       }
+       return NULL;
+}
+
+static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
@@ -310,7 +373,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
 static void free_disk_sb(mdk_rdev_t * rdev)
 {
        if (rdev->sb_page) {
-               page_cache_release(rdev->sb_page);
+               put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
@@ -319,6 +382,111 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 }
 
 
+static int super_written(struct bio *bio, unsigned int bytes_done, int error)
+{
+       mdk_rdev_t *rdev = bio->bi_private;
+       mddev_t *mddev = rdev->mddev;
+       if (bio->bi_size)
+               return 1;
+
+       if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+               printk("md: super_written gets error=%d, uptodate=%d\n",
+                      error, test_bit(BIO_UPTODATE, &bio->bi_flags));
+               WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
+               md_error(mddev, rdev);
+       }
+
+       if (atomic_dec_and_test(&mddev->pending_writes))
+               wake_up(&mddev->sb_wait);
+       bio_put(bio);
+       return 0;
+}
+
+static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+{
+       struct bio *bio2 = bio->bi_private;
+       mdk_rdev_t *rdev = bio2->bi_private;
+       mddev_t *mddev = rdev->mddev;
+       if (bio->bi_size)
+               return 1;
+
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+           error == -EOPNOTSUPP) {
+               unsigned long flags;
+               /* barriers don't appear to be supported :-( */
+               set_bit(BarriersNotsupp, &rdev->flags);
+               mddev->barriers_work = 0;
+               spin_lock_irqsave(&mddev->write_lock, flags);
+               bio2->bi_next = mddev->biolist;
+               mddev->biolist = bio2;
+               spin_unlock_irqrestore(&mddev->write_lock, flags);
+               wake_up(&mddev->sb_wait);
+               bio_put(bio);
+               return 0;
+       }
+       bio_put(bio2);
+       bio->bi_private = rdev;
+       return super_written(bio, bytes_done, error);
+}
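/* the stashed bio2 was built without BIO_RW_BARRIER, so when
 * md_super_wait() below resubmits it, the write is retried as a
 * plain (non-barrier) request */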
+
+void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
+                  sector_t sector, int size, struct page *page)
+{
+       /* write first size bytes of page to sector of rdev
+        * Increment mddev->pending_writes before returning
+        * and decrement it on completion, waking up sb_wait
+        * if zero is reached.
+        * If an error occurred, call md_error
+        *
+        * As we might need to resubmit the request if BIO_RW_BARRIER
+        * causes -EOPNOTSUPP, we allocate a spare bio...
+        */
+       struct bio *bio = bio_alloc(GFP_NOIO, 1);
+       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
+
+       bio->bi_bdev = rdev->bdev;
+       bio->bi_sector = sector;
+       bio_add_page(bio, page, size, 0);
+       bio->bi_private = rdev;
+       bio->bi_end_io = super_written;
+       bio->bi_rw = rw;
+
+       atomic_inc(&mddev->pending_writes);
+       if (!test_bit(BarriersNotsupp, &rdev->flags)) {
+               struct bio *rbio;
+               rw |= (1<<BIO_RW_BARRIER);
+               rbio = bio_clone(bio, GFP_NOIO);
+               rbio->bi_private = bio;
+               rbio->bi_end_io = super_written_barrier;
+               submit_bio(rw, rbio);
+       } else
+               submit_bio(rw, bio);
+}
+
+void md_super_wait(mddev_t *mddev)
+{
+       /* Wait for all superblock writes that were scheduled to complete.
+        * If any need to be retried (because of BARRIER problems), resubmit them.
+        */
+       DEFINE_WAIT(wq);
+       for(;;) {
+               prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
+               if (atomic_read(&mddev->pending_writes)==0)
+                       break;
+               while (mddev->biolist) {
+                       struct bio *bio;
+                       spin_lock_irq(&mddev->write_lock);
+                       bio = mddev->biolist;
+                       mddev->biolist = bio->bi_next ;
+                       bio->bi_next = NULL;
+                       spin_unlock_irq(&mddev->write_lock);
+                       submit_bio(bio->bi_rw, bio);
+               }
+               schedule();
+       }
+       finish_wait(&mddev->sb_wait, &wq);
+}
+
 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
 {
        if (bio->bi_size)
@@ -328,35 +496,31 @@ static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
        return 0;
 }
 
-static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
+int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
 {
-       struct bio bio;
-       struct bio_vec vec;
+       struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
+       int ret;
 
        rw |= (1 << BIO_RW_SYNC);
 
-       bio_init(&bio);
-       bio.bi_io_vec = &vec;
-       vec.bv_page = page;
-       vec.bv_len = size;
-       vec.bv_offset = 0;
-       bio.bi_vcnt = 1;
-       bio.bi_idx = 0;
-       bio.bi_size = size;
-       bio.bi_bdev = bdev;
-       bio.bi_sector = sector;
+       bio->bi_bdev = bdev;
+       bio->bi_sector = sector;
+       bio_add_page(bio, page, size, 0);
        init_completion(&event);
-       bio.bi_private = &event;
-       bio.bi_end_io = bi_complete;
-       submit_bio(rw, &bio);
+       bio->bi_private = &event;
+       bio->bi_end_io = bi_complete;
+       submit_bio(rw, bio);
        wait_for_completion(&event);
 
-       return test_bit(BIO_UPTODATE, &bio.bi_flags);
+       ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       bio_put(bio);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(sync_page_io);
 
-static int read_disk_sb(mdk_rdev_t * rdev)
+static int read_disk_sb(mdk_rdev_t * rdev, int size)
 {
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
@@ -367,13 +531,13 @@ static int read_disk_sb(mdk_rdev_t * rdev)
                return 0;
 
 
-       if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
+       if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;
 
 fail:
-       printk(KERN_ERR "md: disabled device %s, could not read superblock.\n",
+       printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
 }
@@ -420,11 +584,8 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
                ret = 1;
 
 abort:
-       if (tmp1)
-               kfree(tmp1);
-       if (tmp2)
-               kfree(tmp2);
-
+       kfree(tmp1);
+       kfree(tmp2);
        return ret;
 }
 
@@ -439,6 +600,7 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
        return csum;
 }
 
+
 /*
  * Handle superblock details.
  * We want to be able to handle multiple superblock formats
@@ -496,7 +658,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;
 
-       ret = read_disk_sb(rdev);
+       ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;
 
        ret = -EINVAL;
@@ -511,7 +673,8 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        }
 
        if (sb->major_version != 0 ||
-           sb->minor_version != 90) {
+           sb->minor_version < 90 ||
+           sb->minor_version > 91) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
@@ -521,7 +684,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        if (sb->raid_disks <= 0)
                goto abort;
 
-       if (calc_sb_csum(sb) != sb->sb_csum) {
+       if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
@@ -529,8 +692,9 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
 
        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
+       rdev->sb_size = MD_SB_BYTES;
 
-       if (sb->level == MULTIPATH)
+       if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;
@@ -560,6 +724,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);
 
+       if (rdev->size < sb->size && sb->level > 1)
+               /* "this cannot possibly happen" ... */
+               ret = -EINVAL;
+
  abort:
        return ret;
 }
@@ -571,7 +739,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
+       __u64 ev1 = md_event(sb);
 
+       rdev->raid_disk = -1;
+       rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
@@ -581,10 +752,27 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
+               mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
-               mddev->events = md_event(sb);
+               mddev->events = ev1;
+               mddev->bitmap_offset = 0;
+               mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+
+               if (mddev->minor_version >= 91) {
+                       mddev->reshape_position = sb->reshape_position;
+                       mddev->delta_disks = sb->delta_disks;
+                       mddev->new_level = sb->new_level;
+                       mddev->new_layout = sb->new_layout;
+                       mddev->new_chunk = sb->new_chunk;
+               } else {
+                       mddev->reshape_position = MaxSector;
+                       mddev->delta_disks = 0;
+                       mddev->new_level = mddev->level;
+                       mddev->new_layout = mddev->layout;
+                       mddev->new_chunk = mddev->chunk_size;
+               }
 
                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
@@ -602,26 +790,50 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
 
                mddev->max_disks = MD_SB_DISKS;
-       } else {
-               __u64 ev1;
-               ev1 = md_event(sb);
+
+               if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
+                   mddev->bitmap_file == NULL) {
+                       if (mddev->level != 1 && mddev->level != 4
+                           && mddev->level != 5 && mddev->level != 6
+                           && mddev->level != 10) {
+                               /* FIXME use a better test */
+                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
+                               return -EINVAL;
+                       }
+                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+               }
+
+       } else if (mddev->pers == NULL) {
+               /* Insist on a good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events) 
                        return -EINVAL;
+       } else if (mddev->bitmap) {
+               /* if adding to an array with a bitmap, then we can accept an
+                * older device ... but not too old.
+                */
+               if (ev1 < mddev->bitmap->events_cleared)
+                       return 0;
+       } else {
+               if (ev1 < mddev->events)
+                       /* just a hot-add of a new device, leave raid_disk at -1 */
+                       return 0;
        }
+
        if (mddev->level != LEVEL_MULTIPATH) {
-               rdev->raid_disk = -1;
-               rdev->in_sync = rdev->faulty = 0;
                desc = sb->disks + rdev->desc_nr;
 
                if (desc->state & (1<<MD_DISK_FAULTY))
-                       rdev->faulty = 1;
-               else if (desc->state & (1<<MD_DISK_SYNC) &&
-                        desc->raid_disk < mddev->raid_disks) {
-                       rdev->in_sync = 1;
+                       set_bit(Faulty, &rdev->flags);
+               else if (desc->state & (1<<MD_DISK_SYNC) /* &&
+                           desc->raid_disk < mddev->raid_disks */) {
+                       set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                }
-       }
+               if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
+                       set_bit(WriteMostly, &rdev->flags);
+       } else /* MULTIPATH are always insync */
+               set_bit(In_sync, &rdev->flags);
        return 0;
 }
 
@@ -635,6 +847,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;
 
+
        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
@@ -648,13 +861,14 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;
 
+       rdev->sb_size = MD_SB_BYTES;
+
        sb = (mdp_super_t*)page_address(rdev->sb_page);
 
        memset(sb, 0, sizeof(*sb));
 
        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
-       sb->minor_version = mddev->minor_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
@@ -673,6 +887,17 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;
 
+       if (mddev->reshape_position == MaxSector)
+               sb->minor_version = 90;
+       else {
+               sb->minor_version = 91;
+               sb->reshape_position = mddev->reshape_position;
+               sb->new_level = mddev->new_level;
+               sb->delta_disks = mddev->delta_disks;
+               sb->new_layout = mddev->new_layout;
+               sb->new_chunk = mddev->new_chunk;
+       }
+       mddev->minor_version = sb->minor_version;
        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
@@ -686,26 +911,32 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;
 
+       if (mddev->bitmap && mddev->bitmap_file == NULL)
+               sb->state |= (1<<MD_SB_BITMAP_PRESENT);
+
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
-               if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
-                       rdev2->desc_nr = rdev2->raid_disk;
+               int desc_nr;
+               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+                   && !test_bit(Faulty, &rdev2->flags))
+                       desc_nr = rdev2->raid_disk;
                else
-                       rdev2->desc_nr = next_spare++;
+                       desc_nr = next_spare++;
+               rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
-               if (rdev2->raid_disk >= 0 && rdev->in_sync && !rdev2->faulty)
+               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+                   && !test_bit(Faulty, &rdev2->flags))
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
-               if (rdev2->faulty) {
+               if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
-                       failed++;
-               } else if (rdev2->in_sync) {
+               else if (test_bit(In_sync, &rdev2->flags)) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
@@ -715,8 +946,9 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                        spare++;
                        working++;
                }
+               if (test_bit(WriteMostly, &rdev2->flags))
+                       d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }
-       
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
@@ -742,16 +974,27 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
  * version 1 superblock
  */
 
-static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
+static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
 {
-       unsigned int disk_csum, csum;
-       int size = 256 + sb->max_dev*2;
+       __le32 disk_csum;
+       u32 csum;
+       unsigned long long newcsum;
+       int size = 256 + le32_to_cpu(sb->max_dev)*2;
+       __le32 *isuper = (__le32*)sb;
+       int i;
 
        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
-       csum = csum_partial((void *)sb, size, 0);
+       newcsum = 0;
+       for (i=0; size>=4; size -= 4 )
+               newcsum += le32_to_cpu(*isuper++);
+
+       if (size == 2)
+               newcsum += le16_to_cpu(*(__le16*) isuper);
+
+       csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
-       return csum;
+       return cpu_to_le32(csum);
 }
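The closing fold is easiest to see on a toy input; a self-contained sketch,
not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* two little-endian 32-bit words of a pretend superblock */
            uint32_t w[2] = { 0xffffffffu, 0x00000002u };
            uint64_t newcsum = (uint64_t)w[0] + w[1];  /* 0x100000001 */
            uint32_t csum = (uint32_t)newcsum + (uint32_t)(newcsum >> 32);

            printf("%#x\n", (unsigned)csum); /* 0x2: low word plus the carry */
            return 0;
    }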
 
 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
@@ -760,6 +1003,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+       int bmask;
 
        /*
         * Calculate the position of the superblock.
@@ -773,7 +1017,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
-               sb_offset &= ~(4*2);
+               sb_offset &= ~(sector_t)(4*2-1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
@@ -788,7 +1032,10 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        }
        rdev->sb_offset = sb_offset;
 
-       ret = read_disk_sb(rdev);
+       /* superblock is rarely larger than 1K, but it can be larger,
+        * and it is safe to read 4k, so we do that
+        */
+       ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;
 
 
@@ -798,7 +1045,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
-           sb->feature_map != 0)
+           (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;
 
        if (calc_sb_1_csum(sb) != sb->sb_csum) {
@@ -806,11 +1053,27 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
+       if (le64_to_cpu(sb->data_size) < 10) {
+               printk("md: data_size too small on %s\n",
+                      bdevname(rdev->bdev,b));
+               return -EINVAL;
+       }
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
+       atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+
+       rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+       bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+       if (rdev->sb_size & bmask)
+               rdev->sb_size = (rdev->sb_size | bmask)+1;
+
+       if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
+               rdev->desc_nr = -1;
+       else
+               rdev->desc_nr = le32_to_cpu(sb->dev_number);
 
        if (refdev == 0)
-               return 1;
+               ret = 1;
        else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb = 
@@ -830,7 +1093,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                ev2 = le64_to_cpu(refsb->events);
 
                if (ev1 > ev2)
-                       return 1;
+                       ret = 1;
+               else
+                       ret = 0;
        }
        if (minor_version) 
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
@@ -841,61 +1106,102 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
-       return 0;
+
+       if (le64_to_cpu(sb->size) > rdev->size*2)
+               return -EINVAL;
+       return ret;
 }
 
 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+       __u64 ev1 = le64_to_cpu(sb->events);
 
+       rdev->raid_disk = -1;
+       rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
-               mddev->minor_version = 0;
                mddev->patch_version = 0;
                mddev->persistent = 1;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
+               mddev->clevel[0] = 0;
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
-               mddev->size = (u32)le64_to_cpu(sb->size);
-               mddev->events = le64_to_cpu(sb->events);
+               mddev->size = le64_to_cpu(sb->size)/2;
+               mddev->events = ev1;
+               mddev->bitmap_offset = 0;
+               mddev->default_bitmap_offset = 1024 >> 9;
                
                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);
 
                mddev->max_disks =  (4096-256)/2;
-       } else {
-               __u64 ev1;
-               ev1 = le64_to_cpu(sb->events);
+
+               if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
+                   mddev->bitmap_file == NULL ) {
+                       if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
+                           && mddev->level != 10) {
+                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
+                               return -EINVAL;
+                       }
+                       mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+               }
+               if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+                       mddev->reshape_position = le64_to_cpu(sb->reshape_position);
+                       mddev->delta_disks = le32_to_cpu(sb->delta_disks);
+                       mddev->new_level = le32_to_cpu(sb->new_level);
+                       mddev->new_layout = le32_to_cpu(sb->new_layout);
+                       mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
+               } else {
+                       mddev->reshape_position = MaxSector;
+                       mddev->delta_disks = 0;
+                       mddev->new_level = mddev->level;
+                       mddev->new_layout = mddev->layout;
+                       mddev->new_chunk = mddev->chunk_size;
+               }
+
+       } else if (mddev->pers == NULL) {
+               /* Insist on a good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
+       } else if (mddev->bitmap) {
+               /* If adding to an array with a bitmap, then we can accept an
+                * older device, but not too old.
+                */
+               if (ev1 < mddev->bitmap->events_cleared)
+                       return 0;
+       } else {
+               if (ev1 < mddev->events)
+                       /* just a hot-add of a new device, leave raid_disk at -1 */
+                       return 0;
        }
-
        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
-               rdev->desc_nr = le32_to_cpu(sb->dev_number);
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
-                       rdev->in_sync = 0;
-                       rdev->faulty = 0;
-                       rdev->raid_disk = -1;
                        break;
                case 0xfffe: /* faulty */
-                       rdev->in_sync = 0;
-                       rdev->faulty = 1;
-                       rdev->raid_disk = -1;
+                       set_bit(Faulty, &rdev->flags);
                        break;
                default:
-                       rdev->in_sync = 1;
-                       rdev->faulty = 0;
+                       if ((le32_to_cpu(sb->feature_map) &
+                            MD_FEATURE_RECOVERY_OFFSET))
+                               rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
+                       else
+                               set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = role;
                        break;
                }
-       }
+               if (sb->devflags & WriteMostly1)
+                       set_bit(WriteMostly, &rdev->flags);
+       } else /* MULTIPATH are always insync */
+               set_bit(In_sync, &rdev->flags);
+
        return 0;
 }
 
@@ -911,6 +1217,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 
        sb->feature_map = 0;
        sb->pad0 = 0;
+       sb->recovery_offset = cpu_to_le64(0);
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));
@@ -922,30 +1229,58 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        else
                sb->resync_offset = cpu_to_le64(0);
 
+       sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
+
+       sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+       sb->size = cpu_to_le64(mddev->size<<1);
+
+       if (mddev->bitmap && mddev->bitmap_file == NULL) {
+               sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
+               sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
+       }
+
+       if (rdev->raid_disk >= 0 &&
+           !test_bit(In_sync, &rdev->flags) &&
+           rdev->recovery_offset > 0) {
+               sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+               sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
+       }
+
+       if (mddev->reshape_position != MaxSector) {
+               sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
+               sb->reshape_position = cpu_to_le64(mddev->reshape_position);
+               sb->new_layout = cpu_to_le32(mddev->new_layout);
+               sb->delta_disks = cpu_to_le32(mddev->delta_disks);
+               sb->new_level = cpu_to_le32(mddev->new_level);
+               sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
+       }
+
        max_dev = 0;
        ITERATE_RDEV(mddev,rdev2,tmp)
-               if (rdev2->desc_nr > max_dev)
-                       max_dev = rdev2->desc_nr;
+               if (rdev2->desc_nr+1 > max_dev)
+                       max_dev = rdev2->desc_nr+1;
        
-       sb->max_dev = max_dev;
+       sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
-               sb->dev_roles[max_dev] = cpu_to_le16(0xfffe);
+               sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
-               if (rdev2->faulty)
+               if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
-               else if (rdev2->in_sync)
+               else if (test_bit(In_sync, &rdev2->flags))
+                       sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
+               else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }
 
-       sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
+       sb->sb_csum = calc_sb_1_csum(sb);
 }
 
 
-struct super_type super_types[] = {
+static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
@@ -992,11 +1327,21 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 {
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+       struct kobject *ko;
+       char *s;
 
        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }
+       /* make sure rdev->size exceeds mddev->size */
+       if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
+               if (mddev->pers)
+                       /* Cannot change size, so fail */
+                       return -ENOSPC;
+               else
+                       mddev->size = rdev->size;
+       }
        same_pdev = match_dev_unit(mddev, rdev);
        if (same_pdev)
                printk(KERN_WARNING
@@ -1020,10 +1365,25 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }
+       bdevname(rdev->bdev,b);
+       if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
+               return -ENOMEM;
+       while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
+               *s = '!';
                        
        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
-       printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
+       printk(KERN_INFO "md: bind<%s>\n", b);
+
+       rdev->kobj.parent = &mddev->kobj;
+       kobject_add(&rdev->kobj);
+
+       if (rdev->bdev->bd_part)
+               ko = &rdev->bdev->bd_part->kobj;
+       else
+               ko = &rdev->bdev->bd_disk->kobj;
+       sysfs_create_link(&rdev->kobj, ko, "block");
+       bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
        return 0;
 }
 
@@ -1034,28 +1394,35 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
                MD_BUG();
                return;
        }
+       bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
        list_del_init(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
+       sysfs_remove_link(&rdev->kobj, "block");
+       kobject_del(&rdev->kobj);
 }
 
 /*
  * prevent the device from being mounted, repartitioned or
  * otherwise reused by a RAID array (or any other kernel
- * subsystem), by opening the device. [simply getting an
- * inode is not enough, the SCSI module usage code needs
- * an explicit open() on the device]
+ * subsystem), by bd_claiming the device.
  */
 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
 {
        int err = 0;
        struct block_device *bdev;
+       char b[BDEVNAME_SIZE];
 
        bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
-       if (IS_ERR(bdev))
+       if (IS_ERR(bdev)) {
+               printk(KERN_ERR "md: could not open %s.\n",
+                       __bdevname(dev, b));
                return PTR_ERR(bdev);
+       }
        err = bd_claim(bdev, rdev);
        if (err) {
+               printk(KERN_ERR "md: could not bd_claim %s.\n",
+                       bdevname(bdev, b));
                blkdev_put(bdev);
                return err;
        }
@@ -1088,7 +1455,7 @@ static void export_rdev(mdk_rdev_t * rdev)
        md_autodetect_dev(rdev->bdev->bd_dev);
 #endif
        unlock_rdev(rdev);
-       kfree(rdev);
+       kobject_put(&rdev->kobj);
 }
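/* the final kfree() of the rdev now happens in the kobject's release
 * method (not shown in these hunks) once the last reference drops */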
 
 static void kick_rdev_from_array(mdk_rdev_t * rdev)
@@ -1117,10 +1484,7 @@ static void export_array(mddev_t *mddev)
 
 static void print_desc(mdp_disk_t *desc)
 {
-       char b[BDEVNAME_SIZE];
-
-       printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
-               __bdevname(MKDEV(desc->major, desc->minor), b),
+       printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
                desc->major,desc->minor,desc->raid_disk,desc->state);
 }
 
@@ -1163,7 +1527,8 @@ static void print_rdev(mdk_rdev_t *rdev)
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
                bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
-               rdev->faulty, rdev->in_sync, rdev->desc_nr);
+               test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
+               rdev->desc_nr);
        if (rdev->sb_loaded) {
                printk(KERN_INFO "md: rdev superblock:\n");
                print_sb((mdp_super_t*)page_address(rdev->sb_page));
@@ -1171,7 +1536,7 @@ static void print_rdev(mdk_rdev_t *rdev)
                printk(KERN_INFO "md: no rdev superblock!\n");
 }
 
-void md_print_devices(void)
+static void md_print_devices(void)
 {
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev;
@@ -1183,8 +1548,11 @@ void md_print_devices(void)
        printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
        printk("md:     **********************************\n");
        ITERATE_MDDEV(mddev,tmp) {
-               printk("%s: ", mdname(mddev));
 
+               if (mddev->bitmap)
+                       bitmap_print_sb(mddev->bitmap);
+               else
+                       printk("%s: ", mdname(mddev));
                ITERATE_RDEV(mddev,rdev,tmp2)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");
@@ -1197,52 +1565,94 @@ void md_print_devices(void)
 }
 
 
-static int write_disk_sb(mdk_rdev_t * rdev)
-{
-       char b[BDEVNAME_SIZE];
-       if (!rdev->sb_loaded) {
-               MD_BUG();
-               return 1;
-       }
-       if (rdev->faulty) {
-               MD_BUG();
-               return 1;
-       }
-
-       dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
-               bdevname(rdev->bdev,b),
-              (unsigned long long)rdev->sb_offset);
-  
-       if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
-               return 0;
-
-       printk("md: write_disk_sb failed for device %s\n", 
-               bdevname(rdev->bdev,b));
-       return 1;
-}
-
-static void sync_sbs(mddev_t * mddev)
+static void sync_sbs(mddev_t * mddev, int nospares)
 {
+       /* Update each superblock (in-memory image), but
+        * if we are allowed to, skip spares which already
+        * have the right event counter, or have one earlier
+        * (which would mean they aren't being marked as dirty
+        * with the rest of the array)
+        */
        mdk_rdev_t *rdev;
        struct list_head *tmp;
 
        ITERATE_RDEV(mddev,rdev,tmp) {
-               super_types[mddev->major_version].
-                       sync_super(mddev, rdev);
-               rdev->sb_loaded = 1;
+               if (rdev->sb_events == mddev->events ||
+                   (nospares &&
+                    rdev->raid_disk < 0 &&
+                    (rdev->sb_events&1)==0 &&
+                    rdev->sb_events+1 == mddev->events)) {
+                       /* Don't update this superblock */
+                       rdev->sb_loaded = 2;
+               } else {
+                       super_types[mddev->major_version].
+                               sync_super(mddev, rdev);
+                       rdev->sb_loaded = 1;
+               }
        }
 }
 
-static void md_update_sb(mddev_t * mddev)
+static void md_update_sb(mddev_t * mddev, int force_change)
 {
-       int err, count = 100;
+       int err;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
+       int sync_req;
+       int nospares = 0;
 
-       mddev->sb_dirty = 0;
 repeat:
+       spin_lock_irq(&mddev->write_lock);
+
+       set_bit(MD_CHANGE_PENDING, &mddev->flags);
+       if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+               force_change = 1;
+       if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+               /* just a clean <-> dirty transition; possibly leave spares alone,
+                * though if 'events' doesn't end up with the right parity, we will
+                * have to update the spares after all
+                */
+               nospares = 1;
+       if (force_change)
+               nospares = 0;
+       if (mddev->degraded)
+               /* If the array is degraded, then skipping spares is both
+                * dangerous and fairly pointless.
+                * Dangerous because a device that was removed from the array
+                * might have an event_count that still looks up-to-date,
+                * so it can be re-added without a resync.
+                * Pointless because if there are any spares to skip,
+                * then a recovery will happen and soon that array won't
+                * be degraded any more and the spare can go back to sleep then.
+                */
+               nospares = 0;
+
+       sync_req = mddev->in_sync;
        mddev->utime = get_seconds();
-       mddev->events ++;
+
+       /* If this is just a dirty <-> clean transition, and the array is clean
+        * and 'events' is odd, we can roll back to the previous clean state */
+       if (nospares
+           && (mddev->in_sync && mddev->recovery_cp == MaxSector)
+           && (mddev->events & 1)
+           && mddev->events != 1)
+               mddev->events--;
+       else {
+               /* otherwise we have to go forward and ... */
+               mddev->events ++;
+               if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
+                       /* .. if the array isn't clean, insist on an odd 'events' */
+                       if ((mddev->events&1)==0) {
+                               mddev->events++;
+                               nospares = 0;
+                       }
+               } else {
+                       /* otherwise insist on an even 'events' (for clean states) */
+                       if ((mddev->events&1)) {
+                               mddev->events++;
+                               nospares = 0;
+                       }
+               }
+       }
 
        if (!mddev->events) {
                /*
@@ -1253,112 +1663,383 @@ repeat:
                MD_BUG();
                mddev->events --;
        }
-       sync_sbs(mddev);
+       sync_sbs(mddev, nospares);
 
        /*
         * do not write anything to disk if using
         * nonpersistent superblocks
         */
-       if (!mddev->persistent)
+       if (!mddev->persistent) {
+               clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+               spin_unlock_irq(&mddev->write_lock);
+               wake_up(&mddev->sb_wait);
                return;
+       }
+       spin_unlock_irq(&mddev->write_lock);
 
        dprintk(KERN_INFO 
                "md: updating %s RAID superblock on device (in sync %d)\n",
                mdname(mddev),mddev->in_sync);
 
-       err = 0;
+       err = bitmap_update_sb(mddev->bitmap);
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
-               if (rdev->faulty)
+               if (rdev->sb_loaded != 1)
+                       continue; /* no noise on spare devices */
+               if (test_bit(Faulty, &rdev->flags))
                        dprintk("(skipping faulty ");
 
                dprintk("%s ", bdevname(rdev->bdev,b));
-               if (!rdev->faulty) {
-                       err += write_disk_sb(rdev);
+               if (!test_bit(Faulty, &rdev->flags)) {
+                       md_super_write(mddev,rdev,
+                                      rdev->sb_offset<<1, rdev->sb_size,
+                                      rdev->sb_page);
+                       dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
+                               bdevname(rdev->bdev,b),
+                               (unsigned long long)rdev->sb_offset);
+                       rdev->sb_events = mddev->events;
+
                } else
                        dprintk(")\n");
-               if (!err && mddev->level == LEVEL_MULTIPATH)
+               if (mddev->level == LEVEL_MULTIPATH)
                        /* only need to write one superblock... */
                        break;
        }
-       if (err) {
-               if (--count) {
-                       printk(KERN_ERR "md: errors occurred during superblock"
-                               " update, repeating\n");
-                       goto repeat;
-               }
-               printk(KERN_ERR \
-                       "md: excessive errors occurred during superblock update, exiting\n");
+       md_super_wait(mddev);
+       /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write the superblocks */
+
+       spin_lock_irq(&mddev->write_lock);
+       if (mddev->in_sync != sync_req ||
+           test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+               /* have to write it out again */
+               spin_unlock_irq(&mddev->write_lock);
+               goto repeat;
        }
+       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+       spin_unlock_irq(&mddev->write_lock);
+       wake_up(&mddev->sb_wait);
+
 }
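/* summary of the convention above (not part of the patch): an even
 * 'events' count records a clean array, an odd one a dirty array.
 * A clean(8) -> dirty(9) -> clean transition may roll back to 8
 * instead of advancing to 10, so spare superblocks still holding 8
 * stay consistent without being rewritten. */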
 
-/*
- * Import a device. If 'super_format' >= 0, then sanity check the superblock
- *
- * mark the device faulty if:
- *
- *   - the device is nonexistent (zero size)
- *   - the device has no valid superblock
- *
- * a faulty rdev _never_ has rdev->sb set.
+/* Words written to sysfs files may, or may not, be \n terminated.
+ * We want to accept either case. For this we use cmd_match.
  */
-static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
+static int cmd_match(const char *cmd, const char *str)
 {
-       char b[BDEVNAME_SIZE];
-       int err;
-       mdk_rdev_t *rdev;
-       sector_t size;
-
-       rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
-       if (!rdev) {
-               printk(KERN_ERR "md: could not alloc mem for %s!\n", 
-                       __bdevname(newdev, b));
-               return ERR_PTR(-ENOMEM);
+       /* See if cmd, written into a sysfs file, matches
+        * str.  They must either be the same, or cmd can
+        * have a trailing newline
+        */
+       while (*cmd && *str && *cmd == *str) {
+               cmd++;
+               str++;
        }
-       memset(rdev, 0, sizeof(*rdev));
+       if (*cmd == '\n')
+               cmd++;
+       if (*str || *cmd)
+               return 0;
+       return 1;
+}
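A standalone sketch of the contract (the body is repeated so the test
compiles on its own; not part of the patch):

    #include <assert.h>

    static int cmd_match(const char *cmd, const char *str)
    {
            while (*cmd && *str && *cmd == *str) {
                    cmd++;
                    str++;
            }
            if (*cmd == '\n')
                    cmd++;
            if (*str || *cmd)
                    return 0;
            return 1;
    }

    int main(void)
    {
            assert(cmd_match("faulty\n", "faulty")); /* trailing \n accepted */
            assert(cmd_match("faulty", "faulty"));
            assert(!cmd_match("fault", "faulty"));   /* a prefix is not a match */
            assert(!cmd_match("faultyx", "faulty"));
            return 0;
    }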
 
-       if ((err = alloc_disk_sb(rdev)))
-               goto abort_free;
+struct rdev_sysfs_entry {
+       struct attribute attr;
+       ssize_t (*show)(mdk_rdev_t *, char *);
+       ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
+};
 
-       err = lock_rdev(rdev, newdev);
-       if (err) {
-               printk(KERN_ERR "md: could not lock %s.\n",
-                       __bdevname(newdev, b));
-               goto abort_free;
-       }
-       rdev->desc_nr = -1;
-       rdev->faulty = 0;
-       rdev->in_sync = 0;
-       rdev->data_offset = 0;
-       atomic_set(&rdev->nr_pending, 0);
+static ssize_t
+state_show(mdk_rdev_t *rdev, char *page)
+{
+       char *sep = "";
+       int len=0;
 
-       size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
-       if (!size) {
-               printk(KERN_WARNING 
-                       "md: %s has zero or unknown size, marking faulty!\n",
-                       bdevname(rdev->bdev,b));
-               err = -EINVAL;
-               goto abort_free;
+       if (test_bit(Faulty, &rdev->flags)) {
+               len+= sprintf(page+len, "%sfaulty",sep);
+               sep = ",";
        }
-
-       if (super_format >= 0) {
-               err = super_types[super_format].
-                       load_super(rdev, NULL, super_minor);
-               if (err == -EINVAL) {
-                       printk(KERN_WARNING 
-                               "md: %s has invalid sb, not importing!\n",
-                               bdevname(rdev->bdev,b));
-                       goto abort_free;
-               }
-               if (err < 0) {
-                       printk(KERN_WARNING 
-                               "md: could not read %s's sb, not importing!\n",
-                               bdevname(rdev->bdev,b));
-                       goto abort_free;
-               }
+       if (test_bit(In_sync, &rdev->flags)) {
+               len += sprintf(page+len, "%sin_sync",sep);
+               sep = ",";
        }
-       INIT_LIST_HEAD(&rdev->same_set);
+       if (test_bit(WriteMostly, &rdev->flags)) {
+               len += sprintf(page+len, "%swrite_mostly",sep);
+               sep = ",";
+       }
+       if (!test_bit(Faulty, &rdev->flags) &&
+           !test_bit(In_sync, &rdev->flags)) {
+               len += sprintf(page+len, "%sspare", sep);
+               sep = ",";
+       }
+       return len+sprintf(page+len, "\n");
+}
+
+static ssize_t
+state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       /* can write
+        *  faulty  - simulates an error
+        *  remove  - disconnects the device
+        *  writemostly - sets write_mostly
+        *  -writemostly - clears write_mostly
+        */
+       int err = -EINVAL;
+       if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
+               md_error(rdev->mddev, rdev);
+               err = 0;
+       } else if (cmd_match(buf, "remove")) {
+               if (rdev->raid_disk >= 0)
+                       err = -EBUSY;
+               else {
+                       mddev_t *mddev = rdev->mddev;
+                       kick_rdev_from_array(rdev);
+                       if (mddev->pers)
+                               md_update_sb(mddev, 1);
+                       md_new_event(mddev);
+                       err = 0;
+               }
+       } else if (cmd_match(buf, "writemostly")) {
+               set_bit(WriteMostly, &rdev->flags);
+               err = 0;
+       } else if (cmd_match(buf, "-writemostly")) {
+               clear_bit(WriteMostly, &rdev->flags);
+               err = 0;
+       }
+       return err ? err : len;
+}
+static struct rdev_sysfs_entry rdev_state =
+__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
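/* from user space (hypothetical device name):
 *   echo -writemostly > /sys/block/md0/md/dev-sda1/state
 * arrives in state_store() above as buf == "-writemostly\n" */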
+
+static ssize_t
+super_show(mdk_rdev_t *rdev, char *page)
+{
+       if (rdev->sb_loaded && rdev->sb_size) {
+               memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
+               return rdev->sb_size;
+       } else
+               return 0;
+}
+static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+
+static ssize_t
+errors_show(mdk_rdev_t *rdev, char *page)
+{
+       return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
+}
+
+static ssize_t
+errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+       if (*buf && (*e == 0 || *e == '\n')) {
+               atomic_set(&rdev->corrected_errors, n);
+               return len;
+       }
+       return -EINVAL;
+}
+static struct rdev_sysfs_entry rdev_errors =
+__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
+
+static ssize_t
+slot_show(mdk_rdev_t *rdev, char *page)
+{
+       if (rdev->raid_disk < 0)
+               return sprintf(page, "none\n");
+       else
+               return sprintf(page, "%d\n", rdev->raid_disk);
+}
+
+static ssize_t
+slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       char *e;
+       int slot = simple_strtoul(buf, &e, 10);
+       if (strncmp(buf, "none", 4)==0)
+               slot = -1;
+       else if (e==buf || (*e && *e!= '\n'))
+               return -EINVAL;
+       if (rdev->mddev->pers)
+               /* Cannot set slot in active array (yet) */
+               return -EBUSY;
+       if (slot >= rdev->mddev->raid_disks)
+               return -ENOSPC;
+       rdev->raid_disk = slot;
+       /* assume it is working */
+       rdev->flags = 0;
+       set_bit(In_sync, &rdev->flags);
+       return len;
+}
+
+
+static struct rdev_sysfs_entry rdev_slot =
+__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
+
+static ssize_t
+offset_show(mdk_rdev_t *rdev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
+}
+
+static ssize_t
+offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long offset = simple_strtoull(buf, &e, 10);
+       if (e==buf || (*e && *e != '\n'))
+               return -EINVAL;
+       if (rdev->mddev->pers)
+               return -EBUSY;
+       rdev->data_offset = offset;
+       return len;
+}
+
+static struct rdev_sysfs_entry rdev_offset =
+__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
+
+static ssize_t
+rdev_size_show(mdk_rdev_t *rdev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
+}
+
+static ssize_t
+rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long size = simple_strtoull(buf, &e, 10);
+       if (e==buf || (*e && *e != '\n'))
+               return -EINVAL;
+       if (rdev->mddev->pers)
+               return -EBUSY;
+       rdev->size = size;
+       if (size < rdev->mddev->size || rdev->mddev->size == 0)
+               rdev->mddev->size = size;
+       return len;
+}
+
+static struct rdev_sysfs_entry rdev_size =
+__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
+
+static struct attribute *rdev_default_attrs[] = {
+       &rdev_state.attr,
+       &rdev_super.attr,
+       &rdev_errors.attr,
+       &rdev_slot.attr,
+       &rdev_offset.attr,
+       &rdev_size.attr,
+       NULL,
+};
+static ssize_t
+rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
+       mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+
+       if (!entry->show)
+               return -EIO;
+       return entry->show(rdev, page);
+}
+
+static ssize_t
+rdev_attr_store(struct kobject *kobj, struct attribute *attr,
+             const char *page, size_t length)
+{
+       struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
+       mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+
+       if (!entry->store)
+               return -EIO;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       return entry->store(rdev, page, length);
+}
+
+static void rdev_free(struct kobject *ko)
+{
+       mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
+       kfree(rdev);
+}
+static struct sysfs_ops rdev_sysfs_ops = {
+       .show           = rdev_attr_show,
+       .store          = rdev_attr_store,
+};
+static struct kobj_type rdev_ktype = {
+       .release        = rdev_free,
+       .sysfs_ops      = &rdev_sysfs_ops,
+       .default_attrs  = rdev_default_attrs,
+};
+
+/*
+ * Import a device. If 'super_format' >= 0, then sanity check the superblock
+ *
+ * mark the device faulty if:
+ *
+ *   - the device is nonexistent (zero size)
+ *   - the device has no valid superblock
+ *
+ * a faulty rdev _never_ has rdev->sb set.
+ */
+static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
+{
+       char b[BDEVNAME_SIZE];
+       int err;
+       mdk_rdev_t *rdev;
+       sector_t size;
+
+       rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+       if (!rdev) {
+               printk(KERN_ERR "md: could not alloc mem for new device!\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       if ((err = alloc_disk_sb(rdev)))
+               goto abort_free;
+
+       err = lock_rdev(rdev, newdev);
+       if (err)
+               goto abort_free;
+
+       rdev->kobj.parent = NULL;
+       rdev->kobj.ktype = &rdev_ktype;
+       kobject_init(&rdev->kobj);
+
+       rdev->desc_nr = -1;
+       rdev->saved_raid_disk = -1;
+       rdev->raid_disk = -1;
+       rdev->flags = 0;
+       rdev->data_offset = 0;
+       rdev->sb_events = 0;
+       atomic_set(&rdev->nr_pending, 0);
+       atomic_set(&rdev->read_errors, 0);
+       atomic_set(&rdev->corrected_errors, 0);
+
+       size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+       if (!size) {
+               printk(KERN_WARNING 
+                       "md: %s has zero or unknown size, marking faulty!\n",
+                       bdevname(rdev->bdev,b));
+               err = -EINVAL;
+               goto abort_free;
+       }
+
+       if (super_format >= 0) {
+               err = super_types[super_format].
+                       load_super(rdev, NULL, super_minor);
+               if (err == -EINVAL) {
+                       printk(KERN_WARNING 
+                               "md: %s has invalid sb, not importing!\n",
+                               bdevname(rdev->bdev,b));
+                       goto abort_free;
+               }
+               if (err < 0) {
+                       printk(KERN_WARNING 
+                               "md: could not read %s's sb, not importing!\n",
+                               bdevname(rdev->bdev,b));
+                       goto abort_free;
+               }
+       }
+       INIT_LIST_HEAD(&rdev->same_set);
 
        return rdev;
 
@@ -1377,7 +2058,7 @@ abort_free:
  */
 
 
-static int analyze_sbs(mddev_t * mddev)
+static void analyze_sbs(mddev_t * mddev)
 {
        int i;
        struct list_head *tmp;
@@ -1419,40 +2100,854 @@ static int analyze_sbs(mddev_t * mddev)
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
-                       rdev->in_sync = 1;
+                       set_bit(In_sync, &rdev->flags);
+               }
+       }
+
+
+
+       if (mddev->recovery_cp != MaxSector &&
+           mddev->level >= 1)
+               printk(KERN_ERR "md: %s: raid array is not clean"
+                      " -- starting background reconstruction\n",
+                      mdname(mddev));
+
+}
+
+static ssize_t
+safe_delay_show(mddev_t *mddev, char *page)
+{
+       int msec = (mddev->safemode_delay*1000)/HZ;
+       return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
+}
+static ssize_t
+safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
+{
+       int scale=1;
+       int dot=0;
+       int i;
+       unsigned long msec;
+       char buf[30];
+       char *e;
+       /* remove a period, and count digits after it */
+       if (len >= sizeof(buf))
+               return -EINVAL;
+       strlcpy(buf, cbuf, len);
+       buf[len] = 0;
+       for (i=0; i<len; i++) {
+               if (dot) {
+                       if (isdigit(buf[i])) {
+                               buf[i-1] = buf[i];
+                               scale *= 10;
+                       }
+                       buf[i] = 0;
+               } else if (buf[i] == '.') {
+                       dot=1;
+                       buf[i] = 0;
+               }
+       }
+       msec = simple_strtoul(buf, &e, 10);
+       if (e == buf || (*e && *e != '\n'))
+               return -EINVAL;
+       msec = (msec * 1000) / scale;
+       if (msec == 0)
+               mddev->safemode_delay = 0;
+       else {
+               mddev->safemode_delay = (msec*HZ)/1000;
+               if (mddev->safemode_delay == 0)
+                       mddev->safemode_delay = 1;
+       }
+       return len;
+}
+static struct md_sysfs_entry md_safe_delay =
+__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
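safe_delay_store() accepts a decimal seconds value ("0.200" means 200 msec) and converts it to jiffies, never letting a non-zero request round down to zero. A standalone sketch of the same conversion; HZ is hardcoded to 250 purely for illustration, and strtod() stands in for the kernel's digit shuffling:

    #include <stdio.h>
    #include <stdlib.h>

    #define HZ 250          /* illustrative timer frequency */

    static unsigned long parse_safe_delay(const char *s)
    {
            char *e;
            double sec = strtod(s, &e);     /* stand-in for the kernel parse */
            unsigned long msec = (unsigned long)(sec * 1000.0 + 0.5);
            unsigned long delay = (msec * HZ) / 1000;

            if (msec && !delay)
                    delay = 1;      /* never round a request down to 0 */
            return delay;
    }

    int main(void)
    {
            /* "0.200" -> 200 msec -> 50 jiffies at HZ=250 */
            printf("%lu\n", parse_safe_delay("0.200"));
            return 0;
    }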
+
+static ssize_t
+level_show(mddev_t *mddev, char *page)
+{
+       struct mdk_personality *p = mddev->pers;
+       if (p)
+               return sprintf(page, "%s\n", p->name);
+       else if (mddev->clevel[0])
+               return sprintf(page, "%s\n", mddev->clevel);
+       else if (mddev->level != LEVEL_NONE)
+               return sprintf(page, "%d\n", mddev->level);
+       else
+               return 0;
+}
+
+static ssize_t
+level_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       int rv = len;
+       if (mddev->pers)
+               return -EBUSY;
+       if (len == 0)
+               return 0;
+       if (len >= sizeof(mddev->clevel))
+               return -ENOSPC;
+       strncpy(mddev->clevel, buf, len);
+       if (mddev->clevel[len-1] == '\n')
+               len--;
+       mddev->clevel[len] = 0;
+       mddev->level = LEVEL_NONE;
+       return rv;
+}
+
+static struct md_sysfs_entry md_level =
+__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
+
+
+static ssize_t
+layout_show(mddev_t *mddev, char *page)
+{
+       /* just a number, not meaningful for all levels */
+       return sprintf(page, "%d\n", mddev->layout);
+}
+
+static ssize_t
+layout_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+       if (mddev->pers)
+               return -EBUSY;
+
+       if (!*buf || (*e && *e != '\n'))
+               return -EINVAL;
+
+       mddev->layout = n;
+       return len;
+}
+static struct md_sysfs_entry md_layout =
+__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
+
+
+static ssize_t
+raid_disks_show(mddev_t *mddev, char *page)
+{
+       if (mddev->raid_disks == 0)
+               return 0;
+       return sprintf(page, "%d\n", mddev->raid_disks);
+}
+
+static int update_raid_disks(mddev_t *mddev, int raid_disks);
+
+static ssize_t
+raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       int rv = 0;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+
+       if (!*buf || (*e && *e != '\n'))
+               return -EINVAL;
+
+       if (mddev->pers)
+               rv = update_raid_disks(mddev, n);
+       else
+               mddev->raid_disks = n;
+       return rv ? rv : len;
+}
+static struct md_sysfs_entry md_raid_disks =
+__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
+
+static ssize_t
+chunk_size_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%d\n", mddev->chunk_size);
+}
+
+static ssize_t
+chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       /* can only set chunk_size if array is not yet active */
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+
+       if (mddev->pers)
+               return -EBUSY;
+       if (!*buf || (*e && *e != '\n'))
+               return -EINVAL;
+
+       mddev->chunk_size = n;
+       return len;
+}
+static struct md_sysfs_entry md_chunk_size =
+__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
+
+static ssize_t
+resync_start_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
+}
+
+static ssize_t
+resync_start_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       /* can only set resync_start if array is not yet active */
+       char *e;
+       unsigned long long n = simple_strtoull(buf, &e, 10);
+
+       if (mddev->pers)
+               return -EBUSY;
+       if (!*buf || (*e && *e != '\n'))
+               return -EINVAL;
+
+       mddev->recovery_cp = n;
+       return len;
+}
+static struct md_sysfs_entry md_resync_start =
+__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+
+/*
+ * The array state can be:
+ *
+ * clear
+ *     No devices, no size, no level
+ *     Equivalent to STOP_ARRAY ioctl
+ * inactive
+ *     May have some settings, but array is not active
+ *        all IO results in error
+ *     When written, doesn't tear down array, but just stops it
+ * suspended (not supported yet)
+ *     All IO requests will block. The array can be reconfigured.
+ *     Writing this, if accepted, will block until array is quiescent
+ * readonly
+ *     no resync can happen.  no superblocks get written.
+ *     write requests fail
+ * read-auto
+ *     like readonly, but behaves like 'clean' on a write request.
+ *
+ * clean - no pending writes, but otherwise active.
+ *     When written to inactive array, starts without resync
+ *     If a write request arrives then
+ *       if metadata is known, mark 'dirty' and switch to 'active'.
+ *       if not known, block and switch to write-pending
+ *     If written to an active array that has pending writes, then fails.
+ * active
+ *     fully active: IO and resync can be happening.
+ *     When written to inactive array, starts with resync
+ *
+ * write-pending
+ *     clean, but writes are blocked waiting for 'active' to be written.
+ *
+ * active-idle
+ *     like active, but no writes have been seen for a while (100msec).
+ *
+ */
+enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
+                  write_pending, active_idle, bad_word};
+static char *array_states[] = {
+       "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
+       "write-pending", "active-idle", NULL };
+
+static int match_word(const char *word, char **list)
+{
+       int n;
+       for (n=0; list[n]; n++)
+               if (cmd_match(word, list[n]))
+                       break;
+       return n;
+}
+
+static ssize_t
+array_state_show(mddev_t *mddev, char *page)
+{
+       enum array_state st = inactive;
+
+       if (mddev->pers)
+               switch(mddev->ro) {
+               case 1:
+                       st = readonly;
+                       break;
+               case 2:
+                       st = read_auto;
+                       break;
+               case 0:
+                       if (mddev->in_sync)
+                               st = clean;
+                       else if (mddev->safemode)
+                               st = active_idle;
+                       else
+                               st = active;
                }
+       else {
+               if (list_empty(&mddev->disks) &&
+                   mddev->raid_disks == 0 &&
+                   mddev->size == 0)
+                       st = clear;
+               else
+                       st = inactive;
        }
+       return sprintf(page, "%s\n", array_states[st]);
+}
+
+static int do_md_stop(mddev_t * mddev, int ro);
+static int do_md_run(mddev_t * mddev);
+static int restart_array(mddev_t *mddev);
+
+static ssize_t
+array_state_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       int err = -EINVAL;
+       enum array_state st = match_word(buf, array_states);
+       switch(st) {
+       case bad_word:
+               break;
+       case clear:
+               /* stopping an active array */
+               if (mddev->pers) {
+                       if (atomic_read(&mddev->active) > 1)
+                               return -EBUSY;
+                       err = do_md_stop(mddev, 0);
+               }
+               break;
+       case inactive:
+               /* stopping an active array */
+               if (mddev->pers) {
+                       if (atomic_read(&mddev->active) > 1)
+                               return -EBUSY;
+                       err = do_md_stop(mddev, 2);
+               }
+               break;
+       case suspended:
+               break; /* not supported yet */
+       case readonly:
+               if (mddev->pers)
+                       err = do_md_stop(mddev, 1);
+               else {
+                       mddev->ro = 1;
+                       err = do_md_run(mddev);
+               }
+               break;
+       case read_auto:
+               /* stopping an active array */
+               if (mddev->pers) {
+                       err = do_md_stop(mddev, 1);
+                       if (err == 0)
+                               mddev->ro = 2; /* FIXME mark devices writable */
+               } else {
+                       mddev->ro = 2;
+                       err = do_md_run(mddev);
+               }
+               break;
+       case clean:
+               if (mddev->pers) {
+                       restart_array(mddev);
+                       spin_lock_irq(&mddev->write_lock);
+                       if (atomic_read(&mddev->writes_pending) == 0) {
+                               mddev->in_sync = 1;
+                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       }
+                       spin_unlock_irq(&mddev->write_lock);
+               } else {
+                       mddev->ro = 0;
+                       mddev->recovery_cp = MaxSector;
+                       err = do_md_run(mddev);
+               }
+               break;
+       case active:
+               if (mddev->pers) {
+                       restart_array(mddev);
+                       clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       wake_up(&mddev->sb_wait);
+                       err = 0;
+               } else {
+                       mddev->ro = 0;
+                       err = do_md_run(mddev);
+               }
+               break;
+       case write_pending:
+       case active_idle:
+               /* these cannot be set */
+               break;
+       }
+       if (err)
+               return err;
+       else
+               return len;
+}
+static struct md_sysfs_entry md_array_state =
+__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
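A small sketch (assuming /sys/block/md0 exists) that reads back one of the states documented in the comment block above:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[32];
            ssize_t n;
            int fd = open("/sys/block/md0/md/array_state", O_RDONLY);

            if (fd < 0) {
                    perror("array_state");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            close(fd);
            if (n > 0) {
                    buf[n] = '\0';
                    printf("md0 is %s", buf);   /* e.g. "clean\n" */
            }
            return 0;
    }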
+
+static ssize_t
+null_show(mddev_t *mddev, char *page)
+{
+       return -EINVAL;
+}
+
+static ssize_t
+new_dev_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       /* buf must be %d:%d\n, giving major and minor numbers */
+       /* The new device is added to the array.
+        * If the array has a persistent superblock, we read the
+        * superblock to initialise info and check validity.
+        * Otherwise, only checking done is that in bind_rdev_to_array,
+        * which mainly checks size.
+        */
+       char *e;
+       int major = simple_strtoul(buf, &e, 10);
+       int minor;
+       dev_t dev;
+       mdk_rdev_t *rdev;
+       int err;
+
+       if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
+               return -EINVAL;
+       minor = simple_strtoul(e+1, &e, 10);
+       if (*e && *e != '\n')
+               return -EINVAL;
+       dev = MKDEV(major, minor);
+       if (major != MAJOR(dev) ||
+           minor != MINOR(dev))
+               return -EOVERFLOW;
+
+
+       if (mddev->persistent) {
+               rdev = md_import_device(dev, mddev->major_version,
+                                       mddev->minor_version);
+               if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
+                       mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
+                                                      mdk_rdev_t, same_set);
+                       err = super_types[mddev->major_version]
+                               .load_super(rdev, rdev0, mddev->minor_version);
+                       if (err < 0)
+                               goto out;
+               }
+       } else
+               rdev = md_import_device(dev, -1, -1);
+
+       if (IS_ERR(rdev))
+               return PTR_ERR(rdev);
+       err = bind_rdev_to_array(rdev, mddev);
+ out:
+       if (err)
+               export_rdev(rdev);
+       return err ? err : len;
+}
+
+static struct md_sysfs_entry md_new_device =
+__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
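A hedged sketch of feeding a component device to an inactive array through new_dev, using the major:minor format the store method parses; /dev/sda1 and md0 are placeholders:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>      /* major(), minor() */
    #include <unistd.h>

    int main(void)
    {
            struct stat st;
            char line[32];
            int fd;

            if (stat("/dev/sda1", &st) < 0 || !S_ISBLK(st.st_mode)) {
                    perror("/dev/sda1");
                    return 1;
            }
            snprintf(line, sizeof(line), "%u:%u",
                     major(st.st_rdev), minor(st.st_rdev));
            fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
            if (fd < 0 || write(fd, line, strlen(line)) < 0)
                    perror("new_dev");
            if (fd >= 0)
                    close(fd);
            return 0;
    }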
+
+static ssize_t
+bitmap_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *end;
+       unsigned long chunk, end_chunk;
+
+       if (!mddev->bitmap)
+               goto out;
+       /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
+       while (*buf) {
+               chunk = end_chunk = simple_strtoul(buf, &end, 0);
+               if (buf == end) break;
+               if (*end == '-') { /* range */
+                       buf = end + 1;
+                       end_chunk = simple_strtoul(buf, &end, 0);
+                       if (buf == end) break;
+               }
+               if (*end && !isspace(*end)) break;
+               bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
+               buf = end;
+               while (isspace(*buf)) buf++;
+       }
+       bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
+out:
+       return len;
+}
+
+static struct md_sysfs_entry md_bitmap =
+__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
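For illustration only (the write is silently ignored unless the array already has a bitmap): dirtying chunks with the <chunk> and <chunk>-<chunk> syntax parsed above, md0 again being a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* chunks 10..20 inclusive, plus chunk 42 */
            const char *ranges = "10-20 42\n";
            int fd = open("/sys/block/md0/md/bitmap_set_bits", O_WRONLY);

            if (fd < 0 || write(fd, ranges, strlen(ranges)) < 0)
                    perror("bitmap_set_bits");
            if (fd >= 0)
                    close(fd);
            return 0;
    }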
+
+static ssize_t
+size_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
+}
+
+static int update_size(mddev_t *mddev, unsigned long size);
+
+static ssize_t
+size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       /* If array is inactive, we can reduce the component size, but
+        * not increase it (except from 0).
+        * If array is active, we can try an on-line resize
+        */
+       char *e;
+       int err = 0;
+       unsigned long long size = simple_strtoull(buf, &e, 10);
+       if (!*buf || *buf == '\n' ||
+           (*e && *e != '\n'))
+               return -EINVAL;
+
+       if (mddev->pers) {
+               err = update_size(mddev, size);
+               md_update_sb(mddev, 1);
+       } else {
+               if (mddev->size == 0 ||
+                   mddev->size > size)
+                       mddev->size = size;
+               else
+                       err = -ENOSPC;
+       }
+       return err ? err : len;
+}
+
+static struct md_sysfs_entry md_size =
+__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
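component_size is expressed in KiB (note resync_max_sectors = size << 1 later in this patch, converting KiB to 512-byte sectors). A sketch that shrinks an inactive array's per-device size; md0 and the 1 GiB figure are placeholders:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *kib = "1048576";    /* 1 GiB per component, in KiB */
            int fd = open("/sys/block/md0/md/component_size", O_WRONLY);

            if (fd < 0 || write(fd, kib, strlen(kib)) < 0)
                    perror("component_size");
            if (fd >= 0)
                    close(fd);
            return 0;
    }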
+
+
+/* Metadata version.
+ * This is either 'none' for arrays with externally managed metadata,
+ * or N.M for internally known formats
+ */
+static ssize_t
+metadata_show(mddev_t *mddev, char *page)
+{
+       if (mddev->persistent)
+               return sprintf(page, "%d.%d\n",
+                              mddev->major_version, mddev->minor_version);
+       else
+               return sprintf(page, "none\n");
+}
+
+static ssize_t
+metadata_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       int major, minor;
+       char *e;
+       if (!list_empty(&mddev->disks))
+               return -EBUSY;
+
+       if (cmd_match(buf, "none")) {
+               mddev->persistent = 0;
+               mddev->major_version = 0;
+               mddev->minor_version = 90;
+               return len;
+       }
+       major = simple_strtoul(buf, &e, 10);
+       if (e==buf || *e != '.')
+               return -EINVAL;
+       buf = e+1;
+       minor = simple_strtoul(buf, &e, 10);
+       if (e==buf || (*e && *e != '\n') )
+               return -EINVAL;
+       if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
+           super_types[major].name == NULL)
+               return -ENOENT;
+       mddev->major_version = major;
+       mddev->minor_version = minor;
+       mddev->persistent = 1;
+       return len;
+}
+
+static struct md_sysfs_entry md_metadata =
+__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
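A one-call sketch: selecting 0.90 metadata, which metadata_store() only permits while mddev->disks is still empty; md0 is a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/block/md0/md/metadata_version", O_WRONLY);

            if (fd < 0 || write(fd, "0.90", 4) < 0)
                    perror("metadata_version");
            if (fd >= 0)
                    close(fd);
            return 0;
    }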
+
+static ssize_t
+action_show(mddev_t *mddev, char *page)
+{
+       char *type = "idle";
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+           test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
+               if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+                       type = "reshape";
+               else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+                       if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                               type = "resync";
+                       else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+                               type = "check";
+                       else
+                               type = "repair";
+               } else
+                       type = "recover";
+       }
+       return sprintf(page, "%s\n", type);
+}
+
+static ssize_t
+action_store(mddev_t *mddev, const char *page, size_t len)
+{
+       if (!mddev->pers || !mddev->pers->sync_request)
+               return -EINVAL;
+
+       if (cmd_match(page, "idle")) {
+               if (mddev->sync_thread) {
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       md_unregister_thread(mddev->sync_thread);
+                       mddev->sync_thread = NULL;
+                       mddev->recovery = 0;
+               }
+       } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+                  test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+               return -EBUSY;
+       else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       else if (cmd_match(page, "reshape")) {
+               int err;
+               if (mddev->pers->start_reshape == NULL)
+                       return -EINVAL;
+               err = mddev->pers->start_reshape(mddev);
+               if (err)
+                       return err;
+       } else {
+               if (cmd_match(page, "check"))
+                       set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+               else if (!cmd_match(page, "repair"))
+                       return -EINVAL;
+               set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+               set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+       }
+       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       md_wakeup_thread(mddev->thread);
+       return len;
+}
+
+static ssize_t
+mismatch_cnt_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n",
+                      (unsigned long long) mddev->resync_mismatches);
+}
+
+static struct md_sysfs_entry md_scan_mode =
+__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
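A sketch of a scrub cycle built from the commands action_store() accepts: start a check, poll until sync_action reads idle again, then report mismatch_cnt. md0 and the 5-second poll interval are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int read_attr(const char *path, char *buf, size_t len)
    {
            int fd = open(path, O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = read(fd, buf, len - 1);
            close(fd);
            if (n <= 0)
                    return -1;
            buf[n] = '\0';
            return 0;
    }

    int main(void)
    {
            char buf[32];
            int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);

            if (fd < 0 || write(fd, "check", 5) < 0) {
                    perror("sync_action");
                    return 1;
            }
            close(fd);
            do {    /* crude poll until the scrub finishes */
                    sleep(5);
                    if (read_attr("/sys/block/md0/md/sync_action",
                                  buf, sizeof(buf)))
                            return 1;
            } while (strncmp(buf, "idle", 4) != 0);
            if (!read_attr("/sys/block/md0/md/mismatch_cnt", buf, sizeof(buf)))
                    printf("mismatch_cnt: %s", buf);
            return 0;
    }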
+
+
+static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
+
+static ssize_t
+sync_min_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%d (%s)\n", speed_min(mddev),
+                      mddev->sync_speed_min ? "local": "system");
+}
+
+static ssize_t
+sync_min_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       int min;
+       char *e;
+       if (strncmp(buf, "system", 6)==0) {
+               mddev->sync_speed_min = 0;
+               return len;
+       }
+       min = simple_strtoul(buf, &e, 10);
+       if (buf == e || (*e && *e != '\n') || min <= 0)
+               return -EINVAL;
+       mddev->sync_speed_min = min;
+       return len;
+}
+
+static struct md_sysfs_entry md_sync_min =
+__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
+
+static ssize_t
+sync_max_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%d (%s)\n", speed_max(mddev),
+                      mddev->sync_speed_max ? "local": "system");
+}
+
+static ssize_t
+sync_max_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       int max;
+       char *e;
+       if (strncmp(buf, "system", 6)==0) {
+               mddev->sync_speed_max = 0;
+               return len;
+       }
+       max = simple_strtoul(buf, &e, 10);
+       if (buf == e || (*e && *e != '\n') || max <= 0)
+               return -EINVAL;
+       mddev->sync_speed_max = max;
+       return len;
+}
+
+static struct md_sysfs_entry md_sync_max =
+__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
+
+
+static ssize_t
+sync_speed_show(mddev_t *mddev, char *page)
+{
+       unsigned long resync, dt, db;
+       resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
+       dt = ((jiffies - mddev->resync_mark) / HZ);
+       if (!dt) dt++;
+       db = resync - (mddev->resync_mark_cnt);
+       return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
+}
+
+static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
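A worked example of the arithmetic in sync_speed_show(): db is a delta in 512-byte sectors, so db/dt is sectors per second and the final division by two yields KiB per second (the figures below are invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned long resync = 2048000;          /* sectors done now */
            unsigned long resync_mark_cnt = 1024000; /* sectors at last mark */
            unsigned long dt = 10;                   /* seconds since mark */
            unsigned long db = resync - resync_mark_cnt;

            /* 1024000 sectors / 10 s / 2 = 51200 KiB/s, i.e. 50 MiB/s */
            printf("%lu KiB/sec\n", db / dt / 2);
            return 0;
    }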
+
+static ssize_t
+sync_completed_show(mddev_t *mddev, char *page)
+{
+       unsigned long max_blocks, resync;
+
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+               max_blocks = mddev->resync_max_sectors;
+       else
+               max_blocks = mddev->size << 1;
+
+       resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+       return sprintf(page, "%lu / %lu\n", resync, max_blocks);
+}
+
+static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+
+static ssize_t
+suspend_lo_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
+}
+
+static ssize_t
+suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long new = simple_strtoull(buf, &e, 10);
+
+       if (mddev->pers->quiesce == NULL)
+               return -EINVAL;
+       if (buf == e || (*e && *e != '\n'))
+               return -EINVAL;
+       if (new >= mddev->suspend_hi ||
+           (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
+               mddev->suspend_lo = new;
+               mddev->pers->quiesce(mddev, 2);
+               return len;
+       } else
+               return -EINVAL;
+}
+static struct md_sysfs_entry md_suspend_lo =
+__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
+
+
+static ssize_t
+suspend_hi_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
+}
+
+static ssize_t
+suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long new = simple_strtoull(buf, &e, 10);
+
+       if (mddev->pers->quiesce == NULL)
+               return -EINVAL;
+       if (buf == e || (*e && *e != '\n'))
+               return -EINVAL;
+       if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
+           (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
+               mddev->suspend_hi = new;
+               mddev->pers->quiesce(mddev, 1);
+               mddev->pers->quiesce(mddev, 0);
+               return len;
+       } else
+               return -EINVAL;
+}
+static struct md_sysfs_entry md_suspend_hi =
+__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
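A sketch of opening a suspend window over sectors [1024, 2048): per the validity checks above, suspend_hi must be raised first so that suspend_lo can then be moved inside the window. md0 and the sector values are placeholders:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_attr(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);
            int rv = -1;

            if (fd >= 0) {
                    rv = write(fd, val, strlen(val)) < 0 ? -1 : 0;
                    close(fd);
            }
            if (rv)
                    perror(path);
            return rv;
    }

    int main(void)
    {
            if (write_attr("/sys/block/md0/md/suspend_hi", "2048"))
                    return 1;
            return write_attr("/sys/block/md0/md/suspend_lo", "1024");
    }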
+
+
+static struct attribute *md_default_attrs[] = {
+       &md_level.attr,
+       &md_layout.attr,
+       &md_raid_disks.attr,
+       &md_chunk_size.attr,
+       &md_size.attr,
+       &md_resync_start.attr,
+       &md_metadata.attr,
+       &md_new_device.attr,
+       &md_safe_delay.attr,
+       &md_array_state.attr,
+       NULL,
+};
+
+static struct attribute *md_redundancy_attrs[] = {
+       &md_scan_mode.attr,
+       &md_mismatches.attr,
+       &md_sync_min.attr,
+       &md_sync_max.attr,
+       &md_sync_speed.attr,
+       &md_sync_completed.attr,
+       &md_suspend_lo.attr,
+       &md_suspend_hi.attr,
+       &md_bitmap.attr,
+       NULL,
+};
+static struct attribute_group md_redundancy_group = {
+       .name = NULL,
+       .attrs = md_redundancy_attrs,
+};
+
+
+static ssize_t
+md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
+       mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
+
+       if (!entry->show)
+               return -EIO;
+       rv = mddev_lock(mddev);
+       if (!rv) {
+               rv = entry->show(mddev, page);
+               mddev_unlock(mddev);
+       }
+       return rv;
+}
 
+static ssize_t
+md_attr_store(struct kobject *kobj, struct attribute *attr,
+             const char *page, size_t length)
+{
+       struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
+       mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
 
-       /*
-        * Check if we can support this RAID array
-        */
-       if (mddev->major_version != MD_MAJOR_VERSION ||
-                       mddev->minor_version > MD_MINOR_VERSION) {
-               printk(KERN_ALERT 
-                       "md: %s: unsupported raid array version %d.%d.%d\n",
-                       mdname(mddev), mddev->major_version,
-                       mddev->minor_version, mddev->patch_version);
-               goto abort;
+       if (!entry->store)
+               return -EIO;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       rv = mddev_lock(mddev);
+       if (!rv) {
+               rv = entry->store(mddev, page, length);
+               mddev_unlock(mddev);
        }
+       return rv;
+}
 
-       if ((mddev->recovery_cp != MaxSector) &&
-           ((mddev->level == 1) ||
-            ((mddev->level >= 4) && (mddev->level <= 6))))
-               printk(KERN_ERR "md: %s: raid array is not clean"
-                      " -- starting background reconstruction\n",
-                      mdname(mddev));
-
-       return 0;
-abort:
-       return 1;
+static void md_free(struct kobject *ko)
+{
+       mddev_t *mddev = container_of(ko, mddev_t, kobj);
+       kfree(mddev);
 }
 
+static struct sysfs_ops md_sysfs_ops = {
+       .show   = md_attr_show,
+       .store  = md_attr_store,
+};
+static struct kobj_type md_ktype = {
+       .release        = md_free,
+       .sysfs_ops      = &md_sysfs_ops,
+       .default_attrs  = md_default_attrs,
+};
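The show/store dispatch above relies on container_of() to recover the typed objects from the generic pointers sysfs passes in. A self-contained illustration of that pattern, in plain userspace C with a local container_of definition:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct attribute { const char *name; };

    struct thing {
            int value;
            struct attribute attr;  /* embedded, like mddev_t.kobj */
    };

    static void show(struct attribute *attr)
    {
            /* recover the enclosing object from the embedded member */
            struct thing *t = container_of(attr, struct thing, attr);

            printf("%s = %d\n", attr->name, t->value);
    }

    int main(void)
    {
            struct thing t = { .value = 42, .attr = { "value" } };

            show(&t.attr);  /* prints "value = 42" */
            return 0;
    }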
+
 int mdp_major = 0;
 
 static struct kobject *md_probe(dev_t dev, int *part, void *data)
 {
-       static DECLARE_MUTEX(disks_sem);
+       static DEFINE_MUTEX(disks_mutex);
        mddev_t *mddev = mddev_find(dev);
        struct gendisk *disk;
        int partitioned = (MAJOR(dev) != MD_MAJOR);
@@ -1462,15 +2957,15 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        if (!mddev)
                return NULL;
 
-       down(&disks_sem);
+       mutex_lock(&disks_mutex);
        if (mddev->gendisk) {
-               up(&disks_sem);
+               mutex_unlock(&disks_mutex);
                mddev_put(mddev);
                return NULL;
        }
        disk = alloc_disk(1 << shift);
        if (!disk) {
-               up(&disks_sem);
+               mutex_unlock(&disks_mutex);
                mddev_put(mddev);
                return NULL;
        }
@@ -1485,12 +2980,15 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        disk->queue = mddev->queue;
        add_disk(disk);
        mddev->gendisk = disk;
-       up(&disks_sem);
+       mutex_unlock(&disks_mutex);
+       mddev->kobj.parent = &disk->kobj;
+       mddev->kobj.k_name = NULL;
+       snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
+       mddev->kobj.ktype = &md_ktype;
+       kobject_register(&mddev->kobj);
        return NULL;
 }
 
-void md_wakeup_thread(mdk_thread_t *thread);
-
 static void md_safemode_timeout(unsigned long data)
 {
        mddev_t *mddev = (mddev_t *) data;
@@ -1499,20 +2997,21 @@ static void md_safemode_timeout(unsigned long data)
        md_wakeup_thread(mddev->thread);
 }
 
+static int start_dirty_degraded;
 
 static int do_md_run(mddev_t * mddev)
 {
-       int pnum, err;
+       int err;
        int chunk_size;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        struct gendisk *disk;
+       struct mdk_personality *pers;
        char b[BDEVNAME_SIZE];
 
-       if (list_empty(&mddev->disks)) {
-               MD_BUG();
+       if (list_empty(&mddev->disks))
+               /* cannot run an array with no devices.. */
                return -EINVAL;
-       }
 
        if (mddev->pers)
                return -EBUSY;
@@ -1520,26 +3019,12 @@ static int do_md_run(mddev_t * mddev)
        /*
         * Analyze all RAID superblock(s)
         */
-       if (!mddev->raid_disks && analyze_sbs(mddev)) {
-               MD_BUG();
-               return -EINVAL;
-       }
+       if (!mddev->raid_disks)
+               analyze_sbs(mddev);
 
        chunk_size = mddev->chunk_size;
-       pnum = level_to_pers(mddev->level);
 
-       if ((pnum != MULTIPATH) && (pnum != RAID1)) {
-               if (!chunk_size) {
-                       /*
-                        * 'default chunksize' in the old md code used to
-                        * be PAGE_SIZE, baaad.
-                        * we abort here to be on the safe side. We don't
-                        * want to continue the bad practice.
-                        */
-                       printk(KERN_ERR 
-                               "no chunksize specified, see 'man raidtab'\n");
-                       return -EINVAL;
-               }
+       if (chunk_size) {
                if (chunk_size > MAX_CHUNK_SIZE) {
                        printk(KERN_ERR "too big chunk_size: %d > %d\n",
                                chunk_size, MAX_CHUNK_SIZE);
@@ -1549,7 +3034,7 @@ static int do_md_run(mddev_t * mddev)
                 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
                 */
                if ( (1 << ffz(~chunk_size)) != chunk_size) {
-                       MD_BUG();
+                       printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
                        return -EINVAL;
                }
                if (chunk_size < PAGE_SIZE) {
@@ -1560,7 +3045,7 @@ static int do_md_run(mddev_t * mddev)
 
                /* devices must have minimum size of one chunk */
                ITERATE_RDEV(mddev,rdev,tmp) {
-                       if (rdev->faulty)
+                       if (test_bit(Faulty, &rdev->flags))
                                continue;
                        if (rdev->size < chunk_size / 1024) {
                                printk(KERN_WARNING
@@ -1574,16 +3059,11 @@ static int do_md_run(mddev_t * mddev)
                }
        }
 
-       if (pnum >= MAX_PERSONALITY) {
-               MD_BUG();
-               return -EINVAL;
-       }
-
 #ifdef CONFIG_KMOD
-       if (!pers[pnum])
-       {
-               request_module("md-personality-%d", pnum);
-       }
+       if (mddev->level != LEVEL_NONE)
+               request_module("md-level-%d", mddev->level);
+       else if (mddev->clevel[0])
+               request_module("md-%s", mddev->clevel);
 #endif
 
        /*
@@ -1593,7 +3073,7 @@ static int do_md_run(mddev_t * mddev)
         * Also find largest hardsector size
         */
        ITERATE_RDEV(mddev,rdev,tmp) {
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
                invalidate_bdev(rdev->bdev, 0);
@@ -1605,34 +3085,77 @@ static int do_md_run(mddev_t * mddev)
                return -ENOMEM;
 
        spin_lock(&pers_lock);
-       if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
+       pers = find_pers(mddev->level, mddev->clevel);
+       if (!pers || !try_module_get(pers->owner)) {
                spin_unlock(&pers_lock);
-               printk(KERN_WARNING "md: personality %d is not loaded!\n",
-                      pnum);
+               if (mddev->level != LEVEL_NONE)
+                       printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
+                              mddev->level);
+               else
+                       printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
+                              mddev->clevel);
                return -EINVAL;
        }
-
-       mddev->pers = pers[pnum];
+       mddev->pers = pers;
        spin_unlock(&pers_lock);
+       mddev->level = pers->level;
+       strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+
+       if (mddev->reshape_position != MaxSector &&
+           pers->start_reshape == NULL) {
+               /* This personality cannot handle reshaping... */
+               mddev->pers = NULL;
+               module_put(pers->owner);
+               return -EINVAL;
+       }
+
+       mddev->recovery = 0;
+       mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
+       mddev->barriers_work = 1;
+       mddev->ok_start_degraded = start_dirty_degraded;
+
+       if (start_readonly)
+               mddev->ro = 2; /* read-only, but switch on first write */
 
        err = mddev->pers->run(mddev);
+       if (!err && mddev->pers->sync_request) {
+               err = bitmap_create(mddev);
+               if (err) {
+                       printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
+                              mdname(mddev), err);
+                       mddev->pers->stop(mddev);
+               }
+       }
        if (err) {
                printk(KERN_ERR "md: pers->run() failed ...\n");
                module_put(mddev->pers->owner);
                mddev->pers = NULL;
-               return -EINVAL;
+               bitmap_destroy(mddev);
+               return err;
        }
+       if (mddev->pers->sync_request)
+               sysfs_create_group(&mddev->kobj, &md_redundancy_group);
+       else if (mddev->ro == 2) /* auto-readonly not meaningful */
+               mddev->ro = 0;
+
        atomic_set(&mddev->writes_pending,0);
        mddev->safemode = 0;
        mddev->safemode_timer.function = md_safemode_timeout;
        mddev->safemode_timer.data = (unsigned long) mddev;
-       mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
+       mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
        mddev->in_sync = 1;
+
+       ITERATE_RDEV(mddev,rdev,tmp)
+               if (rdev->raid_disk >= 0) {
+                       char nm[20];
+                       sprintf(nm, "rd%d", rdev->raid_disk);
+                       sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+               }
        
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        
-       if (mddev->sb_dirty)
-               md_update_sb(mddev);
+       if (mddev->flags)
+               md_update_sb(mddev, 0);
 
        set_capacity(disk, mddev->array_size<<1);
 
@@ -1646,7 +3169,40 @@ static int do_md_run(mddev_t * mddev)
        mddev->queue->queuedata = mddev;
        mddev->queue->make_request_fn = mddev->pers->make_request;
 
+       /* If there is a partially-recovered drive we need to
+        * start recovery here.  If we leave it to md_check_recovery,
+        * it will remove the drives and not do the right thing
+        */
+       if (mddev->degraded && !mddev->sync_thread) {
+               struct list_head *rtmp;
+               int spares = 0;
+               ITERATE_RDEV(mddev,rdev,rtmp)
+                       if (rdev->raid_disk >= 0 &&
+                           !test_bit(In_sync, &rdev->flags) &&
+                           !test_bit(Faulty, &rdev->flags))
+                               /* complete an interrupted recovery */
+                               spares++;
+               if (spares && mddev->pers->sync_request) {
+                       mddev->recovery = 0;
+                       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+                       mddev->sync_thread = md_register_thread(md_do_sync,
+                                                               mddev,
+                                                               "%s_resync");
+                       if (!mddev->sync_thread) {
+                               printk(KERN_ERR "%s: could not start resync"
+                                      " thread...\n",
+                                      mdname(mddev));
+                               /* leave the spares where they are, it shouldn't hurt */
+                               mddev->recovery = 0;
+                       }
+               }
+       }
+       md_wakeup_thread(mddev->thread);
+       md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
        mddev->changed = 1;
+       md_new_event(mddev);
+       kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
        return 0;
 }
 
@@ -1678,18 +3234,47 @@ static int restart_array(mddev_t *mddev)
                 */
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
+               md_wakeup_thread(mddev->sync_thread);
                err = 0;
-       } else {
-               printk(KERN_ERR "md: %s has no personality assigned.\n",
-                       mdname(mddev));
+       } else
                err = -EINVAL;
-       }
 
 out:
        return err;
 }
 
-static int do_md_stop(mddev_t * mddev, int ro)
+/* similar to deny_write_access, but accounts for our holding a reference
+ * to the file ourselves */
+static int deny_bitmap_write_access(struct file * file)
+{
+       struct inode *inode = file->f_mapping->host;
+
+       spin_lock(&inode->i_lock);
+       if (atomic_read(&inode->i_writecount) > 1) {
+               spin_unlock(&inode->i_lock);
+               return -ETXTBSY;
+       }
+       atomic_set(&inode->i_writecount, -1);
+       spin_unlock(&inode->i_lock);
+
+       return 0;
+}
+
+static void restore_bitmap_write_access(struct file *file)
+{
+       struct inode *inode = file->f_mapping->host;
+
+       spin_lock(&inode->i_lock);
+       atomic_set(&inode->i_writecount, 1);
+       spin_unlock(&inode->i_lock);
+}
+
+/* mode:
+ *   0 - completely stop and disassemble array
+ *   1 - switch to readonly
+ *   2 - stop but do not disassemble array
+ */
+static int do_md_stop(mddev_t * mddev, int mode)
 {
        int err = 0;
        struct gendisk *disk = mddev->gendisk;
@@ -1701,6 +3286,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
                }
 
                if (mddev->sync_thread) {
+                       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_unregister_thread(mddev->sync_thread);
                        mddev->sync_thread = NULL;
@@ -1710,61 +3296,92 @@ static int do_md_stop(mddev_t * mddev, int ro)
 
                invalidate_partition(disk, 0);
 
-               if (ro) {
+               switch(mode) {
+               case 1: /* readonly */
                        err  = -ENXIO;
-                       if (mddev->ro)
+                       if (mddev->ro==1)
                                goto out;
                        mddev->ro = 1;
-               } else {
+                       break;
+               case 0: /* disassemble */
+               case 2: /* stop */
+                       bitmap_flush(mddev);
+                       md_super_wait(mddev);
                        if (mddev->ro)
                                set_disk_ro(disk, 0);
                        blk_queue_make_request(mddev->queue, md_fail_request);
                        mddev->pers->stop(mddev);
+                       if (mddev->pers->sync_request)
+                               sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+
                        module_put(mddev->pers->owner);
                        mddev->pers = NULL;
+
+                       set_capacity(disk, 0);
+                       mddev->changed = 1;
+
                        if (mddev->ro)
                                mddev->ro = 0;
                }
-               if (!mddev->in_sync) {
+               if (!mddev->in_sync || mddev->flags) {
                        /* mark array as shutdown cleanly */
                        mddev->in_sync = 1;
-                       md_update_sb(mddev);
+                       md_update_sb(mddev, 1);
                }
-               if (ro)
+               if (mode == 1)
                        set_disk_ro(disk, 1);
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        }
+
        /*
         * Free resources if final stop
         */
-       if (!ro) {
-               struct gendisk *disk;
+       if (mode == 0) {
+               mdk_rdev_t *rdev;
+               struct list_head *tmp;
+
                printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
+               bitmap_destroy(mddev);
+               if (mddev->bitmap_file) {
+                       restore_bitmap_write_access(mddev->bitmap_file);
+                       fput(mddev->bitmap_file);
+                       mddev->bitmap_file = NULL;
+               }
+               mddev->bitmap_offset = 0;
+
+               ITERATE_RDEV(mddev,rdev,tmp)
+                       if (rdev->raid_disk >= 0) {
+                               char nm[20];
+                               sprintf(nm, "rd%d", rdev->raid_disk);
+                               sysfs_remove_link(&mddev->kobj, nm);
+                       }
+
                export_array(mddev);
 
                mddev->array_size = 0;
-               disk = mddev->gendisk;
-               if (disk)
-                       set_capacity(disk, 0);
-               mddev->changed = 1;
-       } else
+               mddev->size = 0;
+               mddev->raid_disks = 0;
+               mddev->recovery_cp = 0;
+
+       } else if (mddev->pers)
                printk(KERN_INFO "md: %s switched to read-only mode.\n",
                        mdname(mddev));
        err = 0;
+       md_new_event(mddev);
 out:
        return err;
 }
 
+#ifndef MODULE
 static void autorun_array(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
        struct list_head *tmp;
        int err;
 
-       if (list_empty(&mddev->disks)) {
-               MD_BUG();
+       if (list_empty(&mddev->disks))
                return;
-       }
 
        printk(KERN_INFO "md: running: ");
 
@@ -1776,7 +3393,7 @@ static void autorun_array(mddev_t *mddev)
 
        err = do_md_run (mddev);
        if (err) {
-               printk(KERN_WARNING "md :do_md_run() returned %d\n", err);
+               printk(KERN_WARNING "mddo_md_run() returned %d\n", err);
                do_md_stop (mddev, 0);
        }
 }
@@ -1795,7 +3412,6 @@ static void autorun_array(mddev_t *mddev)
  */
 static void autorun_devices(int part)
 {
-       struct list_head candidates;
        struct list_head *tmp;
        mdk_rdev_t *rdev0, *rdev;
        mddev_t *mddev;
@@ -1803,7 +3419,9 @@ static void autorun_devices(int part)
 
        printk(KERN_INFO "md: autorun ...\n");
        while (!list_empty(&pending_raid_disks)) {
+               int unit;
                dev_t dev;
+               LIST_HEAD(candidates);
                rdev0 = list_entry(pending_raid_disks.next,
                                         mdk_rdev_t, same_set);
 
@@ -1821,16 +3439,19 @@ static void autorun_devices(int part)
                 * mostly sane superblocks. It's time to allocate the
                 * mddev.
                 */
-               if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
+               if (part) {
+                       dev = MKDEV(mdp_major,
+                                   rdev0->preferred_minor << MdpMinorShift);
+                       unit = MINOR(dev) >> MdpMinorShift;
+               } else {
+                       dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
+                       unit = MINOR(dev);
+               }
+               if (rdev0->preferred_minor != unit) {
                        printk(KERN_INFO "md: unit number in %s is bad: %d\n",
                               bdevname(rdev0->bdev, b), rdev0->preferred_minor);
                        break;
                }
-               if (part)
-                       dev = MKDEV(mdp_major,
-                                   rdev0->preferred_minor << MdpMinorShift);
-               else
-                       dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
 
                md_probe(dev, NULL, NULL);
                mddev = mddev_find(dev);
@@ -1867,6 +3488,7 @@ static void autorun_devices(int part)
        }
        printk(KERN_INFO "md: ... autorun DONE.\n");
 }
+#endif /* !MODULE */
 
 /*
  * import RAID devices based on one partition
@@ -1881,11 +3503,9 @@ static int autostart_array(dev_t startdev)
        mdk_rdev_t *start_rdev = NULL, *rdev;
 
        start_rdev = md_import_device(startdev, 0, 0);
-       if (IS_ERR(start_rdev)) {
-               printk(KERN_WARNING "md: could not import %s!\n",
-                       __bdevname(startdev, b));
+       if (IS_ERR(start_rdev))
                return err;
-       }
+
 
        /* NOTE: this can only work for 0.90.0 superblocks */
        sb = (mdp_super_t*)page_address(start_rdev->sb_page);
@@ -1896,7 +3516,7 @@ static int autostart_array(dev_t startdev)
                return err;
        }
 
-       if (start_rdev->faulty) {
+       if (test_bit(Faulty, &start_rdev->flags)) {
                printk(KERN_WARNING 
                        "md: can not autostart based on faulty %s!\n",
                        bdevname(start_rdev->bdev,b));
@@ -1916,12 +3536,9 @@ static int autostart_array(dev_t startdev)
                if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
                        continue;
                rdev = md_import_device(dev, 0, 0);
-               if (IS_ERR(rdev)) {
-                       printk(KERN_WARNING "md: could not import %s,"
-                               " trying to run array nevertheless.\n",
-                               __bdevname(dev, b));
+               if (IS_ERR(rdev))
                        continue;
-               }
+
                list_add(&rdev->same_set, &pending_raid_disks);
        }
 
@@ -1958,11 +3575,11 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        nr=working=active=failed=spare=0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                nr++;
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        failed++;
                else {
                        working++;
-                       if (rdev->in_sync)
+                       if (test_bit(In_sync, &rdev->flags))
                                active++;       
                        else
                                spare++;
@@ -1971,10 +3588,12 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
 
        info.major_version = mddev->major_version;
        info.minor_version = mddev->minor_version;
-       info.patch_version = 1;
+       info.patch_version = MD_PATCHLEVEL_VERSION;
        info.ctime         = mddev->ctime;
        info.level         = mddev->level;
        info.size          = mddev->size;
+       if (info.size != mddev->size) /* overflow */
+               info.size = -1;
        info.nr_disks      = nr;
        info.raid_disks    = mddev->raid_disks;
        info.md_minor      = mddev->md_minor;
@@ -1984,6 +3603,8 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        info.state         = 0;
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
+       if (mddev->bitmap && mddev->bitmap_offset)
+               info.state = (1<<MD_SB_BITMAP_PRESENT);
        info.active_disks  = active;
        info.working_disks = working;
        info.failed_disks  = failed;
@@ -1998,6 +3619,44 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        return 0;
 }
 
+static int get_bitmap_file(mddev_t * mddev, void __user * arg)
+{
+       mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
+       char *ptr, *buf = NULL;
+       int err = -ENOMEM;
+
+       md_allow_write(mddev);
+
+       file = kmalloc(sizeof(*file), GFP_KERNEL);
+       if (!file)
+               goto out;
+
+       /* bitmap disabled, zero the first byte and copy out */
+       if (!mddev->bitmap || !mddev->bitmap->file) {
+               file->pathname[0] = '\0';
+               goto copy_out;
+       }
+
+       buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
+       if (!buf)
+               goto out;
+
+       ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
+       if (!ptr)
+               goto out;
+
+       strcpy(file->pathname, ptr);
+
+copy_out:
+       err = 0;
+       if (copy_to_user(arg, file, sizeof(*file)))
+               err = -EFAULT;
+out:
+       kfree(buf);
+       kfree(file);
+       return err;
+}
+
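For context, a minimal userspace sketch (not part of this patch) of how the
new GET_BITMAP_FILE ioctl above is consumed; the device path is illustrative:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/raid/md_u.h>    /* mdu_bitmap_file_t, GET_BITMAP_FILE */

        static int print_bitmap_path(const char *dev)   /* e.g. "/dev/md0" */
        {
                mdu_bitmap_file_t bf;
                int fd = open(dev, O_RDONLY);

                if (fd < 0)
                        return -1;
                if (ioctl(fd, GET_BITMAP_FILE, &bf) == 0)
                        /* pathname is empty when no file-backed bitmap is set */
                        printf("%s bitmap: %s\n", dev,
                               bf.pathname[0] ? bf.pathname : "(none)");
                close(fd);
                return 0;
        }
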
 static int get_disk_info(mddev_t * mddev, void __user * arg)
 {
        mdu_disk_info_t info;
@@ -2015,12 +3674,14 @@ static int get_disk_info(mddev_t * mddev, void __user * arg)
                info.minor = MINOR(rdev->bdev->bd_dev);
                info.raid_disk = rdev->raid_disk;
                info.state = 0;
-               if (rdev->faulty)
+               if (test_bit(Faulty, &rdev->flags))
                        info.state |= (1<<MD_DISK_FAULTY);
-               else if (rdev->in_sync) {
+               else if (test_bit(In_sync, &rdev->flags)) {
                        info.state |= (1<<MD_DISK_ACTIVE);
                        info.state |= (1<<MD_DISK_SYNC);
                }
+               if (test_bit(WriteMostly, &rdev->flags))
+                       info.state |= (1<<MD_DISK_WRITEMOSTLY);
        } else {
                info.major = info.minor = 0;
                info.raid_disk = -1;
@@ -2085,21 +3746,52 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                               mdname(mddev));
                        return -EINVAL;
                }
-               rdev = md_import_device(dev, mddev->major_version,
-                                       mddev->minor_version);
+               if (mddev->persistent)
+                       rdev = md_import_device(dev, mddev->major_version,
+                                               mddev->minor_version);
+               else
+                       rdev = md_import_device(dev, -1, -1);
                if (IS_ERR(rdev)) {
                        printk(KERN_WARNING 
                                "md: md_import_device returned %ld\n",
                                PTR_ERR(rdev));
                        return PTR_ERR(rdev);
                }
-               rdev->in_sync = 0; /* just to be sure */
+               /* set save_raid_disk if appropriate */
+               if (!mddev->persistent) {
+                       if (info->state & (1<<MD_DISK_SYNC)  &&
+                           info->raid_disk < mddev->raid_disks)
+                               rdev->raid_disk = info->raid_disk;
+                       else
+                               rdev->raid_disk = -1;
+               } else
+                       super_types[mddev->major_version].
+                               validate_super(mddev, rdev);
+               rdev->saved_raid_disk = rdev->raid_disk;
+
+               clear_bit(In_sync, &rdev->flags); /* just to be sure */
+               if (info->state & (1<<MD_DISK_WRITEMOSTLY))
+                       set_bit(WriteMostly, &rdev->flags);
+
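+               /* bind_rdev_to_array() below expects raid_disk == -1; the
+                * slot chosen above survives in saved_raid_disk so that a
+                * later recovery can try to give the device its old slot
+                * back.
+                */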
                rdev->raid_disk = -1;
                err = bind_rdev_to_array(rdev, mddev);
+               if (!err && !mddev->pers->hot_remove_disk) {
+                       /* If there is hot_add_disk but no hot_remove_disk
+                        * then added disks are for geometry changes,
+                        * and should be added immediately.
+                        */
+                       super_types[mddev->major_version].
+                               validate_super(mddev, rdev);
+                       err = mddev->pers->hot_add_disk(mddev, rdev);
+                       if (err)
+                               unbind_rdev_from_array(rdev);
+               }
                if (err)
                        export_rdev(rdev);
-               if (mddev->thread)
-                       md_wakeup_thread(mddev->thread);
+
+               md_update_sb(mddev, 1);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               md_wakeup_thread(mddev->thread);
                return err;
        }
 
@@ -2127,17 +3819,14 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                else
                        rdev->raid_disk = -1;
 
-               rdev->faulty = 0;
+               rdev->flags = 0;
+
                if (rdev->raid_disk < mddev->raid_disks)
-                       rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
-               else
-                       rdev->in_sync = 0;
+                       if (info->state & (1<<MD_DISK_SYNC))
+                               set_bit(In_sync, &rdev->flags);
 
-               err = bind_rdev_to_array(rdev, mddev);
-               if (err) {
-                       export_rdev(rdev);
-                       return err;
-               }
+               if (info->state & (1<<MD_DISK_WRITEMOSTLY))
+                       set_bit(WriteMostly, &rdev->flags);
 
                if (!mddev->persistent) {
                        printk(KERN_INFO "md: nonpersistent superblock ...\n");
@@ -2146,45 +3835,12 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                        rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
                rdev->size = calc_dev_size(rdev, mddev->chunk_size);
 
-               if (!mddev->size || (mddev->size > rdev->size))
-                       mddev->size = rdev->size;
-       }
-
-       return 0;
-}
-
-static int hot_generate_error(mddev_t * mddev, dev_t dev)
-{
-       char b[BDEVNAME_SIZE];
-       struct request_queue *q;
-       mdk_rdev_t *rdev;
-
-       if (!mddev->pers)
-               return -ENODEV;
-
-       printk(KERN_INFO "md: trying to generate %s error in %s ... \n",
-               __bdevname(dev, b), mdname(mddev));
-
-       rdev = find_rdev(mddev, dev);
-       if (!rdev) {
-               /* MD_BUG(); */ /* like hell - it's not a driver bug */
-               return -ENXIO;
-       }
-
-       if (rdev->desc_nr == -1) {
-               MD_BUG();
-               return -EINVAL;
-       }
-       if (!rdev->in_sync)
-               return -ENODEV;
-
-       q = bdev_get_queue(rdev->bdev);
-       if (!q) {
-               MD_BUG();
-               return -ENODEV;
+               err = bind_rdev_to_array(rdev, mddev);
+               if (err) {
+                       export_rdev(rdev);
+                       return err;
+               }
        }
-       printk(KERN_INFO "md: okay, generating error!\n");
-//     q->oneshot_error = 1; // disabled for now
 
        return 0;
 }
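
A hedged sketch of the caller's side of this path -- hot-adding a spare to a
running array; the device numbers and the helper name are illustrative:

        #include <sys/ioctl.h>
        #include <linux/raid/md_u.h>    /* mdu_disk_info_t, ADD_NEW_DISK */

        /* hot-add /dev/sdc1 (8:33, illustrative) as a spare */
        static int add_spare(int md_fd)
        {
                mdu_disk_info_t dinfo = {
                        .major     = 8,
                        .minor     = 33,
                        .raid_disk = -1,        /* let md pick a slot */
                        .state     = 0,         /* or 1<<MD_DISK_WRITEMOSTLY */
                };
                return ioctl(md_fd, ADD_NEW_DISK, &dinfo);
        }
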
@@ -2197,9 +3853,6 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
        if (!mddev->pers)
                return -ENODEV;
 
-       printk(KERN_INFO "md: trying to remove %s from %s ... \n",
-               __bdevname(dev, b), mdname(mddev));
-
        rdev = find_rdev(mddev, dev);
        if (!rdev)
                return -ENXIO;
@@ -2208,7 +3861,8 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
                goto busy;
 
        kick_rdev_from_array(rdev);
-       md_update_sb(mddev);
+       md_update_sb(mddev, 1);
+       md_new_event(mddev);
 
        return 0;
 busy:
@@ -2227,9 +3881,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
        if (!mddev->pers)
                return -ENODEV;
 
-       printk(KERN_INFO "md: trying to hot-add %s to %s ... \n",
-               __bdevname(dev, b), mdname(mddev));
-
        if (mddev->major_version != 0) {
                printk(KERN_WARNING "%s: HOT_ADD may only be used with"
                        " version-0 superblocks.\n",
@@ -2260,25 +3911,19 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
        size = calc_dev_size(rdev, mddev->chunk_size);
        rdev->size = size;
 
-       if (size < mddev->size) {
-               printk(KERN_WARNING 
-                       "%s: disk size %llu blocks < array size %llu\n",
-                       mdname(mddev), (unsigned long long)size,
-                       (unsigned long long)mddev->size);
-               err = -ENOSPC;
-               goto abort_export;
-       }
-
-       if (rdev->faulty) {
+       if (test_bit(Faulty, &rdev->flags)) {
                printk(KERN_WARNING 
                        "md: can not hot-add faulty %s disk to %s!\n",
                        bdevname(rdev->bdev,b), mdname(mddev));
                err = -EINVAL;
                goto abort_export;
        }
-       rdev->in_sync = 0;
+       clear_bit(In_sync, &rdev->flags);
        rdev->desc_nr = -1;
-       bind_rdev_to_array(rdev, mddev);
+       rdev->saved_raid_disk = -1;
+       err = bind_rdev_to_array(rdev, mddev);
+       if (err)
+               goto abort_export;
 
        /*
         * The rest should better be atomic, we can have disk failures
@@ -2294,7 +3939,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
 
        rdev->raid_disk = -1;
 
-       md_update_sb(mddev);
+       md_update_sb(mddev, 1);
 
        /*
         * Kick recovery, maybe this spare has to be added to the
@@ -2302,7 +3947,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
         */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
-
+       md_new_event(mddev);
        return 0;
 
 abort_unbind_export:
@@ -2313,6 +3958,63 @@ abort_export:
        return err;
 }
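
Both legacy hot-plug ioctls above take an encoded dev_t rather than a struct;
a sketch, assuming small major/minor numbers so that the encoding produced is
one new_decode_dev() accepts (8:17 = /dev/sdb1, illustrative):

        #include <sys/ioctl.h>
        #include <sys/sysmacros.h>      /* makedev() */
        #include <linux/raid/md_u.h>    /* HOT_ADD_DISK, HOT_REMOVE_DISK */

        /* v0.90 arrays only, per the check in hot_add_disk() */
        static int hot_add_sdb1(int md_fd)
        {
                return ioctl(md_fd, HOT_ADD_DISK,
                             (unsigned long)makedev(8, 17));
        }

        static int hot_remove_sdb1(int md_fd)
        {
                return ioctl(md_fd, HOT_REMOVE_DISK,
                             (unsigned long)makedev(8, 17));
        }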
 
+static int set_bitmap_file(mddev_t *mddev, int fd)
+{
+       int err;
+
+       if (mddev->pers) {
+               if (!mddev->pers->quiesce)
+                       return -EBUSY;
+               if (mddev->recovery || mddev->sync_thread)
+                       return -EBUSY;
+               /* we should be able to change the bitmap.. */
+       }
+
+
+       if (fd >= 0) {
+               if (mddev->bitmap)
+                       return -EEXIST; /* cannot add when bitmap is present */
+               mddev->bitmap_file = fget(fd);
+
+               if (mddev->bitmap_file == NULL) {
+                       printk(KERN_ERR "%s: error: failed to get bitmap file\n",
+                              mdname(mddev));
+                       return -EBADF;
+               }
+
+               err = deny_bitmap_write_access(mddev->bitmap_file);
+               if (err) {
+                       printk(KERN_ERR "%s: error: bitmap file is already in use\n",
+                              mdname(mddev));
+                       fput(mddev->bitmap_file);
+                       mddev->bitmap_file = NULL;
+                       return err;
+               }
+               mddev->bitmap_offset = 0; /* file overrides offset */
+       } else if (mddev->bitmap == NULL)
+               return -ENOENT; /* cannot remove what isn't there */
+       err = 0;
+       if (mddev->pers) {
+               mddev->pers->quiesce(mddev, 1);
+               if (fd >= 0)
+                       err = bitmap_create(mddev);
+               if (fd < 0 || err) {
+                       bitmap_destroy(mddev);
+                       fd = -1; /* make sure to put the file */
+               }
+               mddev->pers->quiesce(mddev, 0);
+       }
+       if (fd < 0) {
+               if (mddev->bitmap_file) {
+                       restore_bitmap_write_access(mddev->bitmap_file);
+                       fput(mddev->bitmap_file);
+               }
+               mddev->bitmap_file = NULL;
+       }
+
+       return err;
+}
+
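set_bitmap_file() pairs with an fd-passing ioctl; a sketch of both directions,
with an assumed bitmap path (md takes its own reference via fget(), so the
caller may close its fd straight away):

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/raid/md_u.h>    /* SET_BITMAP_FILE */

        static int attach_bitmap(int md_fd)
        {
                int rv, bfd = open("/var/lib/md/md0-bitmap", O_RDWR);

                if (bfd < 0)
                        return -1;
                rv = ioctl(md_fd, SET_BITMAP_FILE, bfd);
                close(bfd);             /* md holds mddev->bitmap_file now */
                return rv;
        }

        static int detach_bitmap(int md_fd)
        {
                /* any negative fd removes a file-backed bitmap */
                return ioctl(md_fd, SET_BITMAP_FILE, -1);
        }
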
 /*
  * set_array_info is used two different ways
  * The original usage is when creating a new array.
@@ -2343,6 +4045,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->major_version = info->major_version;
                mddev->minor_version = info->minor_version;
                mddev->patch_version = info->patch_version;
+               mddev->persistent = !info->not_persistent;
                return 0;
        }
        mddev->major_version = MD_MAJOR_VERSION;
@@ -2351,6 +4054,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
        mddev->ctime         = get_seconds();
 
        mddev->level         = info->level;
+       mddev->clevel[0]     = 0;
        mddev->size          = info->size;
        mddev->raid_disks    = info->raid_disks;
        /* don't set md_minor, it is determined by which /dev/md* was
@@ -2367,16 +4071,90 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
        mddev->max_disks     = MD_SB_DISKS;
 
-       mddev->sb_dirty      = 1;
+       mddev->flags         = 0;
+       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+
+       mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+       mddev->bitmap_offset = 0;
+
+       mddev->reshape_position = MaxSector;
 
        /*
         * Generate a 128 bit UUID
         */
        get_random_bytes(mddev->uuid, 16);
 
-       return 0;
+       mddev->new_level = mddev->level;
+       mddev->new_chunk = mddev->chunk_size;
+       mddev->new_layout = mddev->layout;
+       mddev->delta_disks = 0;
+
+       return 0;
+}
+
+static int update_size(mddev_t *mddev, unsigned long size)
+{
+       mdk_rdev_t * rdev;
+       int rv;
+       struct list_head *tmp;
+       int fit = (size == 0);
+
+       if (mddev->pers->resize == NULL)
+               return -EINVAL;
+       /* The "size" is the amount of each device that is used.
+        * This can only make sense for arrays with redundancy.
+        * linear and raid0 always use whatever space is available
+        * We can only consider changing the size if no resync
+        * or reconstruction is happening, and if the new size
+        * is acceptable. It must fit before the sb_offset or,
+        * if that is <data_offset, it must fit before the
+        * size of each device.
+        * If size is zero, we find the largest size that fits.
+        */
+       if (mddev->sync_thread)
+               return -EBUSY;
+       ITERATE_RDEV(mddev,rdev,tmp) {
+               sector_t avail;
+               avail = rdev->size * 2;
+
+               if (fit && (size == 0 || size > avail/2))
+                       size = avail/2;
+               if (avail < ((sector_t)size << 1))
+                       return -ENOSPC;
+       }
+       rv = mddev->pers->resize(mddev, (sector_t)size *2);
+       if (!rv) {
+               struct block_device *bdev;
+
+               bdev = bdget_disk(mddev->gendisk, 0);
+               if (bdev) {
+                       mutex_lock(&bdev->bd_inode->i_mutex);
+                       i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
+                       mutex_unlock(&bdev->bd_inode->i_mutex);
+                       bdput(bdev);
+               }
+       }
+       return rv;
+}
+
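The unit mixing in update_size() is easy to misread: "size" and rdev->size
count 1K blocks while "avail" and the ->resize() argument count 512-byte
sectors, hence the *2 and <<1. Worked numbers, assuming a 100 GiB smallest
member:

        /* rdev->size           = 104857600     (1K blocks)
         * avail = rdev->size*2 = 209715200     (sectors)
         * size == 0 ("fit")   -> size = avail/2 = 104857600  (1K blocks)
         * pers->resize(mddev, size*2)           back to sectors again
         */
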
+static int update_raid_disks(mddev_t *mddev, int raid_disks)
+{
+       int rv;
+       /* change the number of raid disks */
+       if (mddev->pers->check_reshape == NULL)
+               return -EINVAL;
+       if (raid_disks <= 0 ||
+           raid_disks >= mddev->max_disks)
+               return -EINVAL;
+       if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+               return -EBUSY;
+       mddev->delta_disks = raid_disks - mddev->raid_disks;
+
+       rv = mddev->pers->check_reshape(mddev);
+       return rv;
 }
 
+
 /*
  * update_array_info is used to change the configuration of an
  * on-line array.
@@ -2389,88 +4167,78 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
 {
        int rv = 0;
        int cnt = 0;
+       int state = 0;
+
+       /* calculate expected state, ignoring low bits */
+       if (mddev->bitmap && mddev->bitmap_offset)
+               state |= (1 << MD_SB_BITMAP_PRESENT);
 
        if (mddev->major_version != info->major_version ||
            mddev->minor_version != info->minor_version ||
 /*         mddev->patch_version != info->patch_version || */
            mddev->ctime         != info->ctime         ||
            mddev->level         != info->level         ||
-           mddev->layout        != info->layout        ||
+/*         mddev->layout        != info->layout        || */
            !mddev->persistent   != info->not_persistent||
-           mddev->chunk_size    != info->chunk_size    )
+           mddev->chunk_size    != info->chunk_size    ||
+           /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
+           ((state^info->state) & 0xfffffe00)
+               )
                return -EINVAL;
        /* Check there is only one change */
-       if (mddev->size != info->size) cnt++;
+       if (info->size >= 0 && mddev->size != info->size) cnt++;
        if (mddev->raid_disks != info->raid_disks) cnt++;
+       if (mddev->layout != info->layout) cnt++;
+       if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
        if (cnt == 0) return 0;
        if (cnt > 1) return -EINVAL;
 
-       if (mddev->size != info->size) {
-               mdk_rdev_t * rdev;
-               struct list_head *tmp;
-               if (mddev->pers->resize == NULL)
-                       return -EINVAL;
-               /* The "size" is the amount of each device that is used.
-                * This can only make sense for arrays with redundancy.
-                * linear and raid0 always use whatever space is available
-                * We can only consider changing the size of no resync
-                * or reconstruction is happening, and if the new size
-                * is acceptable. It must fit before the sb_offset or,
-                * if that is <data_offset, it must fit before the
-                * size of each device.
-                * If size is zero, we find the largest size that fits.
+       if (mddev->layout != info->layout) {
+               /* Change layout
+                * we don't need to do anything at the md level, the
+                * personality will take care of it all.
                 */
-               if (mddev->sync_thread)
-                       return -EBUSY;
-               ITERATE_RDEV(mddev,rdev,tmp) {
-                       sector_t avail;
-                       int fit = (info->size == 0);
-                       if (rdev->sb_offset > rdev->data_offset)
-                               avail = (rdev->sb_offset*2) - rdev->data_offset;
-                       else
-                               avail = get_capacity(rdev->bdev->bd_disk)
-                                       - rdev->data_offset;
-                       if (fit && (info->size == 0 || info->size > avail/2))
-                               info->size = avail/2;
-                       if (avail < ((sector_t)info->size << 1))
-                               return -ENOSPC;
-               }
-               rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
-               if (!rv) {
-                       struct block_device *bdev;
-
-                       bdev = bdget_disk(mddev->gendisk, 0);
-                       if (bdev) {
-                               down(&bdev->bd_inode->i_sem);
-                               i_size_write(bdev->bd_inode, mddev->array_size << 10);
-                               up(&bdev->bd_inode->i_sem);
-                               bdput(bdev);
-                       }
-               }
-       }
-       if (mddev->raid_disks    != info->raid_disks) {
-               /* change the number of raid disks */
-               if (mddev->pers->reshape == NULL)
+               if (mddev->pers->reconfig == NULL)
                        return -EINVAL;
-               if (info->raid_disks <= 0 ||
-                   info->raid_disks >= mddev->max_disks)
+               else
+                       return mddev->pers->reconfig(mddev, info->layout, -1);
+       }
+       if (info->size >= 0 && mddev->size != info->size)
+               rv = update_size(mddev, info->size);
+
+       if (mddev->raid_disks    != info->raid_disks)
+               rv = update_raid_disks(mddev, info->raid_disks);
+
+       if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
+               if (mddev->pers->quiesce == NULL)
                        return -EINVAL;
-               if (mddev->sync_thread)
+               if (mddev->recovery || mddev->sync_thread)
                        return -EBUSY;
-               rv = mddev->pers->reshape(mddev, info->raid_disks);
-               if (!rv) {
-                       struct block_device *bdev;
-
-                       bdev = bdget_disk(mddev->gendisk, 0);
-                       if (bdev) {
-                               down(&bdev->bd_inode->i_sem);
-                               i_size_write(bdev->bd_inode, mddev->array_size << 10);
-                               up(&bdev->bd_inode->i_sem);
-                               bdput(bdev);
-                       }
+               if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
+                       /* add the bitmap */
+                       if (mddev->bitmap)
+                               return -EEXIST;
+                       if (mddev->default_bitmap_offset == 0)
+                               return -EINVAL;
+                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+                       mddev->pers->quiesce(mddev, 1);
+                       rv = bitmap_create(mddev);
+                       if (rv)
+                               bitmap_destroy(mddev);
+                       mddev->pers->quiesce(mddev, 0);
+               } else {
+                       /* remove the bitmap */
+                       if (!mddev->bitmap)
+                               return -ENOENT;
+                       if (mddev->bitmap->file)
+                               return -EINVAL;
+                       mddev->pers->quiesce(mddev, 1);
+                       bitmap_destroy(mddev);
+                       mddev->pers->quiesce(mddev, 0);
+                       mddev->bitmap_offset = 0;
                }
        }
-       md_update_sb(mddev);
+       md_update_sb(mddev, 1);
        return rv;
 }
 
@@ -2478,6 +4246,9 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
 {
        mdk_rdev_t *rdev;
 
+       if (mddev->pers == NULL)
+               return -ENODEV;
+
        rdev = find_rdev(mddev, dev);
        if (!rdev)
                return -ENODEV;
@@ -2486,13 +4257,21 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
        return 0;
 }
 
+static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+       mddev_t *mddev = bdev->bd_disk->private_data;
+
+       geo->heads = 2;
+       geo->sectors = 4;
+       geo->cylinders = get_capacity(mddev->gendisk) / 8;
+       return 0;
+}
+
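With the fixed fake geometry (heads=2, sectors=4) a "cylinder" is 8 sectors,
so cylinders = capacity/8. Worked numbers for an assumed 1 TiB array:

        /* get_capacity() = 2147483648 sectors
         * geo->cylinders = 2147483648 / 8 = 268435456
         * check: 268435456 cyls * 2 heads * 4 sectors * 512 B = 1 TiB
         */
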
 static int md_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
 {
-       char b[BDEVNAME_SIZE];
        int err = 0;
        void __user *argp = (void __user *)arg;
-       struct hd_geometry __user *loc = argp;
        mddev_t *mddev = NULL;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -2542,14 +4321,13 @@ static int md_ioctl(struct inode *inode, struct file *file,
                if (cnt > 0 ) {
                        printk(KERN_WARNING
                               "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
-                              "This will not be supported beyond 2.6\n",
+                              "This will not be supported beyond July 2006\n",
                               current->comm, current->pid);
                        cnt--;
                }
                err = autostart_array(new_decode_dev(arg));
                if (err) {
-                       printk(KERN_WARNING "md: autostart %s failed!\n",
-                               __bdevname(arg, b));
+                       printk(KERN_WARNING "md: autostart failed!\n");
                        goto abort;
                }
                goto done;
@@ -2612,8 +4390,11 @@ static int md_ioctl(struct inode *inode, struct file *file,
        /*
         * Commands querying/configuring an existing array:
         */
-       /* if we are initialised yet, only ADD_NEW_DISK or STOP_ARRAY is allowed */
-       if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
+       /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
+        * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
+       if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
+                       && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
+                       && cmd != GET_BITMAP_FILE) {
                err = -ENODEV;
                goto abort_unlock;
        }
@@ -2627,6 +4408,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
                        err = get_array_info(mddev, argp);
                        goto done_unlock;
 
+               case GET_BITMAP_FILE:
+                       err = get_bitmap_file(mddev, argp);
+                       goto done_unlock;
+
                case GET_DISK_INFO:
                        err = get_disk_info(mddev, argp);
                        goto done_unlock;
@@ -2649,34 +4434,26 @@ static int md_ioctl(struct inode *inode, struct file *file,
         * 4 sectors (with a BIG number of cylinders...). This drives
         * dosfs just mad... ;-)
         */
-               case HDIO_GETGEO:
-                       if (!loc) {
-                               err = -EINVAL;
-                               goto abort_unlock;
-                       }
-                       err = put_user (2, (char __user *) &loc->heads);
-                       if (err)
-                               goto abort_unlock;
-                       err = put_user (4, (char __user *) &loc->sectors);
-                       if (err)
-                               goto abort_unlock;
-                       err = put_user(get_capacity(mddev->gendisk)/8,
-                                       (short __user *) &loc->cylinders);
-                       if (err)
-                               goto abort_unlock;
-                       err = put_user (get_start_sect(inode->i_bdev),
-                                               (long __user *) &loc->start);
-                       goto done_unlock;
        }
 
        /*
         * The remaining ioctls are changing the state of the
-        * superblock, so we do not allow read-only arrays
-        * here:
+        * superblock, so we do not allow them on read-only arrays.
+        * However non-MD ioctls (e.g. get-size) will still come through
+        * here and hit the 'default' below, so only disallow
+        * 'md' ioctls, and switch to rw mode if started auto-readonly.
         */
-       if (mddev->ro) {
-               err = -EROFS;
-               goto abort_unlock;
+       if (_IOC_TYPE(cmd) == MD_MAJOR &&
+           mddev->ro && mddev->pers) {
+               if (mddev->ro == 2) {
+                       mddev->ro = 0;
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       md_wakeup_thread(mddev->thread);
+               } else {
+                       err = -EROFS;
+                       goto abort_unlock;
+               }
        }
 
        switch (cmd)
@@ -2690,9 +4467,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
                                err = add_new_disk(mddev, &info);
                        goto done_unlock;
                }
-               case HOT_GENERATE_ERROR:
-                       err = hot_generate_error(mddev, new_decode_dev(arg));
-                       goto done_unlock;
+
                case HOT_REMOVE_DISK:
                        err = hot_remove_disk(mddev, new_decode_dev(arg));
                        goto done_unlock;
@@ -2709,12 +4484,11 @@ static int md_ioctl(struct inode *inode, struct file *file,
                        err = do_md_run (mddev);
                        goto done_unlock;
 
+               case SET_BITMAP_FILE:
+                       err = set_bitmap_file(mddev, (int)arg);
+                       goto done_unlock;
+
                default:
-                       if (_IOC_TYPE(cmd) == MD_MAJOR)
-                               printk(KERN_WARNING "md: %s(pid %d) used"
-                                       " obsolete MD ioctl, upgrade your"
-                                       " software to use new ictls.\n",
-                                       current->comm, current->pid);
                        err = -EINVAL;
                        goto abort_unlock;
        }
@@ -2740,7 +4514,7 @@ static int md_open(struct inode *inode, struct file *file)
        mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
        int err;
 
-       if ((err = mddev_lock(mddev)))
+       if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
                goto out;
 
        err = 0;
@@ -2756,8 +4530,7 @@ static int md_release(struct inode *inode, struct file * file)
 {
        mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
 
-       if (!mddev)
-               BUG();
+       BUG_ON(!mddev);
        mddev_put(mddev);
 
        return 0;
@@ -2783,26 +4556,15 @@ static struct block_device_operations md_fops =
        .open           = md_open,
        .release        = md_release,
        .ioctl          = md_ioctl,
+       .getgeo         = md_getgeo,
        .media_changed  = md_media_changed,
        .revalidate_disk= md_revalidate,
 };
 
-int md_thread(void * arg)
+static int md_thread(void * arg)
 {
        mdk_thread_t *thread = arg;
 
-       lock_kernel();
-
-       /*
-        * Detach thread
-        */
-
-       daemonize(thread->name, mdname(thread->mddev));
-
-       current->exit_signal = SIGCHLD;
-       allow_signal(SIGKILL);
-       thread->tsk = current;
-
        /*
         * md_thread is a 'system-thread', its priority should be very
         * high. We avoid resource deadlocks individually in each
@@ -2814,27 +4576,30 @@ int md_thread(void * arg)
         * bdflush, otherwise bdflush will deadlock if there are too
         * many dirty RAID5 blocks.
         */
-       unlock_kernel();
 
-       complete(thread->event);
-       while (thread->run) {
-               void (*run)(mddev_t *);
+       current->flags |= PF_NOFREEZE;
+       allow_signal(SIGKILL);
+       while (!kthread_should_stop()) {
+
+               /* We need to wait INTERRUPTIBLE so that
+                * we don't add to the load-average.
+                * That means we need to be sure no signals are
+                * pending
+                */
+               if (signal_pending(current))
+                       flush_signals(current);
 
-               wait_event_interruptible(thread->wqueue,
-                                        test_bit(THREAD_WAKEUP, &thread->flags));
-               if (current->flags & PF_FREEZE)
-                       refrigerator(PF_FREEZE);
+               wait_event_interruptible_timeout
+                       (thread->wqueue,
+                        test_bit(THREAD_WAKEUP, &thread->flags)
+                        || kthread_should_stop(),
+                        thread->timeout);
 
                clear_bit(THREAD_WAKEUP, &thread->flags);
 
-               run = thread->run;
-               if (run)
-                       run(thread->mddev);
-
-               if (signal_pending(current))
-                       flush_signals(current);
+               thread->run(thread->mddev);
        }
-       complete(thread->event);
+
        return 0;
 }
 
@@ -2851,52 +4616,29 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
 {
        mdk_thread_t *thread;
-       int ret;
-       struct completion event;
 
-       thread = (mdk_thread_t *) kmalloc
-                               (sizeof(mdk_thread_t), GFP_KERNEL);
+       thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
        if (!thread)
                return NULL;
 
-       memset(thread, 0, sizeof(mdk_thread_t));
        init_waitqueue_head(&thread->wqueue);
 
-       init_completion(&event);
-       thread->event = &event;
        thread->run = run;
        thread->mddev = mddev;
-       thread->name = name;
-       ret = kernel_thread(md_thread, thread, 0);
-       if (ret < 0) {
+       thread->timeout = MAX_SCHEDULE_TIMEOUT;
+       thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+       if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
        }
-       wait_for_completion(&event);
        return thread;
 }
 
-void md_interrupt_thread(mdk_thread_t *thread)
-{
-       if (!thread->tsk) {
-               MD_BUG();
-               return;
-       }
-       dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
-       send_sig(SIGKILL, thread->tsk, 1);
-}
-
 void md_unregister_thread(mdk_thread_t *thread)
 {
-       struct completion event;
-
-       init_completion(&event);
+       dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
 
-       thread->event = &event;
-       thread->run = NULL;
-       thread->name = NULL;
-       md_interrupt_thread(thread);
-       wait_for_completion(&event);
+       kthread_stop(thread->tsk);
        kfree(thread);
 }
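
After the kthread conversion the lifecycle a personality sees is
register/wake/stop; a sketch (raid1d and the format string are illustrative,
matching how the in-tree personalities call this API):

        mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
        if (!mddev->thread)
                goto out_free_conf;

        md_wakeup_thread(mddev->thread);     /* sets THREAD_WAKEUP, wakes wqueue */
        /* ... */
        md_unregister_thread(mddev->thread); /* kthread_stop() + kfree() */
        mddev->thread = NULL;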
 
@@ -2907,20 +4649,24 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
                return;
        }
 
-       if (!rdev || rdev->faulty)
+       if (!rdev || test_bit(Faulty, &rdev->flags))
                return;
-
+/*
        dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
                mdname(mddev),
                MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
                __builtin_return_address(0),__builtin_return_address(1),
                __builtin_return_address(2),__builtin_return_address(3));
-
+*/
+       if (!mddev->pers)
+               return;
        if (!mddev->pers->error_handler)
                return;
        mddev->pers->error_handler(mddev,rdev);
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
+       md_new_event_inintr(mddev);
 }
 
 /* seq_file implementation /proc/mdstat */
@@ -2948,10 +4694,17 @@ static void status_unused(struct seq_file *seq)
 
 static void status_resync(struct seq_file *seq, mddev_t * mddev)
 {
-       unsigned long max_blocks, resync, res, dt, db, rt;
+       sector_t max_blocks, resync, res;
+       unsigned long dt, db, rt;
+       int scale;
+       unsigned int per_milli;
 
        resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
-       max_blocks = mddev->size;
+
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+               max_blocks = mddev->resync_max_sectors >> 1;
+       else
+               max_blocks = mddev->size;
 
        /*
         * Should not happen.
@@ -2960,9 +4713,22 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
                MD_BUG();
                return;
        }
-       res = (resync/1024)*1000/(max_blocks/1024 + 1);
+       /* Pick 'scale' such that (resync>>scale)*1000 will fit
+        * in a sector_t, and (max_blocks>>scale) will fit in a
+        * u32, as those are the requirements for sector_div.
+        * Thus 'scale' must be at least 10
+        */
+       scale = 10;
+       if (sizeof(sector_t) > sizeof(unsigned long)) {
+               while (max_blocks/2 > (1ULL<<(scale+32)))
+                       scale++;
+       }
+       res = (resync>>scale)*1000;
+       sector_div(res, (u32)((max_blocks>>scale)+1));
+
+       per_milli = res;
        {
-               int i, x = res/50, y = 20-x;
+               int i, x = per_milli/50, y = 20-x;
                seq_printf(seq, "[");
                for (i = 0; i < x; i++)
                        seq_printf(seq, "=");
@@ -2971,10 +4737,16 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
                        seq_printf(seq, ".");
                seq_printf(seq, "] ");
        }
-       seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
-                     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
-                      "resync" : "recovery"),
-                     res/10, res % 10, resync, max_blocks);
+       seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
+                  (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
+                   "reshape" :
+                   (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
+                    "check" :
+                    (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
+                     "resync" : "recovery"))),
+                  per_milli/10, per_milli % 10,
+                  (unsigned long long) resync,
+                  (unsigned long long) max_blocks);
 
        /*
         * We do not want to overflow, so the order of operands and
@@ -2987,12 +4759,13 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
         */
        dt = ((jiffies - mddev->resync_mark) / HZ);
        if (!dt) dt++;
-       db = resync - (mddev->resync_mark_cnt/2);
-       rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
+       db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
+               - mddev->resync_mark_cnt;
+       rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
 
        seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
 
-       seq_printf(seq, " speed=%ldK/sec", db/dt);
+       seq_printf(seq, " speed=%ldK/sec", db/2/dt);
 }
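
To make the scale/sector_div() dance above concrete, worked numbers
(assumed): 1,000,000 of 4,000,000 blocks done, so scale stays at its
minimum of 10:

        /* res       = (1000000 >> 10) * 1000 = 976 * 1000 = 976000
         * divisor   = (4000000 >> 10) + 1    = 3907       (fits in u32)
         * per_milli = 976000 / 3907          = 249  ->  printed as 24.9%
         * the >>10 costs ~0.1% accuracy but keeps both operands legal
         * for sector_div()
         */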
 
 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3057,23 +4830,29 @@ static void md_seq_stop(struct seq_file *seq, void *v)
                mddev_put(mddev);
 }
 
+struct mdstat_info {
+       int event;
+};
+
 static int md_seq_show(struct seq_file *seq, void *v)
 {
        mddev_t *mddev = v;
        sector_t size;
        struct list_head *tmp2;
        mdk_rdev_t *rdev;
-       int i;
+       struct mdstat_info *mi = seq->private;
+       struct bitmap *bitmap;
 
        if (v == (void*)1) {
+               struct mdk_personality *pers;
                seq_printf(seq, "Personalities : ");
                spin_lock(&pers_lock);
-               for (i = 0; i < MAX_PERSONALITY; i++)
-                       if (pers[i])
-                               seq_printf(seq, "[%s] ", pers[i]->name);
+               list_for_each_entry(pers, &pers_list, list)
+                       seq_printf(seq, "[%s] ", pers->name);
 
                spin_unlock(&pers_lock);
                seq_printf(seq, "\n");
+               mi->event = atomic_read(&md_event_count);
                return 0;
        }
        if (v == (void*)2) {
@@ -3081,14 +4860,17 @@ static int md_seq_show(struct seq_file *seq, void *v)
                return 0;
        }
 
-       if (mddev_lock(mddev)!=0) 
+       if (mddev_lock(mddev) < 0)
                return -EINTR;
+
        if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
                seq_printf(seq, "%s : %sactive", mdname(mddev),
                                                mddev->pers ? "" : "in");
                if (mddev->pers) {
-                       if (mddev->ro)
+                       if (mddev->ro==1)
                                seq_printf(seq, " (read-only)");
+                       if (mddev->ro==2)
+                               seq_printf(seq, "(auto-read-only)");
                        seq_printf(seq, " %s", mddev->pers->name);
                }
 
@@ -3097,10 +4879,13 @@ static int md_seq_show(struct seq_file *seq, void *v)
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                bdevname(rdev->bdev,b), rdev->desc_nr);
-                       if (rdev->faulty) {
+                       if (test_bit(WriteMostly, &rdev->flags))
+                               seq_printf(seq, "(W)");
+                       if (test_bit(Faulty, &rdev->flags)) {
                                seq_printf(seq, "(F)");
                                continue;
-                       }
+                       } else if (rdev->raid_disk < 0)
+                               seq_printf(seq, "(S)"); /* spare */
                        size += rdev->size;
                }
 
@@ -3112,14 +4897,52 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                seq_printf(seq, "\n      %llu blocks",
                                        (unsigned long long)size);
                }
+               if (mddev->persistent) {
+                       if (mddev->major_version != 0 ||
+                           mddev->minor_version != 90) {
+                               seq_printf(seq," super %d.%d",
+                                          mddev->major_version,
+                                          mddev->minor_version);
+                       }
+               } else
+                       seq_printf(seq, " super non-persistent");
 
                if (mddev->pers) {
                        mddev->pers->status (seq, mddev);
                        seq_printf(seq, "\n      ");
-                       if (mddev->curr_resync > 2)
-                               status_resync (seq, mddev);
-                       else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
-                               seq_printf(seq, "       resync=DELAYED");
+                       if (mddev->pers->sync_request) {
+                               if (mddev->curr_resync > 2) {
+                                       status_resync (seq, mddev);
+                                       seq_printf(seq, "\n      ");
+                               } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+                                       seq_printf(seq, "\tresync=DELAYED\n      ");
+                               else if (mddev->recovery_cp < MaxSector)
+                                       seq_printf(seq, "\tresync=PENDING\n      ");
+                       }
+               } else
+                       seq_printf(seq, "\n       ");
+
+               if ((bitmap = mddev->bitmap)) {
+                       unsigned long chunk_kb;
+                       unsigned long flags;
+                       spin_lock_irqsave(&bitmap->lock, flags);
+                       chunk_kb = bitmap->chunksize >> 10;
+                       seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
+                               "%lu%s chunk",
+                               bitmap->pages - bitmap->missing_pages,
+                               bitmap->pages,
+                               (bitmap->pages - bitmap->missing_pages)
+                                       << (PAGE_SHIFT - 10),
+                               chunk_kb ? chunk_kb : bitmap->chunksize,
+                               chunk_kb ? "KB" : "B");
+                       if (bitmap->file) {
+                               seq_printf(seq, ", file: ");
+                               seq_path(seq, bitmap->file->f_path.mnt,
+                                        bitmap->file->f_path.dentry," \t\n");
+                       }
+
+                       seq_printf(seq, "\n");
+                       spin_unlock_irqrestore(&bitmap->lock, flags);
                }
 
                seq_printf(seq, "\n");
@@ -3139,59 +4962,73 @@ static struct seq_operations md_seq_ops = {
 static int md_seq_open(struct inode *inode, struct file *file)
 {
        int error;
+       struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+       if (mi == NULL)
+               return -ENOMEM;
 
        error = seq_open(file, &md_seq_ops);
+       if (error)
+               kfree(mi);
+       else {
+               struct seq_file *p = file->private_data;
+               p->private = mi;
+               mi->event = atomic_read(&md_event_count);
+       }
        return error;
 }
 
+static int md_seq_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = file->private_data;
+       struct mdstat_info *mi = m->private;
+       m->private = NULL;
+       kfree(mi);
+       return seq_release(inode, file);
+}
+
+static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+{
+       struct seq_file *m = filp->private_data;
+       struct mdstat_info *mi = m->private;
+       int mask;
+
+       poll_wait(filp, &md_event_waiters, wait);
+
+       /* always allow read */
+       mask = POLLIN | POLLRDNORM;
+
+       if (mi->event != atomic_read(&md_event_count))
+               mask |= POLLERR | POLLPRI;
+       return mask;
+}
+
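The poll support is what lets a monitor block instead of re-reading
/proc/mdstat in a loop; a hedged userspace sketch (error handling omitted):

        #include <fcntl.h>
        #include <poll.h>
        #include <unistd.h>

        static void watch_mdstat(void)
        {
                char buf[4096];
                struct pollfd pfd = { .events = POLLPRI };

                pfd.fd = open("/proc/mdstat", O_RDONLY);
                for (;;) {
                        while (read(pfd.fd, buf, sizeof(buf)) > 0)
                                ;                   /* consume current state */
                        poll(&pfd, 1, -1);          /* wakes on md_new_event() */
                        lseek(pfd.fd, 0, SEEK_SET); /* rewind and re-read */
                }
        }
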
 static struct file_operations md_seq_fops = {
+       .owner          = THIS_MODULE,
        .open           = md_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = md_seq_release,
+       .poll           = mdstat_poll,
 };
 
-int register_md_personality(int pnum, mdk_personality_t *p)
+int register_md_personality(struct mdk_personality *p)
 {
-       if (pnum >= MAX_PERSONALITY) {
-               printk(KERN_ERR
-                      "md: tried to install personality %s as nr %d, but max is %lu\n",
-                      p->name, pnum, MAX_PERSONALITY-1);
-               return -EINVAL;
-       }
-
        spin_lock(&pers_lock);
-       if (pers[pnum]) {
-               spin_unlock(&pers_lock);
-               MD_BUG();
-               return -EBUSY;
-       }
-
-       pers[pnum] = p;
-       printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
+       list_add_tail(&p->list, &pers_list);
+       printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
        spin_unlock(&pers_lock);
        return 0;
 }
 
-int unregister_md_personality(int pnum)
+int unregister_md_personality(struct mdk_personality *p)
 {
-       if (pnum >= MAX_PERSONALITY) {
-               MD_BUG();
-               return -EINVAL;
-       }
-
-       printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
+       printk(KERN_INFO "md: %s personality unregistered\n", p->name);
        spin_lock(&pers_lock);
-       pers[pnum] = NULL;
+       list_del_init(&p->list);
        spin_unlock(&pers_lock);
        return 0;
 }
 
-void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors)
-{
-       rdev->bdev->bd_contains->bd_disk->sync_io += nr_sectors;
-}
-
 static int is_mddev_idle(mddev_t *mddev)
 {
        mdk_rdev_t * rdev;
@@ -3202,10 +5039,23 @@ static int is_mddev_idle(mddev_t *mddev)
        idle = 1;
        ITERATE_RDEV(mddev,rdev,tmp) {
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-               curr_events = disk_stat_read(disk, read_sectors) + 
-                               disk_stat_read(disk, write_sectors) - 
-                               disk->sync_io;
-               if ((curr_events - rdev->last_events) > 32) {
+               curr_events = disk_stat_read(disk, sectors[0]) + 
+                               disk_stat_read(disk, sectors[1]) - 
+                               atomic_read(&disk->sync_io);
+               /* The difference between curr_events and last_events
+                * will be affected by any new non-sync IO (making
+                * curr_events bigger) and any difference in the amount of
+                * in-flight syncio (making curr_events bigger or smaller)
+                * The amount in-flight is currently limited to
+                * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
+                * which is at most 4096 sectors.
+                * These numbers are fairly fragile and should be made
+                * more robust, probably by enforcing the
+                * 'window size' that md_do_sync sort-of uses.
+                *
+                * Note: the following is an unsigned comparison.
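+                * E.g. a burst of non-sync IO that moves curr_events
+                * 5000 sectors past last_events gives 5000+4096 > 8192:
+                * not idle.  A swing of -5000 wraps to a huge unsigned
+                * value and also fails; only |diff| <= 4096 lands in
+                * 0..8192 and counts as idle.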
+                */
+               if ((curr_events - rdev->last_events + 4096) > 8192) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
@@ -3226,19 +5076,34 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
 }
 
 
-void md_write_start(mddev_t *mddev)
+/* md_write_start(mddev, bi)
+ * If we need to update some array metadata (e.g. 'active' flag
+ * in superblock) before writing, schedule a superblock update
+ * and wait for it to complete.
+ */
+void md_write_start(mddev_t *mddev, struct bio *bi)
 {
-       if (!atomic_read(&mddev->writes_pending)) {
-               mddev_lock_uninterruptible(mddev);
+       if (bio_data_dir(bi) != WRITE)
+               return;
+
+       BUG_ON(mddev->ro == 1);
+       if (mddev->ro == 2) {
+               /* need to switch to read/write */
+               mddev->ro = 0;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               md_wakeup_thread(mddev->thread);
+       }
+       atomic_inc(&mddev->writes_pending);
+       if (mddev->in_sync) {
+               spin_lock_irq(&mddev->write_lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
-                       del_timer(&mddev->safemode_timer);
-                       md_update_sb(mddev);
+                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       md_wakeup_thread(mddev->thread);
                }
-               atomic_inc(&mddev->writes_pending);
-               mddev_unlock(mddev);
-       } else
-               atomic_inc(&mddev->writes_pending);
+               spin_unlock_irq(&mddev->write_lock);
+       }
+       wait_event(mddev->sb_wait, mddev->flags==0);
 }
 
 void md_write_end(mddev_t *mddev)
@@ -3246,61 +5111,75 @@ void md_write_end(mddev_t *mddev)
        if (atomic_dec_and_test(&mddev->writes_pending)) {
                if (mddev->safemode == 2)
                        md_wakeup_thread(mddev->thread);
-               else
+               else if (mddev->safemode_delay)
                        mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
        }
 }
 
-static inline void md_enter_safemode(mddev_t *mddev)
+/* md_allow_write(mddev)
+ * Calling this ensures that the array is marked 'active' so that writes
+ * may proceed without blocking.  It is important to call this before
+ * attempting a GFP_KERNEL allocation while holding the mddev lock.
+ * Must be called with mddev_lock held.
+ */
+void md_allow_write(mddev_t *mddev)
 {
-       if (!mddev->safemode) return;
-       if (mddev->safemode == 2 &&
-           (atomic_read(&mddev->writes_pending) || mddev->in_sync ||
-                   mddev->recovery_cp != MaxSector))
-               return; /* avoid the lock */
-       mddev_lock_uninterruptible(mddev);
-       if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
-           !mddev->in_sync && mddev->recovery_cp == MaxSector) {
-               mddev->in_sync = 1;
-               md_update_sb(mddev);
-       }
-       mddev_unlock(mddev);
-
-       if (mddev->safemode == 1)
-               mddev->safemode = 0;
-}
+       if (!mddev->pers)
+               return;
+       if (mddev->ro)
+               return;
 
-void md_handle_safemode(mddev_t *mddev)
-{
-       if (signal_pending(current)) {
-               printk(KERN_INFO "md: %s in immediate safe mode\n",
-                       mdname(mddev));
-               mddev->safemode = 2;
-               flush_signals(current);
-       }
-       md_enter_safemode(mddev);
+       spin_lock_irq(&mddev->write_lock);
+       if (mddev->in_sync) {
+               mddev->in_sync = 0;
+               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               if (mddev->safemode_delay &&
+                   mddev->safemode == 0)
+                       mddev->safemode = 1;
+               spin_unlock_irq(&mddev->write_lock);
+               md_update_sb(mddev, 0);
+       } else
+               spin_unlock_irq(&mddev->write_lock);
 }
+EXPORT_SYMBOL_GPL(md_allow_write);
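
A sketch of the contract a personality follows around writes (simplified:
the real personalities call md_write_end() from their endio path once the
write has actually completed; example_make_request is illustrative):

        static int example_make_request(request_queue_t *q, struct bio *bio)
        {
                mddev_t *mddev = q->queuedata;

                /* no-op for reads; for writes: drops auto-read-only,
                 * marks the superblock active and waits for the update */
                md_write_start(mddev, bio);

                /* ... issue the write to the component devices ... */

                md_write_end(mddev);    /* last writer arms safemode timer */
                return 0;
        }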
 
-
-DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 
 #define SYNC_MARKS     10
 #define        SYNC_MARK_STEP  (3*HZ)
-static void md_do_sync(mddev_t *mddev)
+void md_do_sync(mddev_t *mddev)
 {
        mddev_t *mddev2;
        unsigned int currspeed = 0,
                 window;
-       sector_t max_sectors,j;
+       sector_t max_sectors,j, io_sectors;
        unsigned long mark[SYNC_MARKS];
        sector_t mark_cnt[SYNC_MARKS];
        int last_mark,m;
        struct list_head *tmp;
        sector_t last_check;
+       int skipped = 0;
+       struct list_head *rtmp;
+       mdk_rdev_t *rdev;
+       char *desc;
 
        /* just in case the thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                return;
+       if (mddev->ro) /* never try to sync a read-only array */
+               return;
+
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+               if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+                       desc = "data-check";
+               else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       desc = "requested-resync";
+               else
+                       desc = "resync";
+       } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+               desc = "reshape";
+       else
+               desc = "recovery";
 
        /* we overload curr_resync somewhat here.
         * 0 == not engaged in resync at all
@@ -3308,54 +5187,95 @@ static void md_do_sync(mddev_t *mddev)
         * 1 == like 2, but have yielded to allow conflicting resync to
         *              commence
         * other == active in resync - this many blocks
+        *
+        * Before starting a resync we must have set curr_resync to
+        * 2, and then checked that every "conflicting" array has curr_resync
+        * less than ours.  When we find one that is the same or higher
+        * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
+        * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
+        * This will mean we have to start checking from the beginning again.
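+        * (E.g. if md0 and md1 sit on shared disks and both start, the
+        * one with the lower mddev address drops back to 1, wakes the
+        * other, and re-runs the whole scan from try_again when it is
+        * woken in turn.)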
+        *
         */
+
        do {
                mddev->curr_resync = 2;
 
+       try_again:
+               if (kthread_should_stop()) {
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       goto skip;
+               }
                ITERATE_MDDEV(mddev2,tmp) {
                        if (mddev2 == mddev)
                                continue;
                        if (mddev2->curr_resync && 
                            match_mddev_units(mddev,mddev2)) {
-                               printk(KERN_INFO "md: delaying resync of %s"
-                                       " until %s has finished resync (they"
-                                       " share one or more physical units)\n",
-                                      mdname(mddev), mdname(mddev2));
-                               if (mddev < mddev2) {/* arbitrarily yield */
+                               DEFINE_WAIT(wq);
+                               if (mddev < mddev2 && mddev->curr_resync == 2) {
+                                       /* arbitrarily yield */
                                        mddev->curr_resync = 1;
                                        wake_up(&resync_wait);
                                }
-                               if (wait_event_interruptible(resync_wait,
-                                                            mddev2->curr_resync < mddev->curr_resync)) {
-                                       flush_signals(current);
+                               if (mddev > mddev2 && mddev->curr_resync == 1)
+                                       /* no need to wait here, we can wait the next
+                                        * time 'round when curr_resync == 2
+                                        */
+                                       continue;
+                               prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+                               if (!kthread_should_stop() &&
+                                   mddev2->curr_resync >= mddev->curr_resync) {
+                                       printk(KERN_INFO "md: delaying %s of %s"
+                                              " until %s has finished (they"
+                                              " share one or more physical units)\n",
+                                              desc, mdname(mddev), mdname(mddev2));
                                        mddev_put(mddev2);
-                                       goto skip;
+                                       schedule();
+                                       finish_wait(&resync_wait, &wq);
+                                       goto try_again;
                                }
-                       }
-                       if (mddev->curr_resync == 1) {
-                               mddev_put(mddev2);
-                               break;
+                               finish_wait(&resync_wait, &wq);
                        }
                }
        } while (mddev->curr_resync < 2);
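
The handshake above is easier to see in isolation.  Below is a minimal
userspace sketch of the same arbitration, with hypothetical names
(fake_mddev, resync_thread), a pthread mutex/condvar standing in for
resync_wait, and kthread_should_stop() and the physical-unit test
omitted.  It models the protocol, it is not kernel code; note that
under one big lock the scans serialize, so the yield branch is rarely
exercised here, whereas in the kernel the scans genuinely race:

	/* Two threads race to resync; the lower-address "array" yields. */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	struct fake_mddev { int curr_resync; };

	static struct fake_mddev arrays[2];
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER;

	static void *resync_thread(void *arg)
	{
		struct fake_mddev *mddev = arg, *mddev2;
		int i;

		pthread_mutex_lock(&lock);
		do {
			mddev->curr_resync = 2;	/* checking for conflicts */
			for (i = 0; i < 2; i++) {
				mddev2 = &arrays[i];
				if (mddev2 == mddev || !mddev2->curr_resync)
					continue;
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield, as in md_do_sync() */
					mddev->curr_resync = 1;
					pthread_cond_broadcast(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					continue;	/* wait next time round */
				/* the kernel restarts the whole scan (try_again)
				 * after each wakeup; with two arrays, re-checking
				 * this one conflict is equivalent */
				while (mddev2->curr_resync >= mddev->curr_resync)
					pthread_cond_wait(&resync_wait, &lock);
			}
		} while (mddev->curr_resync < 2);	/* rescan if we yielded */
		mddev->curr_resync = 3;	/* "active"; really a block count */
		pthread_mutex_unlock(&lock);

		sleep(1);		/* the actual resync would run here */

		pthread_mutex_lock(&lock);
		mddev->curr_resync = 0;	/* done; release any waiter */
		pthread_cond_broadcast(&resync_wait);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[2];
		int i;

		for (i = 0; i < 2; i++)
			pthread_create(&t[i], NULL, resync_thread, &arrays[i]);
		for (i = 0; i < 2; i++)
			pthread_join(t[i], NULL);
		printf("both resyncs completed without deadlock\n");
		return 0;
	}
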
 
-       max_sectors = mddev->size << 1;
+       j = 0;
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+               /* resync follows the size requested by the personality,
+                * which defaults to physical size, but can be virtual size
+                */
+               max_sectors = mddev->resync_max_sectors;
+               mddev->resync_mismatches = 0;
+               /* we don't use the checkpoint if there's a bitmap;
+                * the bitmap already records which regions may need syncing
+                */
+               if (!mddev->bitmap &&
+                   !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       j = mddev->recovery_cp;
+       } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+               max_sectors = mddev->size << 1;
+       else {
+               /* recovery follows the physical size of devices */
+               max_sectors = mddev->size << 1;
+               j = MaxSector;
+               ITERATE_RDEV(mddev,rdev,rtmp)
+                       if (rdev->raid_disk >= 0 &&
+                           !test_bit(Faulty, &rdev->flags) &&
+                           !test_bit(In_sync, &rdev->flags) &&
+                           rdev->recovery_offset < j)
+                               j = rdev->recovery_offset;
+       }
 
-       printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
-       printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
-               " %d KB/sec/disc.\n", sysctl_speed_limit_min);
-       printk(KERN_INFO "md: using maximum available idle IO bandwith "
-              "(but not more than %d KB/sec) for reconstruction.\n",
-              sysctl_speed_limit_max);
+       printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
+       printk(KERN_INFO "md: minimum _guaranteed_ speed:"
+               " %d KB/sec/disk.\n", speed_min(mddev));
+       printk(KERN_INFO "md: using maximum available idle IO bandwidth "
+              "(but not more than %d KB/sec) for %s.\n",
+              speed_max(mddev), desc);
 
        is_mddev_idle(mddev); /* this also initializes IO event counters */
-       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
-               j = mddev->recovery_cp;
-       else
-               j = 0;
+
+       io_sectors = 0;
        for (m = 0; m < SYNC_MARKS; m++) {
                mark[m] = jiffies;
-               mark_cnt[m] = j;
+               mark_cnt[m] = io_sectors;
        }
        last_mark = 0;
        mddev->resync_mark = mark[last_mark];
@@ -3365,59 +5285,73 @@ static void md_do_sync(mddev_t *mddev)
         * Tune reconstruction:
         */
        window = 32*(PAGE_SIZE/512);
-       printk(KERN_INFO "md: using %dk window, over a total of %Lu blocks.\n",
+       printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
                window/2,(unsigned long long) max_sectors/2);
 
        atomic_set(&mddev->recovery_active, 0);
        init_waitqueue_head(&mddev->recovery_wait);
        last_check = 0;
 
-       if (j)
+       if (j>2) {
                printk(KERN_INFO 
-                       "md: resuming recovery of %s from checkpoint.\n",
-                       mdname(mddev));
+                      "md: resuming %s of %s from checkpoint.\n",
+                      desc, mdname(mddev));
+               mddev->curr_resync = j;
+       }
 
        while (j < max_sectors) {
-               int sectors;
+               sector_t sectors;
 
-               sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
-               if (sectors < 0) {
+               skipped = 0;
+               sectors = mddev->pers->sync_request(mddev, j, &skipped,
+                                           currspeed < speed_min(mddev));
+               if (sectors == 0) {
                        set_bit(MD_RECOVERY_ERR, &mddev->recovery);
                        goto out;
                }
-               atomic_add(sectors, &mddev->recovery_active);
+
+               if (!skipped) { /* actual IO requested */
+                       io_sectors += sectors;
+                       atomic_add(sectors, &mddev->recovery_active);
+               }
+
                j += sectors;
                if (j>1) mddev->curr_resync = j;
+               mddev->curr_mark_cnt = io_sectors;
+               if (last_check == 0)
+               if (last_check == 0)
+                       /* this is the earliest that the rebuild will be
+                        */
+                       md_new_event(mddev);
 
-               if (last_check + window > j || j == max_sectors)
+               if (last_check + window > io_sectors || j == max_sectors)
                        continue;
 
-               last_check = j;
+               last_check = io_sectors;
 
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
                    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
                        break;
 
        repeat:
-               if (jiffies >= mark[last_mark] + SYNC_MARK_STEP ) {
+               if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
                        /* step marks */
                        int next = (last_mark+1) % SYNC_MARKS;
 
                        mddev->resync_mark = mark[next];
                        mddev->resync_mark_cnt = mark_cnt[next];
                        mark[next] = jiffies;
-                       mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
+                       mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
                        last_mark = next;
                }
 
 
-               if (signal_pending(current)) {
+               if (kthread_should_stop()) {
-                       /*
-                        * got a signal, exit.
-                        */
-                       printk(KERN_INFO
-                               "md: md_do_sync() got signal ... exiting\n");
+                       /*
+                        * asked to stop, exit.
+                        */
+                       printk(KERN_INFO
+                              "md: md_do_sync() was told to stop ... exiting\n");
-                       flush_signals(current);
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto out;
                }
@@ -3433,18 +5367,18 @@ static void md_do_sync(mddev_t *mddev)
                mddev->queue->unplug_fn(mddev->queue);
                cond_resched();
 
-               currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
+               currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
+                       /((jiffies-mddev->resync_mark)/HZ +1) +1;
 
-               if (currspeed > sysctl_speed_limit_min) {
-                       if ((currspeed > sysctl_speed_limit_max) ||
+               if (currspeed > speed_min(mddev)) {
+                       if ((currspeed > speed_max(mddev)) ||
                                        !is_mddev_idle(mddev)) {
-                               current->state = TASK_INTERRUPTIBLE;
-                               schedule_timeout(HZ/4);
+                               msleep(500);
                                goto repeat;
                        }
                }
        }
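
The loop above meters itself with the rotating marks: every
SYNC_MARK_STEP the oldest (jiffies, io_sectors) pair becomes
resync_mark/resync_mark_cnt, so currspeed is an average over the last
SYNC_MARKS-1 steps (about 27 seconds, assuming the SYNC_MARKS=10 and
SYNC_MARK_STEP=3*HZ defined earlier in this file), and the switch to
time_after_eq() keeps the stepping correct across jiffies wraparound.
A standalone restatement of the speed arithmetic, with made-up numbers:

	/* Userspace restatement of the currspeed formula (hypothetical
	 * helper; jiffies/HZ replaced by plain seconds). */
	#include <stdio.h>

	static unsigned long kb_per_sec(unsigned long long sectors_done,
					unsigned long long sectors_at_mark,
					unsigned long secs_since_mark)
	{
		/* sectors are 512 bytes, so /2 converts to KB; the "+1"s
		 * mirror the kernel's guards against dividing by zero and
		 * reporting a zero speed */
		return (unsigned long)((sectors_done - sectors_at_mark) / 2)
			/ (secs_since_mark + 1) + 1;
	}

	int main(void)
	{
		/* e.g. 600000 sectors (~293 MB) since a mark taken 27s ago
		 * -> about 10715 KB/sec, which would be throttled via
		 * msleep(500) whenever it exceeds speed_max() while the
		 * array is not idle */
		printf("%lu KB/sec\n", kb_per_sec(600000, 0, 27));
		return 0;
	}
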
-       printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
+       printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
        /*
         * this also signals 'finished resyncing' to md_stop
         */
@@ -3454,26 +5388,41 @@ static void md_do_sync(mddev_t *mddev)
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
-       mddev->pers->sync_request(mddev, max_sectors, 1);
+       mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-           mddev->curr_resync > 2 &&
-           mddev->curr_resync > mddev->recovery_cp) {
-               if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
-                       printk(KERN_INFO 
-                               "md: checkpointing recovery of %s.\n",
-                               mdname(mddev));
-                       mddev->recovery_cp = mddev->curr_resync;
-               } else
-                       mddev->recovery_cp = MaxSector;
+           !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+           mddev->curr_resync > 2) {
+               if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+                       if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+                               if (mddev->curr_resync >= mddev->recovery_cp) {
+                                       printk(KERN_INFO
+                                              "md: checkpointing %s of %s.\n",
+                                              desc, mdname(mddev));
+                                       mddev->recovery_cp = mddev->curr_resync;
+                               }
+                       } else
+                               mddev->recovery_cp = MaxSector;
+               } else {
+                       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+                               mddev->curr_resync = MaxSector;
+                       ITERATE_RDEV(mddev,rdev,rtmp)
+                               if (rdev->raid_disk >= 0 &&
+                                   !test_bit(Faulty, &rdev->flags) &&
+                                   !test_bit(In_sync, &rdev->flags) &&
+                                   rdev->recovery_offset < mddev->curr_resync)
+                                       rdev->recovery_offset = mddev->curr_resync;
+               }
        }
+       set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
-       md_enter_safemode(mddev);
  skip:
        mddev->curr_resync = 0;
+       wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
 }
+EXPORT_SYMBOL_GPL(md_do_sync);
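
To put illustrative numbers on the checkpoint rules above (an assumed
scenario, not from the source): if a plain resync is interrupted at
curr_resync == 10000000, recovery_cp is set to 10000000 and the next
pass logs "resuming resync ... from checkpoint" and starts there,
while a resync that runs to completion sets recovery_cp to MaxSector,
marking the array clean.  A recovery (rebuild onto spares) instead
advances each out-of-sync device's rdev->recovery_offset, and a check
pass (MD_RECOVERY_CHECK) deliberately records nothing, since it does
not change what is in sync.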
 
 
 /*
@@ -3504,20 +5453,48 @@ void md_check_recovery(mddev_t *mddev)
        struct list_head *rtmp;
 
 
-       dprintk(KERN_INFO "md: recovery thread got woken up ...\n");
+       if (mddev->bitmap)
+               bitmap_daemon_work(mddev->bitmap);
 
        if (mddev->ro)
                return;
+
+       if (signal_pending(current)) {
+               if (mddev->pers->sync_request) {
+                       printk(KERN_INFO "md: %s in immediate safe mode\n",
+                              mdname(mddev));
+                       mddev->safemode = 2;
+               }
+               flush_signals(current);
+       }
+
        if ( ! (
-               mddev->sb_dirty ||
+               mddev->flags ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
-               test_bit(MD_RECOVERY_DONE, &mddev->recovery)
+               test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+               (mddev->safemode == 1) ||
+               (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
+                && !mddev->in_sync && mddev->recovery_cp == MaxSector)
                ))
                return;
-       if (mddev_trylock(mddev)==0) {
+
+       if (mddev_trylock(mddev)) {
                int spares =0;
-               if (mddev->sb_dirty)
-                       md_update_sb(mddev);
+
+               spin_lock_irq(&mddev->write_lock);
+               if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
+                   !mddev->in_sync && mddev->recovery_cp == MaxSector) {
+                       mddev->in_sync = 1;
+                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               }
+               if (mddev->safemode == 1)
+                       mddev->safemode = 0;
+               spin_unlock_irq(&mddev->write_lock);
+
+               if (mddev->flags)
+                       md_update_sb(mddev, 0);
+
+
                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
                        /* resync/recovery still happening */
@@ -3534,52 +5511,83 @@ void md_check_recovery(mddev_t *mddev)
                                /* activate any spares */
                                mddev->pers->spare_active(mddev);
                        }
-                       md_update_sb(mddev);
+                       md_update_sb(mddev, 1);
+
+                       /* if array is no longer degraded, then any saved_raid_disk
+                        * information must be scrapped
+                        */
+                       if (!mddev->degraded)
+                               ITERATE_RDEV(mddev,rdev,rtmp)
+                                       rdev->saved_raid_disk = -1;
+
                        mddev->recovery = 0;
                        /* flag recovery needed just to double check */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       wake_up(&resync_wait);
+                       md_new_event(mddev);
                        goto unlock;
                }
-               if (mddev->recovery) {
-                       /* probably just the RECOVERY_NEEDED flag */
-                       mddev->recovery = 0;
-                       wake_up(&resync_wait);
-               }
+               /* Clear status bits that carry no meaning while no sync
+                * thread is running, but might have been left set
+                */
+               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
+               clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
+               clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 
+               if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+                       goto unlock;
                /* no recovery is running.
                 * remove any failed drives, then
-                * add spares if possible
+                * add spares if possible.
+                * Spares are also removed and re-added, to allow
+                * the personality to fail the re-add.
                 */
-               ITERATE_RDEV(mddev,rdev,rtmp) {
+               ITERATE_RDEV(mddev,rdev,rtmp)
                        if (rdev->raid_disk >= 0 &&
-                           rdev->faulty &&
+                           (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
                            atomic_read(&rdev->nr_pending)==0) {
-                               if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
+                               if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
+                                       char nm[20];
+                                       sprintf(nm,"rd%d", rdev->raid_disk);
+                                       sysfs_remove_link(&mddev->kobj, nm);
                                        rdev->raid_disk = -1;
+                               }
                        }
-                       if (!rdev->faulty && rdev->raid_disk >= 0 && !rdev->in_sync)
-                               spares++;
-               }
+
                if (mddev->degraded) {
                        ITERATE_RDEV(mddev,rdev,rtmp)
                                if (rdev->raid_disk < 0
-                                   && !rdev->faulty) {
-                                       if (mddev->pers->hot_add_disk(mddev,rdev))
+                                   && !test_bit(Faulty, &rdev->flags)) {
+                                       rdev->recovery_offset = 0;
+                                       if (mddev->pers->hot_add_disk(mddev,rdev)) {
+                                               char nm[20];
+                                               sprintf(nm, "rd%d", rdev->raid_disk);
+                                               sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
                                                spares++;
-                                       else
+                                               md_new_event(mddev);
+                                       } else
                                                break;
                                }
                }
 
-               if (!spares && (mddev->recovery_cp == MaxSector )) {
-                       /* nothing we can do ... */
+               if (spares) {
+                       clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+               } else if (mddev->recovery_cp < MaxSector) {
+                       set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+               } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+                       /* nothing to be done ... */
                        goto unlock;
-               }
+
                if (mddev->pers->sync_request) {
                        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-                       if (!spares)
-                               set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+                       if (spares && mddev->bitmap && ! mddev->bitmap->file) {
+                               /* We are adding a device or devices to an array
+                                * which has the bitmap stored on all devices.
+                                * So make sure all bitmap pages get written
+                                */
+                               bitmap_write_all(mddev->bitmap);
+                       }
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
                                                                "%s_resync");
@@ -3589,17 +5597,17 @@ void md_check_recovery(mddev_t *mddev)
                                        mdname(mddev));
                                /* leave the spares where they are, it shouldn't hurt */
                                mddev->recovery = 0;
-                       } else {
+                       } else
                                md_wakeup_thread(mddev->sync_thread);
-                       }
+                       md_new_event(mddev);
                }
        unlock:
                mddev_unlock(mddev);
        }
 }
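
Once no sync thread is running, the tail of this function picks the
next action with a fixed priority: freshly added spares win, then an
unclean array forces a resync, then an explicitly requested sync/check
is honoured, and otherwise nothing starts.  A hypothetical userspace
condensation of that choice (plain flags stand in for the recovery bit
operations; no such helper exists in md.c):

	#include <stdbool.h>

	enum action { ACT_IDLE, ACT_RECOVER, ACT_RESYNC, ACT_REQUESTED };

	static enum action choose_recovery_action(int spares,
						  bool array_clean,
						  bool sync_requested)
	{
		if (spares)
			return ACT_RECOVER;	/* rebuild outranks resync/check */
		if (!array_clean)
			return ACT_RESYNC;	/* recovery_cp < MaxSector */
		if (sync_requested)
			return ACT_REQUESTED;	/* userspace asked for sync/check */
		return ACT_IDLE;		/* nothing to be done */
	}
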
 
-int md_notify_reboot(struct notifier_block *this,
-                                       unsigned long code, void *x)
+static int md_notify_reboot(struct notifier_block *this,
+                           unsigned long code, void *x)
 {
        struct list_head *tmp;
        mddev_t *mddev;
@@ -3609,8 +5617,10 @@ int md_notify_reboot(struct notifier_block *this,
                printk(KERN_INFO "md: stopping all md devices.\n");
 
                ITERATE_MDDEV(mddev,tmp)
-                       if (mddev_trylock(mddev)==0)
+                       if (mddev_trylock(mddev)) {
                                do_md_stop (mddev, 1);
+                               mddev_unlock(mddev);
+                       }
                /*
                 * certain more exotic SCSI devices are known to be
                 * volatile wrt too early system reboots. While the
@@ -3622,7 +5632,7 @@ int md_notify_reboot(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
-struct notifier_block md_notifier = {
+static struct notifier_block md_notifier = {
        .notifier_call  = md_notify_reboot,
        .next           = NULL,
        .priority       = INT_MAX, /* before any real devices */
@@ -3639,37 +5649,18 @@ static void md_geninit(void)
                p->proc_fops = &md_seq_fops;
 }
 
-int __init md_init(void)
+static int __init md_init(void)
 {
-       int minor;
-
-       printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
-                       " MD_SB_DISKS=%d\n",
-                       MD_MAJOR_VERSION, MD_MINOR_VERSION,
-                       MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
-
        if (register_blkdev(MAJOR_NR, "md"))
                return -1;
        if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
                unregister_blkdev(MAJOR_NR, "md");
                return -1;
        }
-       devfs_mk_dir("md");
-       blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
-                               md_probe, NULL, NULL);
-       blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
+       blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
+                           md_probe, NULL, NULL);
+       blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                            md_probe, NULL, NULL);
-
-       for (minor=0; minor < MAX_MD_DEVS; ++minor)
-               devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
-                               S_IFBLK|S_IRUSR|S_IWUSR,
-                               "md/%d", minor);
-
-       for (minor=0; minor < MAX_MD_DEVS; ++minor)
-               devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
-                             S_IFBLK|S_IRUSR|S_IWUSR,
-                             "md/d%d", minor);
-
 
        register_reboot_notifier(&md_notifier);
        raid_table_header = register_sysctl_table(raid_root_table, 1);
@@ -3697,7 +5688,6 @@ void md_autodetect_dev(dev_t dev)
 
 static void autostart_arrays(int part)
 {
-       char b[BDEVNAME_SIZE];
        mdk_rdev_t *rdev;
        int i;
 
@@ -3707,12 +5697,10 @@ static void autostart_arrays(int part)
                dev_t dev = detected_devices[i];
 
                rdev = md_import_device(dev,0, 0);
-               if (IS_ERR(rdev)) {
-                       printk(KERN_ALERT "md: could not import %s!\n",
-                               __bdevname(dev, b));
+               if (IS_ERR(rdev))
                        continue;
-               }
-               if (rdev->faulty) {
+
+               if (test_bit(Faulty, &rdev->flags)) {
                        MD_BUG();
                        continue;
                }
@@ -3723,21 +5711,15 @@ static void autostart_arrays(int part)
        autorun_devices(part);
 }
 
-#endif
+#endif /* !MODULE */
 
 static __exit void md_exit(void)
 {
        mddev_t *mddev;
        struct list_head *tmp;
-       int i;
-       blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
-       blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
-       for (i=0; i < MAX_MD_DEVS; i++)
-               devfs_remove("md/%d", i);
-       for (i=0; i < MAX_MD_DEVS; i++)
-               devfs_remove("md/d%d", i);
 
-       devfs_remove("md");
+       blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
+       blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
 
        unregister_blkdev(MAJOR_NR,"md");
        unregister_blkdev(mdp_major, "mdp");
@@ -3759,18 +5741,35 @@ static __exit void md_exit(void)
 module_init(md_init)
 module_exit(md_exit)
 
+static int get_ro(char *buffer, struct kernel_param *kp)
+{
+       return sprintf(buffer, "%d", start_readonly);
+}
+static int set_ro(const char *val, struct kernel_param *kp)
+{
+       char *e;
+       int num = simple_strtoul(val, &e, 10);
+       if (*val && (*e == '\0' || *e == '\n')) {
+               start_readonly = num;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
+module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
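
A usage note on the two knobs above, assuming the usual build of this
file as the md-mod module: both should appear under
/sys/module/md_mod/parameters/, so "echo 1 >
/sys/module/md_mod/parameters/start_ro" makes subsequently assembled
arrays come up read-only until first written, "md-mod.start_ro=1" on
the kernel command line does the same from boot, and
start_dirty_degraded takes an integer the same way.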
+
+
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
 EXPORT_SYMBOL(md_error);
-EXPORT_SYMBOL(md_sync_acct);
 EXPORT_SYMBOL(md_done_sync);
 EXPORT_SYMBOL(md_write_start);
 EXPORT_SYMBOL(md_write_end);
-EXPORT_SYMBOL(md_handle_safemode);
 EXPORT_SYMBOL(md_register_thread);
 EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
-EXPORT_SYMBOL(md_print_devices);
-EXPORT_SYMBOL(md_interrupt_thread);
 EXPORT_SYMBOL(md_check_recovery);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("md");
+MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);