X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fmd%2Fraid5.c;fp=drivers%2Fmd%2Fraid5.c;h=2dba305daf3c887799453caefae8d5a36535de4b;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=450066007160ca04d1a8956abe870bc1b363210a;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 450066007..2dba305da 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2,11 +2,8 @@
  * raid5.c : Multiple Devices driver for Linux
  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
  *	   Copyright (C) 1999, 2000 Ingo Molnar
- *	   Copyright (C) 2002, 2003 H. Peter Anvin
  *
- * RAID-4/5/6 management functions.
- * Thanks to Penguin Computing for making the RAID-6 development possible
- * by donating a test server!
+ * RAID-5 management functions.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,38 +15,14 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-/*
- * BITMAP UNPLUGGING:
- *
- * The sequencing for updating the bitmap reliably is a little
- * subtle (and I got it wrong the first time) so it deserves some
- * explanation.
- *
- * We group bitmap updates into batches.  Each batch has a number.
- * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
- *    new additions.
- * When we discover that we will need to write to any block in a stripe
- * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
- * When we are ready to do a write, if that batch hasn't been written yet,
- *   we plug the array and queue the stripe for later.
- * When an unplug happens, we increment bm_flush, thus closing the current
- *   batch.
- * When we notice that bm_flush > bm_write, we write out all pending updates
- * to the bitmap, and advance bm_write to where bm_flush was.
- * This may occasionally write a bit out twice, but is sure never to
- * miss any bits.
- */
+#include <linux/config.h>

 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/raid/raid5.h>
 #include <linux/highmem.h>
 #include <linux/bitops.h>
-#include <linux/kthread.h>
 #include <asm/atomic.h>
-#include "raid6.h"

 #include <linux/raid/bitmap.h>

@@ -94,32 +67,22 @@
 #define __inline__
 #endif

-#if !RAID6_USE_EMPTY_ZERO_PAGE
-/* In .bss so it's zeroed */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-#endif
-
-static inline int raid6_next_disk(int disk, int raid_disks)
-{
-	disk++;
-	return (disk < raid_disks) ? disk : 0;
-}
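/*
 * Illustrative sketch, not part of the original diff: the removed
 * raid6_next_disk() simply walks the disk ring modulo raid_disks.
 * RAID-6 places Q on the disk after P, so for an assumed 5-disk array
 * with P on disk 4, Q wraps around to disk 0.
 */
#include <stdio.h>

static int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

int main(void)
{
	int raid_disks = 5, pd_idx = 4;		/* assumed example values */
	int qd_idx = raid6_next_disk(pd_idx, raid_disks);

	printf("P on %d, Q on %d\n", pd_idx, qd_idx);	/* P on 4, Q on 0 */
	return 0;
}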

 static void print_raid5_conf (raid5_conf_t *conf);

 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	if (atomic_dec_and_test(&sh->count)) {
-		BUG_ON(!list_empty(&sh->lru));
-		BUG_ON(atomic_read(&conf->active_stripes)==0);
+		if (!list_empty(&sh->lru))
+			BUG();
+		if (atomic_read(&conf->active_stripes)==0)
+			BUG();
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
-			if (test_bit(STRIPE_DELAYED, &sh->state)) {
+			if (test_bit(STRIPE_DELAYED, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
-				blk_plug_device(conf->mddev->queue);
-			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-				   sh->bm_seq - conf->seq_write > 0) {
+			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+				 conf->seq_write == sh->bm_seq)
 				list_add_tail(&sh->lru, &conf->bitmap_list);
-				blk_plug_device(conf->mddev->queue);
-			} else {
+			else {
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
 			}
@@ -130,11 +93,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 			if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 				md_wakeup_thread(conf->mddev->thread);
 		}
+		list_add_tail(&sh->lru, &conf->inactive_list);
 		atomic_dec(&conf->active_stripes);
-		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
-			list_add_tail(&sh->lru, &conf->inactive_list);
+		if (!conf->inactive_blocked ||
+		    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
 			wake_up(&conf->wait_for_stripe);
-		}
 	}
 }
@@ -142,7 +105,7 @@ static void release_stripe(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	unsigned long flags;
-
+	
 	spin_lock_irqsave(&conf->device_lock, flags);
 	__release_stripe(conf, sh);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -155,7 +118,7 @@ static inline void remove_hash(struct stripe_head *sh)
 	hlist_del_init(&sh->hash);
 }

-static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	struct hlist_head *hp = stripe_hash(conf, sh->sector);

@@ -215,27 +178,27 @@ static int grow_buffers(struct stripe_head *sh, int num)

 static void raid5_build_block (struct stripe_head *sh, int i);

-static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
 {
 	raid5_conf_t *conf = sh->raid_conf;
-	int i;
+	int disks = conf->raid_disks, i;

-	BUG_ON(atomic_read(&sh->count) != 0);
-	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+	if (atomic_read(&sh->count) != 0)
+		BUG();
+	if (test_bit(STRIPE_HANDLE, &sh->state))
+		BUG();
 	
 	CHECK_DEVLOCK();
 	PRINTK("init_stripe called, stripe %llu\n",
 		(unsigned long long)sh->sector);

 	remove_hash(sh);
-
+	
 	sh->sector = sector;
 	sh->pd_idx = pd_idx;
 	sh->state = 0;

-	sh->disks = disks;
-
-	for (i = sh->disks; i--; ) {
+	for (i=disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];

 		if (dev->toread || dev->towrite || dev->written ||
@@ -252,7 +215,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
 	insert_hash(conf, sh);
 }

-static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
+static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
 {
 	struct stripe_head *sh;
 	struct hlist_node *hn;
@@ -260,7 +223,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 	CHECK_DEVLOCK();
 	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
 	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
-		if (sh->sector == sector && sh->disks == disks)
+		if (sh->sector == sector)
 			return sh;
 	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
 	return NULL;
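/*
 * Illustrative sketch, not part of the original diff: the stripe cache
 * looked up by __find_stripe() is a power-of-two hash on the stripe's
 * starting sector.  The shift/mask scheme below mirrors the
 * stripe_hash() macro; the bucket count of 256 is an assumed example.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_HASH		256
#define HASH_MASK	(NR_HASH - 1)
#define STRIPE_SHIFT	3	/* 4K stripe page = 8 x 512-byte sectors */

static unsigned stripe_hash_bucket(uint64_t sector)
{
	return (sector >> STRIPE_SHIFT) & HASH_MASK;
}

int main(void)
{
	/* two sectors inside the same 4K stripe hash to the same bucket */
	printf("%u %u\n", stripe_hash_bucket(1024), stripe_hash_bucket(1031));
	return 0;
}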
@@ -269,8 +232,8 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 static void unplug_slaves(mddev_t *mddev);
 static void raid5_unplug_device(request_queue_t *q);

-static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
-					     int pd_idx, int noblock)
+static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
+					     int pd_idx, int noblock)
 {
 	struct stripe_head *sh;

@@ -282,7 +245,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,
 				    conf->device_lock, /* nothing */);
-		sh = __find_stripe(conf, sector, disks);
+		sh = __find_stripe(conf, sector);
 		if (!sh) {
 			if (!conf->inactive_blocked)
 				sh = get_free_stripe(conf);
@@ -296,19 +259,19 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 						     < (conf->max_nr_stripes *3/4)
 						    || !conf->inactive_blocked),
 						    conf->device_lock,
-						    raid5_unplug_device(conf->mddev->queue)
+						    unplug_slaves(conf->mddev);
 					);
 				conf->inactive_blocked = 0;
 			} else
-				init_stripe(sh, sector, pd_idx, disks);
+				init_stripe(sh, sector, pd_idx);
 		} else {
 			if (atomic_read(&sh->count)) {
-				BUG_ON(!list_empty(&sh->lru));
+				if (!list_empty(&sh->lru))
+					BUG();
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
-				if (list_empty(&sh->lru) &&
-				    !test_bit(STRIPE_EXPANDING, &sh->state))
+				if (list_empty(&sh->lru))
 					BUG();
 				list_del_init(&sh->lru);
 			}
@@ -337,7 +300,6 @@ static int grow_one_stripe(raid5_conf_t *conf)
 		kmem_cache_free(conf->slab_cache, sh);
 		return 0;
 	}
-	sh->disks = conf->raid_disks;
 	/* we just created an active stripe so... */
 	atomic_set(&sh->count, 1);
 	atomic_inc(&conf->active_stripes);
@@ -351,144 +313,20 @@ static int grow_stripes(raid5_conf_t *conf, int num)
 	kmem_cache_t *sc;
 	int devs = conf->raid_disks;

-	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
-	sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
-	conf->active_name = 0;
-	sc = kmem_cache_create(conf->cache_name[conf->active_name],
+	sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
+
+	sc = kmem_cache_create(conf->cache_name,
 			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
 			       0, 0, NULL, NULL);
 	if (!sc)
 		return 1;
 	conf->slab_cache = sc;
-	conf->pool_size = devs;
-	while (num--)
+	while (num--) {
 		if (!grow_one_stripe(conf))
 			return 1;
-	return 0;
-}
-
-#ifdef CONFIG_MD_RAID5_RESHAPE
-static int resize_stripes(raid5_conf_t *conf, int newsize)
-{
-	/* Make all the stripes able to hold 'newsize' devices.
-	 * New slots in each stripe get 'page' set to a new page.
-	 *
-	 * This happens in stages:
-	 * 1/ create a new kmem_cache and allocate the required number of
-	 *    stripe_heads.
-	 * 2/ gather all the old stripe_heads and transfer the pages across
-	 *    to the new stripe_heads.  This will have the side effect of
-	 *    freezing the array as once all stripe_heads have been collected,
-	 *    no IO will be possible.  Old stripe heads are freed once their
-	 *    pages have been transferred over, and the old kmem_cache is
-	 *    freed when all stripes are done.
-	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
-	 *    we simply return a failure status - no need to clean anything up.
-	 * 4/ allocate new pages for the new slots in the new stripe_heads.
-	 *    If this fails, we don't bother trying to shrink the
-	 *    stripe_heads down again, we just leave them as they are.
-	 *    As each stripe_head is processed the new one is released into
-	 *    active service.
-	 *
-	 * Once step2 is started, we cannot afford to wait for a write,
-	 * so we use GFP_NOIO allocations.
-	 */
-	struct stripe_head *osh, *nsh;
-	LIST_HEAD(newstripes);
-	struct disk_info *ndisks;
-	int err = 0;
-	kmem_cache_t *sc;
-	int i;
-
-	if (newsize <= conf->pool_size)
-		return 0; /* never bother to shrink */
-
-	/* Step 1 */
-	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
-			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
-			       0, 0, NULL, NULL);
-	if (!sc)
-		return -ENOMEM;
-
-	for (i = conf->max_nr_stripes; i; i--) {
-		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
-		if (!nsh)
-			break;
-
-		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
-
-		nsh->raid_conf = conf;
-		spin_lock_init(&nsh->lock);
-
-		list_add(&nsh->lru, &newstripes);
-	}
-	if (i) {
-		/* didn't get enough, give up */
-		while (!list_empty(&newstripes)) {
-			nsh = list_entry(newstripes.next, struct stripe_head, lru);
-			list_del(&nsh->lru);
-			kmem_cache_free(sc, nsh);
-		}
-		kmem_cache_destroy(sc);
-		return -ENOMEM;
-	}
-	/* Step 2 - Must use GFP_NOIO now.
-	 * OK, we have enough stripes, start collecting inactive
-	 * stripes and copying them over
-	 */
-	list_for_each_entry(nsh, &newstripes, lru) {
-		spin_lock_irq(&conf->device_lock);
-		wait_event_lock_irq(conf->wait_for_stripe,
-				    !list_empty(&conf->inactive_list),
-				    conf->device_lock,
-				    unplug_slaves(conf->mddev)
-			);
-		osh = get_free_stripe(conf);
-		spin_unlock_irq(&conf->device_lock);
-		atomic_set(&nsh->count, 1);
-		for(i=0; i<conf->pool_size; i++)
-			nsh->dev[i].page = osh->dev[i].page;
-		for( ; i<newsize; i++)
-			nsh->dev[i].page = NULL;
-		kmem_cache_free(conf->slab_cache, osh);
-	}
-	kmem_cache_destroy(conf->slab_cache);
-
-	/* Step 3.
-	 * At this point, we are holding all the stripes so the array
-	 * is completely stalled, so now is a good time to resize
-	 * conf->disks.
-	 */
-	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
-	if (ndisks) {
-		for (i=0; i<conf->raid_disks; i++)
-			ndisks[i] = conf->disks[i];
-		kfree(conf->disks);
-		conf->disks = ndisks;
-	} else
-		err = -ENOMEM;
-
-	/* Step 4, return new stripes to service */
-	while(!list_empty(&newstripes)) {
-		nsh = list_entry(newstripes.next, struct stripe_head, lru);
-		list_del_init(&nsh->lru);
-		for (i=conf->raid_disks; i < newsize; i++)
-			if (nsh->dev[i].page == NULL) {
-				struct page *p = alloc_page(GFP_NOIO);
-				nsh->dev[i].page = p;
-				if (!p)
-					err = -ENOMEM;
-			}
-		release_stripe(nsh);
 	}
-	/* critical section pass, GFP_NOIO no longer needed */
-
-	conf->slab_cache = sc;
-	conf->active_name = 1-conf->active_name;
-	conf->pool_size = newsize;
-	return err;
+	return 0;
 }
-#endif

 static int drop_one_stripe(raid5_conf_t *conf)
 {
@@ -499,8 +337,9 @@ static int drop_one_stripe(raid5_conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 	if (!sh)
 		return 0;
-	BUG_ON(atomic_read(&sh->count));
-	shrink_buffers(sh, conf->pool_size);
+	if (atomic_read(&sh->count))
+		BUG();
+	shrink_buffers(sh, conf->raid_disks);
 	kmem_cache_free(conf->slab_cache, sh);
 	atomic_dec(&conf->active_stripes);
 	return 1;
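/*
 * Illustrative sketch, not part of the original diff: the core move in
 * the removed resize_stripes() is handing every page of an old
 * stripe_head to the matching slot of a bigger one, leaving the new
 * slots NULL for step 4 to fill.  A self-contained model of that step
 * (names are hypothetical, sizes are example values):
 */
#include <stddef.h>

#define OLD_SLOTS 3
#define NEW_SLOTS 5

struct old_sh { void *page[OLD_SLOTS]; };
struct new_sh { void *page[NEW_SLOTS]; };

static void transfer_pages(struct new_sh *nsh, struct old_sh *osh)
{
	int i;

	for (i = 0; i < OLD_SLOTS; i++)	/* step 2: reuse existing pages */
		nsh->page[i] = osh->page[i];
	for (; i < NEW_SLOTS; i++)	/* step 4 allocates these lazily */
		nsh->page[i] = NULL;
}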
@@ -521,10 +360,8 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
 {
 	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
-	int disks = sh->disks, i;
+	int disks = conf->raid_disks, i;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
-	char b[BDEVNAME_SIZE];
-	mdk_rdev_t *rdev;

 	if (bi->bi_size)
 		return 1;
@@ -572,39 +409,25 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 #endif
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
-			rdev = conf->disks[i].rdev;
-			printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n",
-			       mdname(conf->mddev), STRIPE_SECTORS,
-			       (unsigned long long)sh->sector + rdev->data_offset,
-			       bdevname(rdev->bdev, b));
+			printk(KERN_INFO "raid5: read error corrected!!\n");
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
 		}
 		if (atomic_read(&conf->disks[i].rdev->read_errors))
 			atomic_set(&conf->disks[i].rdev->read_errors, 0);
 	} else {
-		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
 		int retry = 0;
-		rdev = conf->disks[i].rdev;

-		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-		atomic_inc(&rdev->read_errors);
+		atomic_inc(&conf->disks[i].rdev->read_errors);
 		if (conf->mddev->degraded)
-			printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n",
-			       mdname(conf->mddev),
-			       (unsigned long long)sh->sector + rdev->data_offset,
-			       bdn);
+			printk(KERN_WARNING "raid5: read error not correctable.\n");
 		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
 			/* Oh, no!!! */
-			printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n",
-			       mdname(conf->mddev),
-			       (unsigned long long)sh->sector + rdev->data_offset,
-			       bdn);
-		else if (atomic_read(&rdev->read_errors)
+			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
+		else if (atomic_read(&conf->disks[i].rdev->read_errors)
 			 > conf->max_nr_stripes)
 			printk(KERN_WARNING
-			       "raid5:%s: Too many read errors, failing device %s.\n",
-			       mdname(conf->mddev), bdn);
+			       "raid5: Too many read errors, failing device.\n");
 		else
 			retry = 1;
 		if (retry)
@@ -612,7 +435,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
 		else {
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
-			md_error(conf->mddev, rdev);
+			md_error(conf->mddev, conf->disks[i].rdev);
 		}
 	}
 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
@@ -635,7 +458,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
 {
 	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
-	int disks = sh->disks, i;
+	int disks = conf->raid_disks, i;
 	unsigned long flags;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

@@ -686,7 +509,8 @@ static void raid5_build_block (struct stripe_head *sh, int i)
 	dev->req.bi_private = sh;

 	dev->flags = 0;
-	dev->sector = compute_blocknr(sh, i);
+	if (i != sh->pd_idx)
+		dev->sector = compute_blocknr(sh, i);
 }

 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -713,7 +537,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 			"	Operation continuing on %d devices\n",
 			bdevname(rdev->bdev,b), conf->working_disks);
 	}
-}
+}	

 /*
  * Input: a 'big' sector number,
@@ -751,12 +575,9 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
-	switch(conf->level) {
-	case 4:
+	if (conf->level == 4)
 		*pd_idx = data_disks;
-		break;
-	case 5:
-		switch (conf->algorithm) {
+	else switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 			*pd_idx = data_disks - stripe % raid_disks;
 			if (*dd_idx >= *pd_idx)
@@ -778,39 +599,6 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
 				conf->algorithm);
-		}
-		break;
-	case 6:
-
-		/**** FIX THIS ****/
-		switch (conf->algorithm) {
-		case ALGORITHM_LEFT_ASYMMETRIC:
-			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
-			if (*pd_idx == raid_disks-1)
-				(*dd_idx)++; 	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
-				(*dd_idx) += 2; /* D D P Q D */
-			break;
-		case ALGORITHM_RIGHT_ASYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			if (*pd_idx == raid_disks-1)
-				(*dd_idx)++; 	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
-				(*dd_idx) += 2; /* D D P Q D */
-			break;
-		case ALGORITHM_LEFT_SYMMETRIC:
-			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
-			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
-			break;
-		case ALGORITHM_RIGHT_SYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
-			break;
-		default:
-			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
-				conf->algorithm);
-		}
-		break;
 	}
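/*
 * Illustrative sketch, not part of the original diff: a worked example
 * of the parity-placement switch above.  For the default
 * ALGORITHM_LEFT_ASYMMETRIC layout on an assumed 4-disk RAID-5, parity
 * walks backwards one disk per stripe, and data indices skip over it.
 */
#include <stdio.h>

int main(void)
{
	int raid_disks = 4, data_disks = 3;	/* assumed example array */
	unsigned stripe, dd;

	for (stripe = 0; stripe < 4; stripe++) {
		int pd_idx = data_disks - stripe % raid_disks;

		printf("stripe %u: parity on disk %d, data on", stripe, pd_idx);
		for (dd = 0; dd < (unsigned)data_disks; dd++) {
			int idx = dd;
			if (idx >= pd_idx)	/* same skip as the kernel code */
				idx++;
			printf(" %d", idx);
		}
		printf("\n");	/* stripe 0: parity on 3, data on 0 1 2; ... */
	}
	return 0;
}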

 	/*
@@ -824,7 +612,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
 static sector_t compute_blocknr(struct stripe_head *sh, int i)
 {
 	raid5_conf_t *conf = sh->raid_conf;
-	int raid_disks = sh->disks, data_disks = raid_disks - 1;
+	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
 	sector_t new_sector = sh->sector, check;
 	int sectors_per_chunk = conf->chunk_size >> 9;
 	sector_t stripe;
@@ -832,17 +620,12 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
 	int chunk_number, dummy1, dummy2, dd_idx = i;
 	sector_t r_sector;

-
 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
 	stripe = new_sector;
 	BUG_ON(new_sector != stripe);

-	if (i == sh->pd_idx)
-		return 0;
-	switch(conf->level) {
-	case 4: break;
-	case 5:
-		switch (conf->algorithm) {
+
+	switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 		case ALGORITHM_RIGHT_ASYMMETRIC:
 			if (i > sh->pd_idx)
@@ -856,37 +639,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
 			break;
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
-			       conf->algorithm);
-		}
-		break;
-	case 6:
-		data_disks = raid_disks - 2;
-		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
-			return 0; /* It is the Q disk */
-		switch (conf->algorithm) {
-		case ALGORITHM_LEFT_ASYMMETRIC:
-		case ALGORITHM_RIGHT_ASYMMETRIC:
-			if (sh->pd_idx == raid_disks-1)
-				i--; 	/* Q D D D P */
-			else if (i > sh->pd_idx)
-				i -= 2; /* D D P Q D */
-			break;
-		case ALGORITHM_LEFT_SYMMETRIC:
-		case ALGORITHM_RIGHT_SYMMETRIC:
-			if (sh->pd_idx == raid_disks-1)
-				i--; /* Q D D D P */
-			else {
-				/* D D P Q D */
-				if (i < sh->pd_idx)
-					i += raid_disks;
-				i -= (sh->pd_idx + 2);
-			}
-			break;
-		default:
-			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
 				conf->algorithm);
-		}
-		break;
 	}

 	chunk_number = stripe * data_disks + i;
@@ -903,11 +656,10 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)

 /*
- * Copy data between a page in the stripe cache, and one or more bion
- * The page could align with the middle of the bio, or there could be
- * several bion, each with several bio_vecs, which cover part of the page
- * Multiple bion are linked together on bi_next.  There may be extras
- * at the end of this list.  We ignore them.
+ * Copy data between a page in the stripe cache, and a bio.
+ * There are no alignment or size guarantees between the page or the
+ * bio except that there is some overlap.
+ * All iovecs in the bio must be considered.
  */
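/*
 * Illustrative sketch, not part of the original diff: the clipping
 * arithmetic copy_data() performs below, reduced to plain integers.
 * Given a bio segment at byte offset 'page_offset' relative to the
 * stripe page, only the part intersecting [0, STRIPE_SIZE) is copied.
 * The values in main() are assumed examples.
 */
#include <stdio.h>

#define STRIPE_SIZE 4096

static int clip_len(int page_offset, int len)
{
	int clen;

	if (page_offset < 0) {		/* bio starts before this page */
		clen = len + page_offset;
		if (clen > STRIPE_SIZE)
			clen = STRIPE_SIZE;
	} else if (len > 0 && page_offset + len > STRIPE_SIZE)
		clen = STRIPE_SIZE - page_offset;
	else
		clen = len;
	return clen > 0 ? clen : 0;
}

int main(void)
{
	printf("%d\n", clip_len(-512, 1024));	/* 512 bytes land in this page */
	printf("%d\n", clip_len(3584, 1024));	/* clipped to 512 at page end */
	return 0;
}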
 static void copy_data(int frombio, struct bio *bio,
 		     struct page *page,
@@ -936,7 +688,7 @@ static void copy_data(int frombio, struct bio *bio,
 			if (len > 0 && page_offset + len > STRIPE_SIZE)
 				clen = STRIPE_SIZE - page_offset;
 			else clen = len;
-
+			
 			if (clen > 0) {
 				char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
 				if (frombio)
@@ -961,7 +713,8 @@ static void copy_data(int frombio, struct bio *bio,

 static void compute_block(struct stripe_head *sh, int dd_idx)
 {
-	int i, count, disks = sh->disks;
+	raid5_conf_t *conf = sh->raid_conf;
+	int i, count, disks = conf->raid_disks;
 	void *ptr[MAX_XOR_BLOCKS], *p;

 	PRINTK("compute_block, stripe %llu, idx %d\n",
@@ -988,21 +741,22 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
 	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
 }

-static void compute_parity5(struct stripe_head *sh, int method)
+static void compute_parity(struct stripe_head *sh, int method)
 {
 	raid5_conf_t *conf = sh->raid_conf;
-	int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
+	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
 	void *ptr[MAX_XOR_BLOCKS];
 	struct bio *chosen;

-	PRINTK("compute_parity5, stripe %llu, method %d\n",
+	PRINTK("compute_parity, stripe %llu, method %d\n",
 		(unsigned long long)sh->sector, method);

 	count = 1;
 	ptr[0] = page_address(sh->dev[pd_idx].page);
 	switch(method) {
 	case READ_MODIFY_WRITE:
-		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
+		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
+			BUG();
 		for (i=disks ; i-- ;) {
 			if (i==pd_idx)
 				continue;
@@ -1015,7 +769,7 @@ static void compute_parity5(struct stripe_head *sh, int method)
 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 					wake_up(&conf->wait_for_overlap);

-				BUG_ON(sh->dev[i].written);
+				if (sh->dev[i].written) BUG();
 				sh->dev[i].written = chosen;
 				check_xor();
 			}
@@ -1031,7 +785,7 @@ static void compute_parity5(struct stripe_head *sh, int method)
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);

-			BUG_ON(sh->dev[i].written);
+			if (sh->dev[i].written) BUG();
 			sh->dev[i].written = chosen;
 		}
 		break;
@@ -1082,195 +836,9 @@ static void compute_parity5(struct stripe_head *sh, int method)
 	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
 }

-static void compute_parity6(struct stripe_head *sh, int method)
-{
-	raid6_conf_t *conf = sh->raid_conf;
-	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
-	struct bio *chosen;
-	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
-	void *ptrs[disks];
-
-	qd_idx = raid6_next_disk(pd_idx, disks);
-	d0_idx = raid6_next_disk(qd_idx, disks);
-
-	PRINTK("compute_parity, stripe %llu, method %d\n",
-		(unsigned long long)sh->sector, method);
-
-	switch(method) {
-	case READ_MODIFY_WRITE:
-		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
-	case RECONSTRUCT_WRITE:
-		for (i= disks; i-- ;)
-			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
-				chosen = sh->dev[i].towrite;
-				sh->dev[i].towrite = NULL;
-
-				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-					wake_up(&conf->wait_for_overlap);
-
-				if (sh->dev[i].written) BUG();
-				sh->dev[i].written = chosen;
-			}
-		break;
-	case CHECK_PARITY:
-		BUG();		/* Not implemented yet */
-	}
-
-	for (i = disks; i--;)
-		if (sh->dev[i].written) {
-			sector_t sector = sh->dev[i].sector;
-			struct bio *wbi = sh->dev[i].written;
-			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
-				copy_data(1, wbi, sh->dev[i].page, sector);
-				wbi = r5_next_bio(wbi, sector);
-			}
-
-			set_bit(R5_LOCKED, &sh->dev[i].flags);
-			set_bit(R5_UPTODATE, &sh->dev[i].flags);
-		}
-
-//	switch(method) {
-//	case RECONSTRUCT_WRITE:
-//	case CHECK_PARITY:
-//	case UPDATE_PARITY:
-		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
-		/* FIX: Is this ordering of drives even remotely optimal? */
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("block %d/%d not uptodate on parity calc\n", i,count);
-			i = raid6_next_disk(i, disks);
-		} while ( i != d0_idx );
-//		break;
-//	}
-
-	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
-
-	switch(method) {
-	case RECONSTRUCT_WRITE:
-		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
-		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
-		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
-		break;
-	case UPDATE_PARITY:
-		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
-		break;
-	}
-}
-
-
-/* Compute one missing block */
-static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
-{
-	raid6_conf_t *conf = sh->raid_conf;
-	int i, count, disks = conf->raid_disks;
-	void *ptr[MAX_XOR_BLOCKS], *p;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
-
-	PRINTK("compute_block_1, stripe %llu, idx %d\n",
-		(unsigned long long)sh->sector, dd_idx);
-
-	if ( dd_idx == qd_idx ) {
-		/* We're actually computing the Q drive */
-		compute_parity6(sh, UPDATE_PARITY);
-	} else {
-		ptr[0] = page_address(sh->dev[dd_idx].page);
-		if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
-		count = 1;
-		for (i = disks ; i--; ) {
-			if (i == dd_idx || i == qd_idx)
-				continue;
-			p = page_address(sh->dev[i].page);
-			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				ptr[count++] = p;
-			else
-				printk("compute_block() %d, stripe %llu, %d"
-				       " not present\n", dd_idx,
-				       (unsigned long long)sh->sector, i);
-
-			check_xor();
-		}
-		if (count != 1)
-			xor_block(count, STRIPE_SIZE, ptr);
-		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
-		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
-	}
-}
-
-/* Compute two missing blocks */
-static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
-{
-	raid6_conf_t *conf = sh->raid_conf;
-	int i, count, disks = conf->raid_disks;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
-	int d0_idx = raid6_next_disk(qd_idx, disks);
-	int faila, failb;
-
-	/* faila and failb are disk numbers relative to d0_idx */
-	/* pd_idx become disks-2 and qd_idx become disks-1 */
-	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
-	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
-
-	BUG_ON(faila == failb);
-	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
-
-	PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
-		(unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
-
-	if ( failb == disks-1 ) {
-		/* Q disk is one of the missing disks */
-		if ( faila == disks-2 ) {
-			/* Missing P+Q, just recompute */
-			compute_parity6(sh, UPDATE_PARITY);
-			return;
-		} else {
-			/* We're missing D+Q; recompute D from P */
-			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
-			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
-			return;
-		}
-	}
-
-	/* We're missing D+P or D+D; build pointer table */
-	{
-		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
-		void *ptrs[disks];
-
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			i = raid6_next_disk(i, disks);
-			if (i != dd_idx1 && i != dd_idx2 &&
-			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("compute_2 with missing block %d/%d\n", count, i);
-		} while ( i != d0_idx );
-
-		if ( failb == disks-2 ) {
-			/* We're missing D+P. */
-			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
-		} else {
-			/* We're missing D+D. */
-			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
-		}
-
-		/* Both the above update both missing blocks */
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
-	}
-}
-
-
-
 /*
  * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
+ * toread/towrite point to the first in a chain. 
  * The bi_next chain must be in order.
  */
 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
@@ -1300,7 +868,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
 		goto overlap;

-	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
+	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
+		BUG();
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
@@ -1313,9 +882,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 			(unsigned long long)sh->sector, dd_idx);

 	if (conf->mddev->bitmap && firstwrite) {
+		sh->bm_seq = conf->seq_write;
 		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
 				  STRIPE_SECTORS, 0);
-		sh->bm_seq = conf->seq_flush+1;
 		set_bit(STRIPE_BIT_DELAY, &sh->state);
 	}

@@ -1341,27 +910,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	return 0;
 }

-static void end_reshape(raid5_conf_t *conf);
-
-static int page_is_zero(struct page *p)
-{
-	char *a = page_address(p);
-	return ((*(u32*)a) == 0 &&
-		memcmp(a, a+4, STRIPE_SIZE-4)==0);
-}
-
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
-{
-	int sectors_per_chunk = conf->chunk_size >> 9;
-	sector_t x = stripe;
-	int pd_idx, dd_idx;
-	int chunk_offset = sector_div(x, sectors_per_chunk);
-	stripe = x;
-	raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
-			     + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
-	return pd_idx;
-}
-
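/*
 * Illustrative sketch, not part of the original diff: the removed
 * stripe_to_pdidx() asks "which disk holds parity for this stripe?"
 * by mapping the stripe back through raid5_compute_sector().  With the
 * default left-asymmetric layout this reduces to the arithmetic below
 * (raid_disks = 4 is an assumed example).
 */
#include <stdio.h>

int main(void)
{
	int raid_disks = 4;
	unsigned long stripe;

	for (stripe = 0; stripe < 8; stripe++)
		printf("stripe %lu -> pd_idx %lu\n", stripe,
		       (raid_disks - 1) - stripe % raid_disks);
	return 0;
}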
 /*
  * handle_stripe - do things to a stripe.
@@ -1381,14 +929,14 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
  *
  */

-static void handle_stripe5(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
-	int disks = sh->disks;
+	int disks = conf->raid_disks;
 	struct bio *return_bi= NULL;
 	struct bio *bi;
 	int i;
-	int syncing, expanding, expanded;
+	int syncing;
 	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
 	int non_overwrite = 0;
 	int failed_num=0;
@@ -1403,8 +951,6 @@ static void handle_stripe5(struct stripe_head *sh)
 	clear_bit(STRIPE_DELAYED, &sh->state);

 	syncing = test_bit(STRIPE_SYNCING, &sh->state);
-	expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
-	expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	/* Now to look around and see what can be done */

 	rcu_read_lock();
@@ -1597,14 +1143,13 @@ static void handle_stripe5(struct stripe_head *sh)
 	 * parity, or to satisfy requests
 	 * or to load a block that is being partially written.
 	 */
-	if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
+	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
 		for (i=disks; i--;) {
 			dev = &sh->dev[i];
 			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
 			    (dev->toread ||
 			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
 			     syncing ||
-			     expanding ||
 			     (failed && (sh->dev[failed_num].toread ||
 					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
 				    )
@@ -1713,7 +1258,7 @@ static void handle_stripe5(struct stripe_head *sh)
 		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
 		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
 			PRINTK("Computing parity...\n");
-			compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
+			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
 			/* now every locked buffer is ready to be written */
 			for (i=disks; i--;)
 				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
@@ -1740,10 +1285,14 @@ static void handle_stripe5(struct stripe_head *sh)
 	    !test_bit(STRIPE_INSYNC, &sh->state)) {
 		set_bit(STRIPE_HANDLE, &sh->state);
 		if (failed == 0) {
-			BUG_ON(uptodate != disks);
-			compute_parity5(sh, CHECK_PARITY);
+			char *pagea;
+			if (uptodate != disks)
+				BUG();
+			compute_parity(sh, CHECK_PARITY);
 			uptodate--;
-			if (page_is_zero(sh->dev[sh->pd_idx].page)) {
+			pagea = page_address(sh->dev[sh->pd_idx].page);
+			if ((*(u32*)pagea) == 0 &&
+			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
 				/* parity is correct (on disc, not in buffer any more) */
 				set_bit(STRIPE_INSYNC, &sh->state);
 			} else {
@@ -1790,75 +1339,11 @@ static void handle_stripe5(struct stripe_head *sh)
 				set_bit(R5_Wantwrite, &dev->flags);
 				set_bit(R5_ReWrite, &dev->flags);
 				set_bit(R5_LOCKED, &dev->flags);
-				locked++;
 			} else {
 				/* let's read it back */
 				set_bit(R5_Wantread, &dev->flags);
 				set_bit(R5_LOCKED, &dev->flags);
-				locked++;
-			}
-		}
-
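/*
 * Illustrative sketch, not part of the original diff: after
 * compute_parity(sh, CHECK_PARITY) XORs every data block back into the
 * parity page, a clean stripe leaves that page all-zero.  The
 * open-coded test above (and the removed page_is_zero() helper) both
 * reduce to:
 */
#include <string.h>
#include <stdint.h>

#define STRIPE_SIZE 4096

static int page_is_zero(const char *a)
{
	/* first word zero, and each later byte equal to the byte 4 before it */
	return *(const uint32_t *)a == 0 &&
	       memcmp(a, a + 4, STRIPE_SIZE - 4) == 0;
}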
-		if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
-			/* Need to write out all blocks after computing parity */
-			sh->disks = conf->raid_disks;
-			sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
-			compute_parity5(sh, RECONSTRUCT_WRITE);
-			for (i= conf->raid_disks; i--;) {
-				set_bit(R5_LOCKED, &sh->dev[i].flags);
-				locked++;
-				set_bit(R5_Wantwrite, &sh->dev[i].flags);
-			}
-			clear_bit(STRIPE_EXPANDING, &sh->state);
-		} else if (expanded) {
-			clear_bit(STRIPE_EXPAND_READY, &sh->state);
-			atomic_dec(&conf->reshape_stripes);
-			wake_up(&conf->wait_for_overlap);
-			md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
-		}
-
-		if (expanding && locked == 0) {
-			/* We have read all the blocks in this stripe and now we need to
-			 * copy some of them into a target stripe for expand.
-			 */
-			clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
-			for (i=0; i< sh->disks; i++)
-				if (i != sh->pd_idx) {
-					int dd_idx, pd_idx, j;
-					struct stripe_head *sh2;
-
-					sector_t bn = compute_blocknr(sh, i);
-					sector_t s = raid5_compute_sector(bn, conf->raid_disks,
-									  conf->raid_disks-1,
-									  &dd_idx, &pd_idx, conf);
-					sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
-					if (sh2 == NULL)
-						/* so far only the early blocks of this stripe
-						 * have been requested.  When later blocks
-						 * get requested, we will try again
-						 */
-						continue;
-					if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
-					   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
-						/* must have already done this block */
-						release_stripe(sh2);
-						continue;
-					}
-					memcpy(page_address(sh2->dev[dd_idx].page),
-					       page_address(sh->dev[i].page),
-					       STRIPE_SIZE);
-					set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
-					set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
-					for (j=0; j<conf->raid_disks; j++)
-						if (j != sh2->pd_idx &&
-						    !test_bit(R5_Expanded, &sh2->dev[j].flags))
-							break;
-					if (j == conf->raid_disks) {
-						set_bit(STRIPE_EXPAND_READY, &sh2->state);
-						set_bit(STRIPE_HANDLE, &sh2->state);
-					}
-					release_stripe(sh2);
-				}
-		}

 	spin_unlock(&sh->lock);
@@ -1899,7 +1384,7 @@ static void handle_stripe5(struct stripe_head *sh)
 		rcu_read_unlock();

 		if (rdev) {
-			if (syncing || expanding || expanded)
+			if (syncing)
 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

 			bi->bi_bdev = rdev->bdev;
@@ -1931,603 +1416,40 @@ static void handle_stripe5(struct stripe_head *sh)
 	}
 }

-static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+static void raid5_activate_delayed(raid5_conf_t *conf)
 {
-	raid6_conf_t *conf = sh->raid_conf;
-	int disks = conf->raid_disks;
-	struct bio *return_bi= NULL;
-	struct bio *bi;
-	int i;
-	int syncing;
-	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
-	int non_overwrite = 0;
-	int failed_num[2] = {0, 0};
-	struct r5dev *dev, *pdev, *qdev;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
-	int p_failed, q_failed;
-
-	PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
-	       (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
-	       pd_idx, qd_idx);
+	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
+		while (!list_empty(&conf->delayed_list)) {
+			struct list_head *l = conf->delayed_list.next;
+			struct stripe_head *sh;
+			sh = list_entry(l, struct stripe_head, lru);
+			list_del_init(l);
+			clear_bit(STRIPE_DELAYED, &sh->state);
+			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+				atomic_inc(&conf->preread_active_stripes);
+			list_add_tail(&sh->lru, &conf->handle_list);
+		}
+	}
+}

-	spin_lock(&sh->lock);
-	clear_bit(STRIPE_HANDLE, &sh->state);
-	clear_bit(STRIPE_DELAYED, &sh->state);
+static void activate_bit_delay(raid5_conf_t *conf)
+{
+	/* device_lock is held */
+	struct list_head head;
+	list_add(&head, &conf->bitmap_list);
+	list_del_init(&conf->bitmap_list);
+	while (!list_empty(&head)) {
+		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
+		list_del_init(&sh->lru);
+		atomic_inc(&sh->count);
+		__release_stripe(conf, sh);
+	}
+}

-	syncing = test_bit(STRIPE_SYNCING, &sh->state);
-	/* Now to look around and see what can be done */
-
-	rcu_read_lock();
-	for (i=disks; i--; ) {
-		mdk_rdev_t *rdev;
-		dev = &sh->dev[i];
-		clear_bit(R5_Insync, &dev->flags);
-
-		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
-			i, dev->flags, dev->toread, dev->towrite, dev->written);
-		/* maybe we can reply to a read */
-		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
-			struct bio *rbi, *rbi2;
-			PRINTK("Return read for disc %d\n", i);
-			spin_lock_irq(&conf->device_lock);
-			rbi = dev->toread;
-			dev->toread = NULL;
-			if (test_and_clear_bit(R5_Overlap, &dev->flags))
-				wake_up(&conf->wait_for_overlap);
-			spin_unlock_irq(&conf->device_lock);
-			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-				copy_data(0, rbi, dev->page, dev->sector);
-				rbi2 = r5_next_bio(rbi, dev->sector);
-				spin_lock_irq(&conf->device_lock);
-				if (--rbi->bi_phys_segments == 0) {
-					rbi->bi_next = return_bi;
-					return_bi = rbi;
-				}
-				spin_unlock_irq(&conf->device_lock);
-				rbi = rbi2;
-			}
-		}
-
-		/* now count some things */
-		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
-		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
-
-
-		if (dev->toread) to_read++;
-		if (dev->towrite) {
-			to_write++;
-			if (!test_bit(R5_OVERWRITE, &dev->flags))
-				non_overwrite++;
-		}
-		if (dev->written) written++;
-		rdev = rcu_dereference(conf->disks[i].rdev);
-		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
-			/* The ReadError flag will just be confusing now */
-			clear_bit(R5_ReadError, &dev->flags);
-			clear_bit(R5_ReWrite, &dev->flags);
-		}
-		if (!rdev || !test_bit(In_sync, &rdev->flags)
-		    || test_bit(R5_ReadError, &dev->flags)) {
-			if ( failed < 2 )
-				failed_num[failed] = i;
-			failed++;
-		} else
-			set_bit(R5_Insync, &dev->flags);
-	}
-	rcu_read_unlock();
-	PRINTK("locked=%d uptodate=%d to_read=%d"
-	       " to_write=%d failed=%d failed_num=%d,%d\n",
-	       locked, uptodate, to_read, to_write, failed,
-	       failed_num[0], failed_num[1]);
-	/* check if the array has lost >2 devices and, if so, some requests might
-	 * need to be failed
-	 */
-	if (failed > 2 && to_read+to_write+written) {
-		for (i=disks; i--; ) {
-			int bitmap_end = 0;
-
-			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
-				mdk_rdev_t *rdev;
-				rcu_read_lock();
-				rdev = rcu_dereference(conf->disks[i].rdev);
-				if (rdev && test_bit(In_sync, &rdev->flags))
-					/* multiple read failures in one stripe */
-					md_error(conf->mddev, rdev);
-				rcu_read_unlock();
-			}
-
-			spin_lock_irq(&conf->device_lock);
-			/* fail all writes first */
-			bi = sh->dev[i].towrite;
-			sh->dev[i].towrite = NULL;
-			if (bi) { to_write--; bitmap_end = 1; }
-
-			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-				wake_up(&conf->wait_for_overlap);
-
-			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (--bi->bi_phys_segments == 0) {
-					md_write_end(conf->mddev);
-					bi->bi_next = return_bi;
-					return_bi = bi;
-				}
-				bi = nextbi;
-			}
-			/* and fail all 'written' */
-			bi = sh->dev[i].written;
-			sh->dev[i].written = NULL;
-			if (bi) bitmap_end = 1;
-			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
-				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
-				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (--bi->bi_phys_segments == 0) {
-					md_write_end(conf->mddev);
-					bi->bi_next = return_bi;
-					return_bi = bi;
-				}
-				bi = bi2;
-			}
-
-			/* fail any reads if this device is non-operational */
-			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
-			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
-				bi = sh->dev[i].toread;
-				sh->dev[i].toread = NULL;
-				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-					wake_up(&conf->wait_for_overlap);
-				if (bi) to_read--;
-				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-					clear_bit(BIO_UPTODATE, &bi->bi_flags);
-					if (--bi->bi_phys_segments == 0) {
-						bi->bi_next = return_bi;
-						return_bi = bi;
-					}
-					bi = nextbi;
-				}
-			}
-			spin_unlock_irq(&conf->device_lock);
-			if (bitmap_end)
-				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-						STRIPE_SECTORS, 0, 0);
-		}
-	}
-	if (failed > 2 && syncing) {
-		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
-		clear_bit(STRIPE_SYNCING, &sh->state);
-		syncing = 0;
-	}
-
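/*
 * Illustrative sketch, not part of the original diff: the removed
 * RAID-6 path tracks up to two failed disks in failed_num[].  Whether
 * P or Q itself is among the casualties is then just a membership
 * test, as modelled here (indices are assumed examples).
 */
#include <stdio.h>

static int is_failed(const int *failed_num, int failed, int idx)
{
	return (failed >= 1 && failed_num[0] == idx) ||
	       (failed >= 2 && failed_num[1] == idx);
}

int main(void)
{
	int failed_num[2] = { 1, 4 };	/* assumed: disks 1 and 4 are gone */
	int pd_idx = 4, qd_idx = 0;	/* assumed P/Q positions */

	printf("p_failed=%d q_failed=%d\n",
	       is_failed(failed_num, 2, pd_idx),
	       is_failed(failed_num, 2, qd_idx));	/* prints 1 and 0 */
	return 0;
}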
-	/*
-	 * might be able to return some write requests if the parity blocks
-	 * are safe, or on a failed drive
-	 */
-	pdev = &sh->dev[pd_idx];
-	p_failed = (failed >= 1 && failed_num[0] == pd_idx)
-		|| (failed >= 2 && failed_num[1] == pd_idx);
-	qdev = &sh->dev[qd_idx];
-	q_failed = (failed >= 1 && failed_num[0] == qd_idx)
-		|| (failed >= 2 && failed_num[1] == qd_idx);
-
-	if ( written &&
-	     ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
-			     && !test_bit(R5_LOCKED, &pdev->flags)
-			     && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
-	     ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
-			     && !test_bit(R5_LOCKED, &qdev->flags)
-			     && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
-		/* any written block on an uptodate or failed drive can be
-		 * returned.  Note that if we 'wrote' to a failed drive,
-		 * it will be UPTODATE, but never LOCKED, so we don't need
-		 * to test 'failed' directly.
-		 */
-		for (i=disks; i--; )
-			if (sh->dev[i].written) {
-				dev = &sh->dev[i];
-				if (!test_bit(R5_LOCKED, &dev->flags) &&
-				    test_bit(R5_UPTODATE, &dev->flags) ) {
-					/* We can return any write requests */
-					int bitmap_end = 0;
-					struct bio *wbi, *wbi2;
-					PRINTK("Return write for stripe %llu disc %d\n",
-					       (unsigned long long)sh->sector, i);
-					spin_lock_irq(&conf->device_lock);
-					wbi = dev->written;
-					dev->written = NULL;
-					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-						wbi2 = r5_next_bio(wbi, dev->sector);
-						if (--wbi->bi_phys_segments == 0) {
-							md_write_end(conf->mddev);
-							wbi->bi_next = return_bi;
-							return_bi = wbi;
-						}
-						wbi = wbi2;
-					}
-					if (dev->towrite == NULL)
-						bitmap_end = 1;
-					spin_unlock_irq(&conf->device_lock);
-					if (bitmap_end)
-						bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-								STRIPE_SECTORS,
-								!test_bit(STRIPE_DEGRADED, &sh->state), 0);
-				}
-			}
-	}
-
-	/* Now we might consider reading some blocks, either to check/generate
-	 * parity, or to satisfy requests
-	 * or to load a block that is being partially written.
-	 */
-	if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
-		for (i=disks; i--;) {
-			dev = &sh->dev[i];
-			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
-			    (dev->toread ||
-			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
-			     syncing ||
-			     (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
-			     (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
-				    )
-				) {
-				/* we would like to get this block, possibly
-				 * by computing it, but we might not be able to
-				 */
-				if (uptodate == disks-1) {
-					PRINTK("Computing stripe %llu block %d\n",
-					       (unsigned long long)sh->sector, i);
-					compute_block_1(sh, i, 0);
-					uptodate++;
-				} else if ( uptodate == disks-2 && failed >= 2 ) {
-					/* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
-					int other;
-					for (other=disks; other--;) {
-						if ( other == i )
-							continue;
-						if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
-							break;
-					}
-					BUG_ON(other < 0);
-					PRINTK("Computing stripe %llu blocks %d,%d\n",
-					       (unsigned long long)sh->sector, i, other);
-					compute_block_2(sh, i, other);
-					uptodate += 2;
-				} else if (test_bit(R5_Insync, &dev->flags)) {
-					set_bit(R5_LOCKED, &dev->flags);
-					set_bit(R5_Wantread, &dev->flags);
-#if 0
-					/* if I am just reading this block and we don't have
-					   a failed drive, or any pending writes then sidestep the cache */
-					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
-					    !syncing && !failed && !to_write) {
-						sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
-						sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
-					}
-#endif
-					locked++;
-					PRINTK("Reading block %d (sync=%d)\n",
-						i, syncing);
-				}
-			}
-		}
-		set_bit(STRIPE_HANDLE, &sh->state);
-	}
-
-	/* now to consider writing and what else, if anything should be read */
-	if (to_write) {
-		int rcw=0, must_compute=0;
-		for (i=disks ; i--;) {
-			dev = &sh->dev[i];
-			/* Would I have to read this buffer for reconstruct_write */
-			if (!test_bit(R5_OVERWRITE, &dev->flags)
-			    && i != pd_idx && i != qd_idx
-			    && (!test_bit(R5_LOCKED, &dev->flags)
-#if 0
-				|| sh->bh_page[i] != bh->b_page
-#endif
-				    ) &&
-			    !test_bit(R5_UPTODATE, &dev->flags)) {
-				if (test_bit(R5_Insync, &dev->flags)) rcw++;
-				else {
-					PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
-					must_compute++;
-				}
-			}
-		}
-		PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
-		       (unsigned long long)sh->sector, rcw, must_compute);
-		set_bit(STRIPE_HANDLE, &sh->state);
-
-		if (rcw > 0)
-			/* want reconstruct write, but need to get some data */
-			for (i=disks; i--;) {
-				dev = &sh->dev[i];
-				if (!test_bit(R5_OVERWRITE, &dev->flags)
-				    && !(failed == 0 && (i == pd_idx || i == qd_idx))
-				    && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
-				    test_bit(R5_Insync, &dev->flags)) {
-					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
-					{
-						PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
-						       (unsigned long long)sh->sector, i);
-						set_bit(R5_LOCKED, &dev->flags);
-						set_bit(R5_Wantread, &dev->flags);
-						locked++;
-					} else {
-						PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
-						       (unsigned long long)sh->sector, i);
-						set_bit(STRIPE_DELAYED, &sh->state);
-						set_bit(STRIPE_HANDLE, &sh->state);
-					}
-				}
-			}
-		/* now if nothing is locked, and if we have enough data, we can start a write request */
-		if (locked == 0 && rcw == 0 &&
-		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
-			if ( must_compute > 0 ) {
-				/* We have failed blocks and need to compute them */
-				switch ( failed ) {
-				case 0:	BUG();
-				case 1: compute_block_1(sh, failed_num[0], 0); break;
-				case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
-				default: BUG();	/* This request should have been failed? */
-				}
-			}
-
-			PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
-			compute_parity6(sh, RECONSTRUCT_WRITE);
-			/* now every locked buffer is ready to be written */
-			for (i=disks; i--;)
-				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
-					PRINTK("Writing stripe %llu block %d\n",
-					       (unsigned long long)sh->sector, i);
-					locked++;
-					set_bit(R5_Wantwrite, &sh->dev[i].flags);
-				}
-			/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
-			set_bit(STRIPE_INSYNC, &sh->state);
-
-			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-				atomic_dec(&conf->preread_active_stripes);
-				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
-					md_wakeup_thread(conf->mddev->thread);
-			}
-		}
-	}
-
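/*
 * Illustrative sketch, not part of the original diff: when a
 * reconstruct-write finds unreadable blocks, the removed RAID-6 code
 * picks a recovery routine purely from the failure count, as in:
 */
enum recover { RECOVER_BUG, RECOVER_ONE, RECOVER_TWO };

static enum recover pick_recovery(int failed)
{
	switch (failed) {
	case 1:  return RECOVER_ONE;	/* compute_block_1(): XOR against P */
	case 2:  return RECOVER_TWO;	/* compute_block_2(): needs P and Q */
	default: return RECOVER_BUG;	/* 0 never reaches here; >2 already failed */
	}
}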
-	/* maybe we need to check and possibly fix the parity for this stripe
-	 * Any reads will already have been scheduled, so we just see if enough data
-	 * is available
-	 */
-	if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
-		int update_p = 0, update_q = 0;
-		struct r5dev *dev;
-
-		set_bit(STRIPE_HANDLE, &sh->state);
-
-		BUG_ON(failed>2);
-		BUG_ON(uptodate < disks);
-		/* Want to check and possibly repair P and Q.
-		 * However there could be one 'failed' device, in which
-		 * case we can only check one of them, possibly using the
-		 * other to generate missing data
-		 */
-
-		/* If !tmp_page, we cannot do the calculations,
-		 * but as we have set STRIPE_HANDLE, we will soon be called
-		 * by stripe_handle with a tmp_page - just wait until then.
-		 */
-		if (tmp_page) {
-			if (failed == q_failed) {
-				/* The only possible failed device holds 'Q', so it makes
-				 * sense to check P (If anything else were failed, we would
-				 * have used P to recreate it).
-				 */
-				compute_block_1(sh, pd_idx, 1);
-				if (!page_is_zero(sh->dev[pd_idx].page)) {
-					compute_block_1(sh,pd_idx,0);
-					update_p = 1;
-				}
-			}
-			if (!q_failed && failed < 2) {
-				/* q is not failed, and we didn't use it to generate
-				 * anything, so it makes sense to check it
-				 */
-				memcpy(page_address(tmp_page),
-				       page_address(sh->dev[qd_idx].page),
-				       STRIPE_SIZE);
-				compute_parity6(sh, UPDATE_PARITY);
-				if (memcmp(page_address(tmp_page),
-					   page_address(sh->dev[qd_idx].page),
-					   STRIPE_SIZE)!= 0) {
-					clear_bit(STRIPE_INSYNC, &sh->state);
-					update_q = 1;
-				}
-			}
-			if (update_p || update_q) {
-				conf->mddev->resync_mismatches += STRIPE_SECTORS;
-				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
-					/* don't try to repair!! */
-					update_p = update_q = 0;
-			}
-
-			/* now write out any block on a failed drive,
-			 * or P or Q if they need it
-			 */
-
-			if (failed == 2) {
-				dev = &sh->dev[failed_num[1]];
-				locked++;
-				set_bit(R5_LOCKED, &dev->flags);
-				set_bit(R5_Wantwrite, &dev->flags);
-			}
-			if (failed >= 1) {
-				dev = &sh->dev[failed_num[0]];
-				locked++;
-				set_bit(R5_LOCKED, &dev->flags);
-				set_bit(R5_Wantwrite, &dev->flags);
-			}
-
-			if (update_p) {
-				dev = &sh->dev[pd_idx];
-				locked ++;
-				set_bit(R5_LOCKED, &dev->flags);
-				set_bit(R5_Wantwrite, &dev->flags);
-			}
-			if (update_q) {
-				dev = &sh->dev[qd_idx];
-				locked++;
-				set_bit(R5_LOCKED, &dev->flags);
-				set_bit(R5_Wantwrite, &dev->flags);
-			}
-			clear_bit(STRIPE_DEGRADED, &sh->state);
-
-			set_bit(STRIPE_INSYNC, &sh->state);
-		}
-	}
-
-	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
-		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
-		clear_bit(STRIPE_SYNCING, &sh->state);
-	}
-
-	/* If the failed drives are just a ReadError, then we might need
-	 * to progress the repair/check process
-	 */
-	if (failed <= 2 && !conf->mddev->ro)
-		for (i=0; i<2; i++) {
-			dev = &sh->dev[failed_num[i]];
-			if (test_bit(R5_ReadError, &dev->flags)
-			    && !test_bit(R5_LOCKED, &dev->flags)
-			    && test_bit(R5_UPTODATE, &dev->flags)
-				) {
-				if (!test_bit(R5_ReWrite, &dev->flags)) {
-					set_bit(R5_Wantwrite, &dev->flags);
-					set_bit(R5_ReWrite, &dev->flags);
-					set_bit(R5_LOCKED, &dev->flags);
-				} else {
-					/* let's read it back */
-					set_bit(R5_Wantread, &dev->flags);
-					set_bit(R5_LOCKED, &dev->flags);
-				}
-			}
-		}
-	spin_unlock(&sh->lock);
-
-	while ((bi=return_bi)) {
-		int bytes = bi->bi_size;
-
-		return_bi = bi->bi_next;
-		bi->bi_next = NULL;
-		bi->bi_size = 0;
-		bi->bi_end_io(bi, bytes, 0);
-	}
-	for (i=disks; i-- ;) {
-		int rw;
-		struct bio *bi;
-		mdk_rdev_t *rdev;
-		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
-			rw = 1;
-		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-			rw = 0;
-		else
-			continue;
-
-		bi = &sh->dev[i].req;
-
-		bi->bi_rw = rw;
-		if (rw)
-			bi->bi_end_io = raid5_end_write_request;
-		else
-			bi->bi_end_io = raid5_end_read_request;
-
-		rcu_read_lock();
-		rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && test_bit(Faulty, &rdev->flags))
-			rdev = NULL;
-		if (rdev)
-			atomic_inc(&rdev->nr_pending);
-		rcu_read_unlock();
-
-		if (rdev) {
-			if (syncing)
-				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
-
-			bi->bi_bdev = rdev->bdev;
-			PRINTK("for %llu schedule op %ld on disc %d\n",
-				(unsigned long long)sh->sector, bi->bi_rw, i);
-			atomic_inc(&sh->count);
-			bi->bi_sector = sh->sector + rdev->data_offset;
-			bi->bi_flags = 1 << BIO_UPTODATE;
-			bi->bi_vcnt = 1;
-			bi->bi_max_vecs = 1;
-			bi->bi_idx = 0;
-			bi->bi_io_vec = &sh->dev[i].vec;
-			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
-			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
-			bi->bi_next = NULL;
-			if (rw == WRITE &&
-			    test_bit(R5_ReWrite, &sh->dev[i].flags))
-				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
-			generic_make_request(bi);
-		} else {
-			if (rw == 1)
-				set_bit(STRIPE_DEGRADED, &sh->state);
-			PRINTK("skip op %ld on disc %d for sector %llu\n",
-				bi->bi_rw, i, (unsigned long long)sh->sector);
-			clear_bit(R5_LOCKED, &sh->dev[i].flags);
-			set_bit(STRIPE_HANDLE, &sh->state);
-		}
-	}
-}
-
-static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
-{
-	if (sh->raid_conf->level == 6)
-		handle_stripe6(sh, tmp_page);
-	else
-		handle_stripe5(sh);
-}
-
-
-
-static void raid5_activate_delayed(raid5_conf_t *conf)
-{
-	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
-		while (!list_empty(&conf->delayed_list)) {
-			struct list_head *l = conf->delayed_list.next;
-			struct stripe_head *sh;
-			sh = list_entry(l, struct stripe_head, lru);
-			list_del_init(l);
-			clear_bit(STRIPE_DELAYED, &sh->state);
-			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
-				atomic_inc(&conf->preread_active_stripes);
-			list_add_tail(&sh->lru, &conf->handle_list);
-		}
-	}
-}
-
-static void activate_bit_delay(raid5_conf_t *conf)
-{
-	/* device_lock is held */
-	struct list_head head;
-	list_add(&head, &conf->bitmap_list);
-	list_del_init(&conf->bitmap_list);
-	while (!list_empty(&head)) {
-		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
-		list_del_init(&sh->lru);
-		atomic_inc(&sh->count);
-		__release_stripe(conf, sh);
-	}
-}
-
-static void unplug_slaves(mddev_t *mddev)
-{
-	raid5_conf_t *conf = mddev_to_conf(mddev);
-	int i;
+static void unplug_slaves(mddev_t *mddev)
+{
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+	int i;

 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
@@ -2597,16 +1519,24 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
 	return ret;
 }

-static int make_request(request_queue_t *q, struct bio * bi)
+static inline void raid5_plug_device(raid5_conf_t *conf)
+{
+	spin_lock_irq(&conf->device_lock);
+	blk_plug_device(conf->mddev->queue);
+	spin_unlock_irq(&conf->device_lock);
+}
+
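/*
 * Illustrative sketch, not part of the original diff: the added
 * raid5_plug_device() above takes device_lock only to mark the queue
 * plugged, which briefly delays dispatch so handle_stripe() can batch
 * full-stripe writes.  A minimal userspace model of plug/unplug
 * batching (all names hypothetical):
 */
#include <stdio.h>

struct queue { int plugged; int queued; };

static void plug(struct queue *q)   { q->plugged = 1; }
static void submit(struct queue *q) { q->queued++; }

static void unplug(struct queue *q)
{
	if (q->plugged) {
		q->plugged = 0;
		printf("dispatching %d batched requests\n", q->queued);
		q->queued = 0;
	}
}

int main(void)
{
	struct queue q = { 0, 0 };

	plug(&q);
	submit(&q);
	submit(&q);	/* two writes merged into one dispatch */
	unplug(&q);
	return 0;
}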
@@ -2625,79 +1555,20 @@ static int make_request(request_queue_t *q, struct bio * bi)
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
-		int disks, data_disks;
-
-	retry:
-		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
-		if (likely(conf->expand_progress == MaxSector))
-			disks = conf->raid_disks;
-		else {
-			/* spinlock is needed as expand_progress may be
-			 * 64bit on a 32bit platform, and so it might be
-			 * possible to see a half-updated value
-			 * Ofcourse expand_progress could change after
-			 * the lock is dropped, so once we get a reference
-			 * to the stripe that we think it is, we will have
-			 * to check again.
-			 */
-			spin_lock_irq(&conf->device_lock);
-			disks = conf->raid_disks;
-			if (logical_sector >= conf->expand_progress)
-				disks = conf->previous_raid_disks;
-			else {
-				if (logical_sector >= conf->expand_lo) {
-					spin_unlock_irq(&conf->device_lock);
-					schedule();
-					goto retry;
-				}
-			}
-			spin_unlock_irq(&conf->device_lock);
-		}
-		data_disks = disks - conf->max_degraded;
+
+		new_sector = raid5_compute_sector(logical_sector,
+						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);
-		new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
-						  &dd_idx, &pd_idx, conf);
 		PRINTK("raid5: make_request, sector %llu logical %llu\n",
 			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
-		sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
+	retry:
+		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
 		if (sh) {
-			if (unlikely(conf->expand_progress != MaxSector)) {
-				/* expansion might have moved on while waiting for a
-				 * stripe, so we must do the range check again.
-				 * Expansion could still move past after this
-				 * test, but as we are holding a reference to
-				 * 'sh', we know that if that happens,
-				 * STRIPE_EXPANDING will get set and the expansion
-				 * won't proceed until we finish with the stripe.
-				 */
-				int must_retry = 0;
-				spin_lock_irq(&conf->device_lock);
-				if (logical_sector < conf->expand_progress &&
-				    disks == conf->previous_raid_disks)
-					/* mismatch, need to try again */
-					must_retry = 1;
-				spin_unlock_irq(&conf->device_lock);
-				if (must_retry) {
-					release_stripe(sh);
-					goto retry;
-				}
-			}
-			/* FIXME what if we get a false positive because these
-			 * are being updated.
-			 */
-			if (logical_sector >= mddev->suspend_lo &&
-			    logical_sector < mddev->suspend_hi) {
-				release_stripe(sh);
-				schedule();
-				goto retry;
-			}
-
-			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
-			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
-				/* Stripe is busy expanding or
-				 * add failed due to overlap.  Flush everything
+			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
+				/* Add failed due to overlap.  Flush everything
 				 * and wait a while
 				 */
 				raid5_unplug_device(mddev->queue);
@@ -2706,8 +1577,10 @@ static int make_request(request_queue_t *q, struct bio * bi)
 				goto retry;
 			}
 			finish_wait(&conf->wait_for_overlap, &w);
-			handle_stripe(sh, NULL);
+			raid5_plug_device(conf);
+			handle_stripe(sh);
 			release_stripe(sh);
+
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2717,182 +1590,52 @@ static int make_request(request_queue_t *q, struct bio * bi)
 	}
 	spin_lock_irq(&conf->device_lock);
-	remaining = --bi->bi_phys_segments;
-	spin_unlock_irq(&conf->device_lock);
-	if (remaining == 0) {
+	if (--bi->bi_phys_segments == 0) {
 		int bytes = bi->bi_size;
 
-		if ( rw == WRITE )
+		if ( bio_data_dir(bi) == WRITE )
 			md_write_end(mddev);
 		bi->bi_size = 0;
 		bi->bi_end_io(bi, bytes, 0);
 	}
-	return 0;
-}
-
-static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
-{
-	/* reshaping is quite different to recovery/resync so it is
-	 * handled quite separately ... here.
-	 *
-	 * On each call to sync_request, we gather one chunk worth of
-	 * destination stripes and flag them as expanding.
-	 * Then we find all the source stripes and request reads.
-	 * As the reads complete, handle_stripe will copy the data
-	 * into the destination stripe and release that stripe.
-	 */
-	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
-	struct stripe_head *sh;
-	int pd_idx;
-	sector_t first_sector, last_sector;
-	int raid_disks;
-	int data_disks;
-	int i;
-	int dd_idx;
-	sector_t writepos, safepos, gap;
-
-	if (sector_nr == 0 &&
-	    conf->expand_progress != 0) {
-		/* restarting in the middle, skip the initial sectors */
-		sector_nr = conf->expand_progress;
-		sector_div(sector_nr, conf->raid_disks-1);
-		*skipped = 1;
-		return sector_nr;
-	}
-
-	/* we update the metadata when there is more than 3Meg
-	 * in the block range (that is rather arbitrary, should
-	 * probably be time based) or when the data about to be
-	 * copied would over-write the source of the data at
-	 * the front of the range.
-	 * i.e. one new_stripe forward from expand_progress new_maps
-	 * to after where expand_lo old_maps to
-	 */
-	writepos = conf->expand_progress +
-		conf->chunk_size/512*(conf->raid_disks-1);
-	sector_div(writepos, conf->raid_disks-1);
-	safepos = conf->expand_lo;
-	sector_div(safepos, conf->previous_raid_disks-1);
-	gap = conf->expand_progress - conf->expand_lo;
-
-	if (writepos >= safepos ||
-	    gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
-		/* Cannot proceed until we've updated the superblock... */
-		wait_event(conf->wait_for_overlap,
-			   atomic_read(&conf->reshape_stripes)==0);
-		mddev->reshape_position = conf->expand_progress;
-		mddev->sb_dirty = 1;
-		md_wakeup_thread(mddev->thread);
-		wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
-			   kthread_should_stop());
-		spin_lock_irq(&conf->device_lock);
-		conf->expand_lo = mddev->reshape_position;
-		spin_unlock_irq(&conf->device_lock);
-		wake_up(&conf->wait_for_overlap);
-	}
-
-	for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
-		int j;
-		int skipped = 0;
-		pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
-		sh = get_active_stripe(conf, sector_nr+i,
-				       conf->raid_disks, pd_idx, 0);
-		set_bit(STRIPE_EXPANDING, &sh->state);
-		atomic_inc(&conf->reshape_stripes);
-		/* If any of this stripe is beyond the end of the old
-		 * array, then we need to zero those blocks
-		 */
-		for (j=sh->disks; j--;) {
-			sector_t s;
-			if (j == sh->pd_idx)
-				continue;
-			s = compute_blocknr(sh, j);
-			if (s < (mddev->array_size<<1)) {
-				skipped = 1;
-				continue;
-			}
-			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
-			set_bit(R5_Expanded, &sh->dev[j].flags);
-			set_bit(R5_UPTODATE, &sh->dev[j].flags);
-		}
-		if (!skipped) {
-			set_bit(STRIPE_EXPAND_READY, &sh->state);
-			set_bit(STRIPE_HANDLE, &sh->state);
-		}
-		release_stripe(sh);
-	}
-	spin_lock_irq(&conf->device_lock);
-	conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
 	spin_unlock_irq(&conf->device_lock);
-	/* Ok, those stripe are ready. We can start scheduling
-	 * reads on the source stripes.
-	 * The source stripes are determined by mapping the first and last
-	 * block on the destination stripes.
-	 */
-	raid_disks = conf->previous_raid_disks;
-	data_disks = raid_disks - 1;
-	first_sector =
-		raid5_compute_sector(sector_nr*(conf->raid_disks-1),
-				     raid_disks, data_disks,
-				     &dd_idx, &pd_idx, conf);
-	last_sector =
-		raid5_compute_sector((sector_nr+conf->chunk_size/512)
-				     *(conf->raid_disks-1) -1,
-				     raid_disks, data_disks,
-				     &dd_idx, &pd_idx, conf);
-	if (last_sector >= (mddev->size<<1))
-		last_sector = (mddev->size<<1)-1;
-	while (first_sector <= last_sector) {
-		pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
-		sh = get_active_stripe(conf, first_sector,
-				       conf->previous_raid_disks, pd_idx, 0);
-		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
-		set_bit(STRIPE_HANDLE, &sh->state);
-		release_stripe(sh);
-		first_sector += STRIPE_SECTORS;
-	}
-	return conf->chunk_size>>9;
+	return 0;
 }
 
 /* FIXME go_faster isn't used */
-static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 	struct stripe_head *sh;
-	int pd_idx;
+	int sectors_per_chunk = conf->chunk_size >> 9;
+	sector_t x;
+	unsigned long stripe;
+	int chunk_offset;
+	int dd_idx, pd_idx;
+	sector_t first_sector;
 	int raid_disks = conf->raid_disks;
+	int data_disks = raid_disks-1;
 	sector_t max_sector = mddev->size << 1;
 	int sync_blocks;
-	int still_degraded = 0;
-	int i;
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
 		unplug_slaves(mddev);
-		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
-			end_reshape(conf);
-			return 0;
-		}
 
 		if (mddev->curr_resync < max_sector) /* aborted */
 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
 					&sync_blocks, 1);
-		else /* completed sync */
+		else /* compelted sync */
 			conf->fullsync = 0;
 		bitmap_close_sync(mddev->bitmap);
 
 		return 0;
 	}
-
-	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		return reshape_request(mddev, sector_nr, skipped);
-
-	/* if there is too many failed drives and we are trying
+	/* if there is 1 or more failed drives and we are trying
 	 * to resync, then assert that we are finished, because there is
 	 * nothing we can do.
	 */
-	if (mddev->degraded >= conf->max_degraded &&
-	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		sector_t rv = (mddev->size << 1) - sector_nr;
 		*skipped = 1;
 		return rv;
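Both sides of the make_request() tail above complete the original bio only when its bi_phys_segments count, one per stripe-sized piece, drops to zero. A small self-contained sketch of that countdown pattern, using hypothetical fake_bio/piece_done names:

    /* one bio split across several stripes; last piece completes it */
    #include <stdio.h>

    struct fake_bio {
    	int phys_segments;	/* outstanding stripe-sized pieces */
    	int size;
    };

    static void end_io(struct fake_bio *b)
    {
    	printf("bio complete, %d bytes\n", b->size);
    	b->size = 0;
    }

    static void piece_done(struct fake_bio *b)
    {
    	if (--b->phys_segments == 0)	/* last piece finishes the bio */
    		end_io(b);
    }

    int main(void)
    {
    	struct fake_bio b = { .phys_segments = 3, .size = 12288 };
    	piece_done(&b);
    	piece_done(&b);
    	piece_done(&b);		/* only this call prints completion */
    	return 0;
    }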
@@ -2906,31 +1649,28 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
 	}
 
-	pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
-	sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
+	x = sector_nr;
+	chunk_offset = sector_div(x, sectors_per_chunk);
+	stripe = x;
+	BUG_ON(x != stripe);
+
+	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
+		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
+	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
 	if (sh == NULL) {
-		sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
+		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
 		/* make sure we don't swamp the stripe cache if someone else
-		 * is trying to get access
+		 * is trying to get access 
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
-	/* Need to check if array will still be degraded after recovery/resync
-	 * We don't need to check the 'failed' flag as when that gets set,
-	 * recovery aborts.
-	 */
-	for (i=0; i<conf->raid_disks; i++)
-		if (conf->disks[i].rdev == NULL)
-			still_degraded = 1;
-
-	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
-
-	spin_lock(&sh->lock);
+	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
+	spin_lock(&sh->lock);
 	set_bit(STRIPE_SYNCING, &sh->state);
 	clear_bit(STRIPE_INSYNC, &sh->state);
 	spin_unlock(&sh->lock);
 
-	handle_stripe(sh, NULL);
+	handle_stripe(sh);
 	release_stripe(sh);
 
 	return STRIPE_SECTORS;
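The replacement sync_request() code above splits sector_nr into a chunk number and an offset inside the chunk with sector_div(), which divides a 64-bit value in place and returns the remainder. A runnable stand-in, where my_sector_div is an assumed userspace substitute for the kernel helper:

    #include <stdio.h>

    /* userspace stand-in for the kernel's sector_div() */
    static unsigned int my_sector_div(unsigned long long *n, unsigned int base)
    {
    	unsigned int rem = *n % base;
    	*n /= base;
    	return rem;
    }

    int main(void)
    {
    	unsigned long long x = 100000;			/* sector_nr */
    	unsigned int sectors_per_chunk = (64 * 1024) / 512;	/* 64K chunk */
    	unsigned int chunk_offset = my_sector_div(&x, sectors_per_chunk);

    	/* prints: chunk 781, offset 32 sectors */
    	printf("chunk %llu, offset %u sectors\n", x, chunk_offset);
    	return 0;
    }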
@@ -2958,7 +1698,7 @@ static void raid5d (mddev_t *mddev)
 	while (1) {
 		struct list_head *first;
 
-		if (conf->seq_flush != conf->seq_write) {
+		if (conf->seq_flush - conf->seq_write > 0) {
 			int seq = conf->seq_flush;
 			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
@@ -2981,11 +1721,12 @@ static void raid5d (mddev_t *mddev)
 		list_del_init(first);
 		atomic_inc(&sh->count);
-		BUG_ON(atomic_read(&sh->count)!= 1);
+		if (atomic_read(&sh->count)!= 1)
+			BUG();
 		spin_unlock_irq(&conf->device_lock);
 
 		handled++;
-		handle_stripe(sh, conf->spare_page);
+		handle_stripe(sh);
 		release_stripe(sh);
 
 		spin_lock_irq(&conf->device_lock);
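The new raid5d() test above compares the bitmap batch counters as a difference (conf->seq_flush - conf->seq_write > 0) rather than with a plain !=. Assuming the counters are plain ints, as the idiom suggests, the subtraction form stays correct when the counters wrap; a short demonstration:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int seq_write = 0xfffffffe;	/* about to wrap */
    	unsigned int seq_flush = 0x00000001;	/* wrapped past it */

    	/* signed view of the unsigned difference: 3, "flush is ahead" */
    	if ((int)(seq_flush - seq_write) > 0)
    		printf("flush ahead by %d batch(es)\n",
    		       (int)(seq_flush - seq_write));

    	/* a direct ordered compare gets this wrong near the wrap */
    	printf("naive compare says: %s\n",
    	       seq_flush > seq_write ? "ahead" : "behind (wrong!)");
    	return 0;
    }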
@@ -3075,81 +1816,23 @@ static int run(mddev_t *mddev)
 	struct disk_info *disk;
 	struct list_head *tmp;
 
-	if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
-		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
+	if (mddev->level != 5 && mddev->level != 4) {
+		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
 		       mdname(mddev), mddev->level);
 		return -EIO;
 	}
-	if (mddev->reshape_position != MaxSector) {
-		/* Check that we can continue the reshape.
-		 * Currently only disks can change, it must
-		 * increase, and we must be past the point where
-		 * a stripe over-writes itself
-		 */
-		sector_t here_new, here_old;
-		int old_disks;
-
-		if (mddev->new_level != mddev->level ||
-		    mddev->new_layout != mddev->layout ||
-		    mddev->new_chunk != mddev->chunk_size) {
-			printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
-			       mdname(mddev));
-			return -EINVAL;
-		}
-		if (mddev->delta_disks <= 0) {
-			printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
-			       mdname(mddev));
-			return -EINVAL;
-		}
-		old_disks = mddev->raid_disks - mddev->delta_disks;
-		/* reshape_position must be on a new-stripe boundary, and one
-		 * further up in new geometry must map after here in old geometry.
-		 */
-		here_new = mddev->reshape_position;
-		if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
-			printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
-			return -EINVAL;
-		}
-		/* here_new is the stripe we will write to */
-		here_old = mddev->reshape_position;
-		sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
-		/* here_old is the first stripe that we might need to read from */
-		if (here_new >= here_old) {
-			/* Reading from the same stripe as writing to - bad */
-			printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
-			return -EINVAL;
-		}
-		printk(KERN_INFO "raid5: reshape will continue\n");
-		/* OK, we should be able to continue; */
-	}
-
-
-	mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
+	mddev->private = kzalloc(sizeof (raid5_conf_t)
+				 + mddev->raid_disks * sizeof(struct disk_info),
+				 GFP_KERNEL);
 	if ((conf = mddev->private) == NULL)
 		goto abort;
-	if (mddev->reshape_position == MaxSector) {
-		conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
-	} else {
-		conf->raid_disks = mddev->raid_disks;
-		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
-	}
-
-	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
-			      GFP_KERNEL);
-	if (!conf->disks)
-		goto abort;
-
 	conf->mddev = mddev;
 
 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
 		goto abort;
 
-	if (mddev->level == 6) {
-		conf->spare_page = alloc_page(GFP_KERNEL);
-		if (!conf->spare_page)
-			goto abort;
-	}
 	spin_lock_init(&conf->device_lock);
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
@@ -3164,7 +1847,7 @@ static int run(mddev_t *mddev)
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		raid_disk = rdev->raid_disk;
-		if (raid_disk >= conf->raid_disks
+		if (raid_disk >= mddev->raid_disks
 		    || raid_disk < 0)
 			continue;
 		disk = conf->disks + raid_disk;
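The new run() above folds the disk_info table into the tail of the raid5_conf_t allocation, one kzalloc instead of two. A sketch of that trailing-array pattern using a C99 flexible array member; the struct names are stand-ins for the kernel's:

    #include <stdio.h>
    #include <stdlib.h>

    struct disk_info { int raid_disk; };

    struct conf {
    	int raid_disks;
    	struct disk_info disks[];	/* storage follows the struct */
    };

    int main(void)
    {
    	int n = 4;
    	struct conf *c = calloc(1, sizeof(*c) + n * sizeof(struct disk_info));
    	if (!c)
    		return 1;
    	c->raid_disks = n;
    	for (int i = 0; i < n; i++)
    		c->disks[i].raid_disk = i;	/* no second allocation */
    	printf("disk 3 -> %d\n", c->disks[3].raid_disk);
    	free(c);			/* one allocation, one free */
    	return 0;
    }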
@@ -3180,30 +1863,21 @@ static int run(mddev_t *mddev)
 		}
 	}
 
+	conf->raid_disks = mddev->raid_disks;
 	/*
-	 * 0 for a fully functional array, 1 or 2 for a degraded array.
+	 * 0 for a fully functional array, 1 for a degraded array.
 	 */
 	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
 	conf->mddev = mddev;
 	conf->chunk_size = mddev->chunk_size;
 	conf->level = mddev->level;
-	if (conf->level == 6)
-		conf->max_degraded = 2;
-	else
-		conf->max_degraded = 1;
 	conf->algorithm = mddev->layout;
 	conf->max_nr_stripes = NR_STRIPES;
-	conf->expand_progress = mddev->reshape_position;
 
 	/* device size must be a multiple of chunk size */
 	mddev->size &= ~(mddev->chunk_size/1024 -1);
 	mddev->resync_max_sectors = mddev->size << 1;
 
-	if (conf->level == 6 && conf->raid_disks < 4) {
-		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
-		       mdname(mddev), conf->raid_disks);
-		goto abort;
-	}
 	if (!conf->chunk_size || conf->chunk_size % 4) {
 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
 		       conf->chunk_size, mdname(mddev));
@@ -3215,14 +1889,14 @@ static int run(mddev_t *mddev)
 		       conf->algorithm, mdname(mddev));
 		goto abort;
 	}
-	if (mddev->degraded > conf->max_degraded) {
+	if (mddev->degraded > 1) {
 		printk(KERN_ERR "raid5: not enough operational devices for %s"
 		       " (%d/%d failed)\n",
 		       mdname(mddev), conf->failed_disks, conf->raid_disks);
 		goto abort;
 	}
 
-	if (mddev->degraded > 0 &&
+	if (mddev->degraded == 1 &&
 	    mddev->recovery_cp != MaxSector) {
 		if (mddev->ok_start_degraded)
 			printk(KERN_WARNING
@@ -3271,25 +1945,12 @@ static int run(mddev_t *mddev)
 
 	print_raid5_conf(conf);
 
-	if (conf->expand_progress != MaxSector) {
-		printk("...ok start reshape thread\n");
-		conf->expand_lo = conf->expand_progress;
-		atomic_set(&conf->reshape_stripes, 0);
-		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-							"%s_reshape");
-	}
-
 	/* read-ahead size must cover two whole stripes, which is
-	 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
+	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
 	 */
 	{
-		int data_disks = conf->previous_raid_disks - conf->max_degraded;
-		int stripe = data_disks *
-			(mddev->chunk_size / PAGE_SIZE);
+		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
+			/ PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
 	}
@@ -3299,15 +1960,12 @@ static int run(mddev_t *mddev)
 	mddev->queue->unplug_fn = raid5_unplug_device;
 	mddev->queue->issue_flush_fn = raid5_issue_flush;
 
-	mddev->array_size =  mddev->size * (conf->previous_raid_disks -
-					    conf->max_degraded);
+	mddev->array_size =  mddev->size * (mddev->raid_disks - 1);
 
 	return 0;
 abort:
 	if (conf) {
 		print_raid5_conf(conf);
-		safe_put_page(conf->spare_page);
-		kfree(conf->disks);
 		kfree(conf->stripe_hashtbl);
 		kfree(conf);
 	}
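The read-ahead sizing above asks the backing device to cover two full stripes, i.e. 2 * (raid_disks - 1) data chunks expressed in pages. The same arithmetic with example numbers:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
    	int raid_disks = 5;		/* 4 data + 1 parity */
    	int chunk_size = 64 * 1024;	/* bytes */

    	int stripe_pages = (raid_disks - 1) * chunk_size / PAGE_SIZE;
    	int ra_pages = 2 * stripe_pages;

    	/* prints: one stripe = 64 pages, read-ahead = 128 pages (512 KiB) */
    	printf("one stripe = %d pages, read-ahead = %d pages (%d KiB)\n",
    	       stripe_pages, ra_pages, ra_pages * PAGE_SIZE / 1024);
    	return 0;
    }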
@@ -3328,30 +1986,29 @@ static int stop(mddev_t *mddev)
 	kfree(conf->stripe_hashtbl);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
-	kfree(conf->disks);
 	kfree(conf);
 	mddev->private = NULL;
 	return 0;
 }
 
 #if RAID5_DEBUG
-static void print_sh (struct seq_file *seq, struct stripe_head *sh)
+static void print_sh (struct stripe_head *sh)
 {
 	int i;
 
-	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
-		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
-	seq_printf(seq, "sh %llu,  count %d.\n",
-		   (unsigned long long)sh->sector, atomic_read(&sh->count));
-	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
-	for (i = 0; i < sh->disks; i++) {
-		seq_printf(seq, "(cache%d: %p %ld) ",
-			   i, sh->dev[i].page, sh->dev[i].flags);
+	printk("sh %llu, pd_idx %d, state %ld.\n",
+		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
+	printk("sh %llu,  count %d.\n",
+		(unsigned long long)sh->sector, atomic_read(&sh->count));
+	printk("sh %llu, ", (unsigned long long)sh->sector);
+	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
+		printk("(cache%d: %p %ld) ",
+			i, sh->dev[i].page, sh->dev[i].flags);
 	}
-	seq_printf(seq, "\n");
+	printk("\n");
 }
 
-static void printall (struct seq_file *seq, raid5_conf_t *conf)
+static void printall (raid5_conf_t *conf)
 {
 	struct stripe_head *sh;
 	struct hlist_node *hn;
@@ -3362,7 +2019,7 @@ static void printall (struct seq_file *seq, raid5_conf_t *conf)
 		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
 			if (sh->raid_conf != conf)
 				continue;
-			print_sh(seq, sh);
+			print_sh(sh);
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
@@ -3382,8 +2039,9 @@ static void status (struct seq_file *seq, mddev_t *mddev)
 			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
 	seq_printf (seq, "]");
 #if RAID5_DEBUG
-	seq_printf (seq, "\n");
-	printall(seq, conf);
+#define D(x) \
+	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
+	printall(conf);
 #endif
 }
 
@@ -3467,20 +2125,14 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	int disk;
 	struct disk_info *p;
 
-	if (mddev->degraded > conf->max_degraded)
+	if (mddev->degraded > 1)
 		/* no point adding a device */
 		return 0;
 
 	/*
-	 * find the disk ... but prefer rdev->saved_raid_disk
-	 * if possible.
+	 * find the disk ...
 	 */
-	if (rdev->saved_raid_disk >= 0 &&
-	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
-		disk = rdev->saved_raid_disk;
-	else
-		disk = 0;
-	for ( ; disk < conf->raid_disks; disk++)
+	for (disk=0; disk < mddev->raid_disks; disk++)
 		if ((p=conf->disks + disk)->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
 			rdev->raid_disk = disk;
@@ -3503,10 +2155,8 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 	 * any io in the removed space completes, but it hardly seems
 	 * worth it.
 	 */
-	raid5_conf_t *conf = mddev_to_conf(mddev);
-
 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
-	mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
+	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
 	set_capacity(mddev->gendisk, mddev->array_size << 1);
 	mddev->changed = 1;
 	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
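raid5_resize() above rounds the new per-device size down to a whole number of chunks (the mask assumes a power-of-two chunk size, as the kernel code does) and recomputes the exported array size in 1K blocks, hence the final >>1. Worked through with example values:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long sectors = 1000200;	/* new per-device size */
    	unsigned int chunk_sectors = 64 * 1024 / 512;	/* 128 */
    	int raid_disks = 5;

    	/* round down to a whole number of chunks */
    	sectors &= ~((unsigned long long)chunk_sectors - 1);
    	/* data disks * sectors, divided by 2 to get 1K blocks */
    	unsigned long long array_kb = (sectors * (raid_disks - 1)) >> 1;

    	/* prints: rounded to 1000192 sectors/device, array = 2000384 KiB */
    	printf("rounded to %llu sectors/device, array = %llu KiB\n",
    	       sectors, array_kb);
    	return 0;
    }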
@@ -3518,158 +2168,11 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 	return 0;
 }
 
-#ifdef CONFIG_MD_RAID5_RESHAPE
-static int raid5_check_reshape(mddev_t *mddev)
-{
-	raid5_conf_t *conf = mddev_to_conf(mddev);
-	int err;
-
-	if (mddev->delta_disks < 0 ||
-	    mddev->new_level != mddev->level)
-		return -EINVAL; /* Cannot shrink array or change level yet */
-	if (mddev->delta_disks == 0)
-		return 0; /* nothing to do */
-
-	/* Can only proceed if there are plenty of stripe_heads.
-	 * We need a minimum of one full stripe,, and for sensible progress
-	 * it is best to have about 4 times that.
-	 * If we require 4 times, then the default 256 4K stripe_heads will
-	 * allow for chunk sizes up to 256K, which is probably OK.
-	 * If the chunk size is greater, user-space should request more
-	 * stripe_heads first.
-	 */
-	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
-	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
-		printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
-		       (mddev->chunk_size / STRIPE_SIZE)*4);
-		return -ENOSPC;
-	}
-
-	err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
-	if (err)
-		return err;
-
-	/* looks like we might be able to manage this */
-	return 0;
-}
-
-static int raid5_start_reshape(mddev_t *mddev)
-{
-	raid5_conf_t *conf = mddev_to_conf(mddev);
-	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
-	int spares = 0;
-	int added_devices = 0;
-
-	if (mddev->degraded ||
-	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-		return -EBUSY;
-
-	ITERATE_RDEV(mddev, rdev, rtmp)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags))
-			spares++;
-
-	if (spares < mddev->delta_disks-1)
-		/* Not enough devices even to make a degraded array
-		 * of that size
-		 */
-		return -EINVAL;
-
-	atomic_set(&conf->reshape_stripes, 0);
-	spin_lock_irq(&conf->device_lock);
-	conf->previous_raid_disks = conf->raid_disks;
-	conf->raid_disks += mddev->delta_disks;
-	conf->expand_progress = 0;
-	conf->expand_lo = 0;
-	spin_unlock_irq(&conf->device_lock);
-
-	/* Add some new drives, as many as will fit.
-	 * We know there are enough to make the newly sized array work.
-	 */
-	ITERATE_RDEV(mddev, rdev, rtmp)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags)) {
-			if (raid5_add_disk(mddev, rdev)) {
-				char nm[20];
-				set_bit(In_sync, &rdev->flags);
-				conf->working_disks++;
-				added_devices++;
-				rdev->recovery_offset = 0;
-				sprintf(nm, "rd%d", rdev->raid_disk);
-				sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
-			} else
-				break;
-		}
-
-	mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
-	mddev->raid_disks = conf->raid_disks;
-	mddev->reshape_position = 0;
-	mddev->sb_dirty = 1;
-
-	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-						"%s_reshape");
-	if (!mddev->sync_thread) {
-		mddev->recovery = 0;
-		spin_lock_irq(&conf->device_lock);
-		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
-		conf->expand_progress = MaxSector;
-		spin_unlock_irq(&conf->device_lock);
-		return -EAGAIN;
-	}
-	md_wakeup_thread(mddev->sync_thread);
-	md_new_event(mddev);
-	return 0;
-}
-#endif
-
-static void end_reshape(raid5_conf_t *conf)
-{
-	struct block_device *bdev;
-
-	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
-		set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
-		conf->mddev->changed = 1;
-
-		bdev = bdget_disk(conf->mddev->gendisk, 0);
-		if (bdev) {
-			mutex_lock(&bdev->bd_inode->i_mutex);
-			i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
-			mutex_unlock(&bdev->bd_inode->i_mutex);
-			bdput(bdev);
-		}
-		spin_lock_irq(&conf->device_lock);
-		conf->expand_progress = MaxSector;
-		spin_unlock_irq(&conf->device_lock);
-		conf->mddev->reshape_position = MaxSector;
-
-		/* read-ahead size must cover two whole stripes, which is
-		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
-		 */
-		{
-			int data_disks = conf->previous_raid_disks - conf->max_degraded;
-			int stripe = data_disks *
-				(conf->mddev->chunk_size / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
-		}
-	}
-}
-
 static void raid5_quiesce(mddev_t *mddev, int state)
 {
 	raid5_conf_t *conf = mddev_to_conf(mddev);
 
 	switch(state) {
-	case 2: /* resume for a suspend */
-		wake_up(&conf->wait_for_overlap);
-		break;
-
 	case 1: /* stop all writes */
 		spin_lock_irq(&conf->device_lock);
 		conf->quiesce = 1;
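The removed raid5_check_reshape() above refuses to reshape unless the stripe cache holds at least four chunks' worth of stripe_heads, which the default 256 covers for chunks up to 256K. The same check with example numbers:

    #include <stdio.h>

    #define STRIPE_SIZE 4096	/* one page per stripe_head per device */

    int main(void)
    {
    	int max_nr_stripes = 256;	/* default cache size */
    	int chunk_size = 256 * 1024;	/* largest chunk the default covers */

    	int needed = (chunk_size / STRIPE_SIZE) * 4;
    	/* prints: need 256 stripe_heads, have 256: ok */
    	printf("need %d stripe_heads, have %d: %s\n", needed, max_nr_stripes,
    	       needed > max_nr_stripes ? "grow the cache first" : "ok");
    	return 0;
    }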
@@ -3683,29 +2186,11 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 		spin_lock_irq(&conf->device_lock);
 		conf->quiesce = 0;
 		wake_up(&conf->wait_for_stripe);
-		wake_up(&conf->wait_for_overlap);
 		spin_unlock_irq(&conf->device_lock);
 		break;
 	}
 }
-static struct mdk_personality raid6_personality =
-{
-	.name		= "raid6",
-	.level		= 6,
-	.owner		= THIS_MODULE,
-	.make_request	= make_request,
-	.run		= run,
-	.stop		= stop,
-	.status		= status,
-	.error_handler	= error,
-	.hot_add_disk	= raid5_add_disk,
-	.hot_remove_disk= raid5_remove_disk,
-	.spare_active	= raid5_spare_active,
-	.sync_request	= sync_request,
-	.resize		= raid5_resize,
-	.quiesce	= raid5_quiesce,
-};
 
 static struct mdk_personality raid5_personality =
 {
 	.name		= "raid5",
@@ -3721,10 +2206,6 @@ static struct mdk_personality raid5_personality =
 	.spare_active	= raid5_spare_active,
 	.sync_request	= sync_request,
 	.resize		= raid5_resize,
-#ifdef CONFIG_MD_RAID5_RESHAPE
-	.check_reshape	= raid5_check_reshape,
-	.start_reshape	= raid5_start_reshape,
-#endif
 	.quiesce	= raid5_quiesce,
 };
 
@@ -3748,12 +2229,6 @@ static struct mdk_personality raid4_personality =
 
 static int __init raid5_init(void)
 {
-	int e;
-
-	e = raid6_select_algo();
-	if ( e )
-		return e;
-	register_md_personality(&raid6_personality);
 	register_md_personality(&raid5_personality);
 	register_md_personality(&raid4_personality);
 	return 0;
@@ -3761,7 +2236,6 @@ static int __init raid5_init(void)
 
 static void raid5_exit(void)
 {
-	unregister_md_personality(&raid6_personality);
 	unregister_md_personality(&raid5_personality);
 	unregister_md_personality(&raid4_personality);
 }
@@ -3774,10 +2248,3 @@ MODULE_ALIAS("md-raid5");
 MODULE_ALIAS("md-raid4");
 MODULE_ALIAS("md-level-5");
 MODULE_ALIAS("md-level-4");
-MODULE_ALIAS("md-personality-8"); /* RAID6 */
-MODULE_ALIAS("md-raid6");
-MODULE_ALIAS("md-level-6");
-
-/* This used to be two separate modules, they were: */
-MODULE_ALIAS("raid5");
-MODULE_ALIAS("raid6");
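The init/exit pair above registers and unregisters each personality symmetrically, each personality being a named table of function pointers. A self-contained sketch of that ops-table pattern; every name here (personality, register_p, run5) is illustrative, not md API:

    #include <stdio.h>

    struct personality {
    	const char *name;
    	int (*run)(void);
    };

    static struct personality *registry[4];

    static void register_p(struct personality *p)
    {
    	for (int i = 0; i < 4; i++)
    		if (!registry[i]) { registry[i] = p; return; }
    }

    static void unregister_p(struct personality *p)
    {
    	for (int i = 0; i < 4; i++)
    		if (registry[i] == p) registry[i] = NULL;
    }

    static int run5(void) { return 5; }

    int main(void)
    {
    	struct personality raid5 = { "raid5", run5 };

    	register_p(&raid5);		/* module init */
    	printf("%s -> %d\n", registry[0]->name, registry[0]->run());
    	unregister_p(&raid5);		/* module exit undoes init */
    	return 0;
    }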