#include <linux/raid/raid1.h>
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
/*
* Number of guaranteed r1bios in case of extreme VM load:
*/
{
struct pool_info *pi = data;
r1bio_t *r1_bio;
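+ /* r1bio_t ends in a zero-length bios[] array, so offsetof() to element
+ * raid_disks yields the struct size plus one bio pointer per disk */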
+ int size = offsetof(r1bio_t, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
- r1_bio = kmalloc(sizeof(r1bio_t) + sizeof(struct bio*)*pi->raid_disks,
- gfp_flags);
+ r1_bio = kmalloc(size, gfp_flags);
if (r1_bio)
- memset(r1_bio, 0, sizeof(*r1_bio) +
- sizeof(struct bio*) * pi->raid_disks);
+ memset(r1_bio, 0, size);
else
unplug_slaves(pi->mddev);
bio->bi_io_vec[i].bv_page = page;
}
- r1_bio->master_bio = bio;
+ r1_bio->master_bio = NULL;
return r1_bio;
spin_unlock_irqrestore(&conf->resync_lock, flags);
}
-static int map(mddev_t *mddev, mdk_rdev_t **rdevp)
-{
- conf_t *conf = mddev_to_conf(mddev);
- int i, disks = conf->raid_disks;
-
- /*
- * Later we do read balancing on the read side
- * now we use the first available disk.
- */
-
- spin_lock_irq(&conf->device_lock);
- for (i = 0; i < disks; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
- if (rdev && rdev->in_sync) {
- *rdevp = rdev;
- atomic_inc(&rdev->nr_pending);
- spin_unlock_irq(&conf->device_lock);
- return i;
- }
- }
- spin_unlock_irq(&conf->device_lock);
-
- printk(KERN_ERR "raid1_map(): huh, no more operational devices?\n");
- return -1;
-}
-
static void reschedule_retry(r1bio_t *r1_bio)
{
unsigned long flags;
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
- bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
+ if (printk_ratelimit())
+ printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+ bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio);
}
*
* The rdev for the device selected will have nr_pending incremented.
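* Returns the index of the chosen disk, or -1 when no in-sync device is available.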
*/
-static int read_balance(conf_t *conf, struct bio *bio, r1bio_t *r1_bio)
+static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
const unsigned long this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
- const int sectors = bio->bi_size >> 9;
+ const int sectors = r1_bio->sectors;
sector_t new_distance, current_distance;
spin_lock_irq(&conf->device_lock);
*/
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* make sure that disk is operational */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
while (!conf->mirrors[new_disk].rdev ||
!conf->mirrors[new_disk].rdev->in_sync) {
new_disk++;
if (new_disk == conf->raid_disks) {
- new_disk = 0;
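+ /* no in-sync device left: report failure to the caller */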
+ new_disk = -1;
break;
}
}
new_disk = conf->raid_disks;
new_disk--;
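+ /* came full circle without finding an operational disk */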
if (new_disk == disk) {
- new_disk = conf->last_used;
+ new_disk = -1;
goto rb_out;
}
}
} while (disk != conf->last_used);
rb_out:
- r1_bio->read_disk = new_disk;
- conf->next_seq_sect = this_sector + sectors;
- conf->last_used = new_disk;
- if (conf->mirrors[new_disk].rdev)
+ if (new_disk >= 0) {
+ conf->next_seq_sect = this_sector + sectors;
+ conf->last_used = new_disk;
atomic_inc(&conf->mirrors[new_disk].rdev->nr_pending);
+ }
spin_unlock_irq(&conf->device_lock);
return new_disk;
unplug_slaves(q->queuedata);
}
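+/*
+ * Forward a flush request to every operational member device and stop at
+ * the first failure; the failing queue may report a position via *error_sector.
+ */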
+static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ mddev_t *mddev = q->queuedata;
+ conf_t *conf = mddev_to_conf(mddev);
+ unsigned long flags;
+ int i, ret = 0;
+
+ spin_lock_irqsave(&conf->device_lock, flags);
+ for (i = 0; i < mddev->raid_disks; i++) {
+ mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ if (rdev && !rdev->faulty) {
+ struct block_device *bdev = rdev->bdev;
+ request_queue_t *r_queue = bdev_get_queue(bdev);
+
+ if (r_queue->issue_flush_fn) {
+ ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
+ if (ret)
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return ret;
+}
+
/*
* Throttle resync depth, so that we can both get proper overlapping of
* requests, but are still able to handle normal requests quickly.
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector;
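+ /* r1_bio may be a recycled mempool object, so clear its state explicitly */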
+ r1_bio->state = 0;
+
if (bio_data_dir(bio) == READ) {
/*
* read balancing logic:
*/
- mirror = conf->mirrors + read_balance(conf, bio, r1_bio);
+ int rdisk = read_balance(conf, r1_bio);
+
+ if (rdisk < 0) {
+ /* couldn't find anywhere to read from */
+ raid_end_bio_io(r1_bio);
+ return 0;
+ }
+ mirror = conf->mirrors + rdisk;
+
+ r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
- r1_bio->bios[r1_bio->read_disk] = read_bio;
+ r1_bio->bios[rdisk] = read_bio;
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
atomic_inc(&conf->mirrors[i].rdev->nr_pending);
atomic_inc(&r1_bio->remaining);
- md_sync_acct(conf->mirrors[i].rdev, wbio->bi_size >> 9);
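+ /* resync accounting is keyed on the member's block_device */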
+ md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
generic_make_request(wbio);
}
} else {
int disk;
bio = r1_bio->bios[r1_bio->read_disk];
- if ((disk=map(mddev, &rdev)) == -1) {
+ if ((disk = read_balance(conf, r1_bio)) == -1) {
printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
" read error for block %llu\n",
bdevname(bio->bi_bdev,b),
r1_bio->bios[r1_bio->read_disk] = NULL;
r1_bio->read_disk = disk;
r1_bio->bios[r1_bio->read_disk] = bio;
- printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
- " another mirror\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)r1_bio->sector);
+ rdev = conf->mirrors[disk].rdev;
+ if (printk_ratelimit())
+ printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
+ " another mirror\n",
+ bdevname(rdev->bdev,b),
+ (unsigned long long)r1_bio->sector);
bio->bi_bdev = rdev->bdev;
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_rw = READ;
bio = r1_bio->bios[disk];
r1_bio->sectors = nr_sectors;
- md_sync_acct(mirror->rdev, nr_sectors);
+ md_sync_acct(mirror->rdev->bdev, nr_sectors);
generic_make_request(bio);
mddev->queue->unplug_fn = raid1_unplug;
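+ /* flushes fan out to all mirrors via raid1_issue_flush() above */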
+ mddev->queue->issue_flush_fn = raid1_issue_flush;
ITERATE_RDEV(mddev, rdev, tmp) {
disk_idx = rdev->raid_disk;
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
if (!rdev->faulty && rdev->in_sync)