#define RAID6_DEBUG 0 /* Extremely verbose printk */
#define RAID6_PARANOIA 1 /* Check spinlocks */
#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
-#if RAID6_PARANOIA && CONFIG_SMP
+#if RAID6_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
md_error(conf->mddev, conf->disks[i].rdev);
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
}
- atomic_dec(&conf->disks[i].rdev->nr_pending);
+ rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
/* must restore b_page before unlocking buffer... */
if (sh->bh_page[i] != bh->b_page) {
if (!uptodate)
md_error(conf->mddev, conf->disks[i].rdev);
- atomic_dec(&conf->disks[i].rdev->nr_pending);
+ rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
if (!rdev->faulty) {
mddev->sb_dirty = 1;
- conf->working_disks--;
if (rdev->in_sync) {
+ conf->working_disks--;
mddev->degraded++;
conf->failed_disks++;
rdev->in_sync = 0;
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (to_read || non_overwrite || (syncing && (uptodate+failed < disks))) {
+ if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
for (i=disks; i--;) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
PRINTK("Reading block %d (sync=%d)\n",
i, syncing);
if (syncing)
- md_sync_acct(conf->disks[i].rdev, STRIPE_SECTORS);
+ md_sync_acct(conf->disks[i].rdev->bdev,
+ STRIPE_SECTORS);
}
}
}
if (rdev) {
if (test_bit(R5_Syncio, &sh->dev[i].flags))
- md_sync_acct(rdev, STRIPE_SECTORS);
+ md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
PRINTK("for %llu schedule op %ld on disc %d\n",
static void unplug_slaves(mddev_t *mddev)
{
- /* note: this is always called with device_lock held */
+ /*
+ * Kick the unplug function of each member device's queue so that
+ * queued requests get issued.  The old code relied on the caller
+ * holding device_lock; this version takes the lock itself and drops
+ * it around the unplug_fn callback, bumping rdev->nr_pending first
+ * so the rdev is pinned while the lock is not held.
+ */
raid6_conf_t *conf = mddev_to_conf(mddev);
int i;
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = conf->disks[i].rdev;
- if (rdev && !rdev->faulty) {
- struct block_device *bdev = rdev->bdev;
- if (bdev) {
- request_queue_t *r_queue = bdev_get_queue(bdev);
- if (r_queue && r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
- }
+ /* only devices with requests outstanding need an unplug */
+ if (rdev && atomic_read(&rdev->nr_pending)) {
+ request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+
+ /* pin the rdev, then release the lock for the callback */
+ atomic_inc(&rdev->nr_pending);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+
+ if (r_queue && r_queue->unplug_fn)
+ r_queue->unplug_fn(r_queue);
+
+ spin_lock_irqsave(&conf->device_lock, flags);
+ atomic_dec(&rdev->nr_pending);
+ /* NOTE(review): unlike raid6_issue_flush, rdev->faulty is not
+ * rechecked and rdev->bdev is assumed non-NULL here — confirm
+ * md core keeps bdev valid while nr_pending is raised.
+ */
}
}
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
static void raid6_unplug_device(request_queue_t *q)
unplug_slaves(mddev);
}
+/*
+ * raid6_issue_flush - forward a cache-flush request to every member disk.
+ * @q:            the md device's request queue (q->queuedata is the mddev)
+ * @disk:         gendisk the flush was issued against (not used here)
+ * @error_sector: out-parameter passed through to each member's issue_flush_fn
+ *
+ * Returns 0 on success, -EOPNOTSUPP if any working member's queue has no
+ * issue_flush_fn, or the first non-zero error returned by a member flush.
+ * Faulty members and members without a bdev or queue are skipped.
+ */
+static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ mddev_t *mddev = q->queuedata;
+ raid6_conf_t *conf = mddev_to_conf(mddev);
+ int i, ret = 0;
+
+ for (i=0; i<mddev->raid_disks; i++) {
+ mdk_rdev_t *rdev = conf->disks[i].rdev;
+ if (rdev && !rdev->faulty) {
+ struct block_device *bdev = rdev->bdev;
+ request_queue_t *r_queue;
+
+ if (!bdev)
+ continue;
+
+ r_queue = bdev_get_queue(bdev);
+ if (!r_queue)
+ continue;
+
+ /* one member that cannot flush makes the whole flush unsupported */
+ if (!r_queue->issue_flush_fn) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
static inline void raid6_plug_device(raid6_conf_t *conf)
{
spin_lock_irq(&conf->device_lock);
atomic_set(&conf->preread_active_stripes, 0);
mddev->queue->unplug_fn = raid6_unplug_device;
+ mddev->queue->issue_flush_fn = raid6_issue_flush;
PRINTK("raid6: run(%s) called.\n", mdname(mddev));
conf->algorithm = mddev->layout;
conf->max_nr_stripes = NR_STRIPES;
+ /* device size must be a multiple of chunk size */
+ mddev->size &= ~(mddev->chunk_size/1024 -1);
+
if (conf->raid_disks < 4) {
printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
mdname(mddev), conf->raid_disks);
return found;
}
+/*
+ * raid6_resize - change the amount of each member device the array uses.
+ * @mddev:   the array being resized
+ * @sectors: new usable size of each member device, in 512-byte sectors
+ *
+ * Rounds @sectors down to a whole number of chunks, recomputes the exported
+ * array size (raid_disks - 2 data devices, since two hold P and Q parity),
+ * and, when the array grew, marks the added region as needing resync.
+ * Always returns 0.
+ */
+static int raid6_resize(mddev_t *mddev, sector_t sectors)
+{
+ /* no resync is happening, and there is enough space
+ * on all devices, so we can resize.
+ * We need to make sure resync covers any new space.
+ * If the array is shrinking we should possibly wait until
+ * any io in the removed space completes, but it hardly seems
+ * worth it.
+ */
+ sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+ /* array_size is in KB (sectors >> 1); capacity is back in sectors */
+ mddev->array_size = (sectors * (mddev->raid_disks-2))>>1;
+ set_capacity(mddev->gendisk, mddev->array_size << 1);
+ mddev->changed = 1;
+ if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
+ /* array grew: resync from the old per-device size (KB -> sectors) */
+ mddev->recovery_cp = mddev->size << 1;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ mddev->size = sectors /2;
+ return 0;
+}
+
static mdk_personality_t raid6_personality=
{
.name = "raid6",
.hot_remove_disk= raid6_remove_disk,
.spare_active = raid6_spare_active,
.sync_request = sync_request,
+ .resize = raid6_resize,
};
static int __init raid6_init (void)