/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
+ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
+#include <linux/idr.h>
static const char *_name = DM_NAME;
-#define MAX_DEVICES 1024
static unsigned int major = 0;
static unsigned int _major = 0;
int error;
struct bio *bio;
atomic_t io_count;
+ unsigned long start_time;
};
/*
union map_info info;
};
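+/*
+ * For a bio that dm has mapped, bi_private points at its target_io, so
+ * the per-target map_info can be recovered from the bio itself.
+ */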
+union map_info *dm_get_mapinfo(struct bio *bio)
+{
+ if (bio && bio->bi_private)
+ return &((struct target_io *)bio->bi_private)->info;
+ return NULL;
+}
+
/*
* Bits for the md->flags field.
*/
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
-#define DMF_FS_LOCKED 2
+#define DMF_FROZEN 2
struct mapped_device {
- struct rw_semaphore lock;
+ struct rw_semaphore io_lock;
+ struct semaphore suspend_lock;
rwlock_t map_lock;
atomic_t holders;
request_queue_t *queue;
struct gendisk *disk;
+ void *interface_ptr;
+
/*
* A list of ios that arrived while we were suspended.
*/
/*
* Event handling.
*/
- uint32_t event_nr;
+ atomic_t event_nr;
wait_queue_head_t eventq;
/*
* freeze/thaw support requires holding onto a super block
*/
struct super_block *frozen_sb;
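+ /* bdev obtained with bdget_disk() for the duration of a suspend */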
+ struct block_device *suspended_bdev;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
-static __init int local_init(void)
+static struct bio_set *dm_set;
+
+static int __init local_init(void)
{
int r;
+ dm_set = bioset_create(16, 16, 4);
+ if (!dm_set)
+ return -ENOMEM;
+
/* allocate a slab for the dm_ios */
_io_cache = kmem_cache_create("dm_io",
sizeof(struct dm_io), 0, 0, NULL, NULL);
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
+ bioset_free(dm_set);
+
if (unregister_blkdev(_major, _name) < 0)
DMERR("devfs_unregister_blkdev failed");
DMINFO("cleaned up");
}
-/*
- * We have a lot of init/exit functions, so it seems easier to
- * store them in an array. The disposable macro 'xx'
- * expands a prefix into a pair of function names.
- */
-static struct {
- int (*init) (void);
- void (*exit) (void);
-
-} _inits[] = {
-#define xx(n) {n ## _init, n ## _exit},
- xx(local)
- xx(dm_target)
- xx(dm_linear)
- xx(dm_stripe)
- xx(dm_interface)
-#undef xx
+static int (*_inits[])(void) __initdata = {
+ local_init,
+ dm_target_init,
+ dm_linear_init,
+ dm_stripe_init,
+ dm_interface_init,
+};
+
+static void (*_exits[])(void) = {
+ local_exit,
+ dm_target_exit,
+ dm_linear_exit,
+ dm_stripe_exit,
+ dm_interface_exit,
};
static int __init dm_init(void)
int r, i;
for (i = 0; i < count; i++) {
- r = _inits[i].init();
+ r = _inits[i]();
if (r)
goto bad;
}
bad:
while (i--)
- _inits[i].exit();
+ _exits[i]();
return r;
}
static void __exit dm_exit(void)
{
- int i = ARRAY_SIZE(_inits);
+ int i = ARRAY_SIZE(_exits);
while (i--)
- _inits[i].exit();
+ _exits[i]();
}
/*
mempool_free(tio, md->tio_pool);
}
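+/*
+ * Per-device I/O accounting: keep the gendisk's in_flight count and
+ * ticks statistics up to date.  end_io_acct() returns non-zero when the
+ * last outstanding io finishes, so dec_pending() knows when to wake a
+ * waiting suspend.
+ */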
+static void start_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+
+ io->start_time = jiffies;
+
+ preempt_disable();
+ disk_round_stats(dm_disk(md));
+ preempt_enable();
+ dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+}
+
+static int end_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+ struct bio *bio = io->bio;
+ unsigned long duration = jiffies - io->start_time;
+ int pending;
+ int rw = bio_data_dir(bio);
+
+ preempt_disable();
+ disk_round_stats(dm_disk(md));
+ preempt_enable();
+ dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+
+ disk_stat_add(dm_disk(md), ticks[rw], duration);
+
+ return !pending;
+}
+
/*
* Add the bio to the list of deferred io.
*/
static int queue_io(struct mapped_device *md, struct bio *bio)
{
- down_write(&md->lock);
+ down_write(&md->io_lock);
if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
- up_write(&md->lock);
+ up_write(&md->io_lock);
return 1;
}
bio_list_add(&md->deferred, bio);
- up_write(&md->lock);
+ up_write(&md->io_lock);
return 0; /* deferred successfully */
}
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary.
*/
-static inline void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, int error)
{
if (error)
io->error = error;
if (atomic_dec_and_test(&io->io_count)) {
- if (atomic_dec_and_test(&io->md->pending))
+ if (end_io_acct(io))
/* nudge anyone waiting on suspend queue */
wake_up(&io->md->wait);
*/
if (ti->split_io) {
sector_t boundary;
- boundary = dm_round_up(offset + 1, ti->split_io) - offset;
-
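+ /*
+ * 'boundary' is the distance from 'offset' to the next split_io
+ * boundary; the mask arithmetic assumes ti->split_io is a power of two.
+ */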
+ boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
+ - offset;
if (len > boundary)
len = boundary;
}
/* error the io and bail out */
struct dm_io *io = tio->io;
free_tio(tio->io->md, tio);
- dec_pending(io, -EIO);
+ dec_pending(io, r);
+ bio_put(clone);
}
}
unsigned short idx;
};
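+/*
+ * Clones are allocated from the private dm_set bio_set, so they need a
+ * destructor that returns them to that pool.
+ */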
+static void dm_bio_destructor(struct bio *bio)
+{
+ bio_free(bio, dm_set);
+}
+
/*
* Creates a little bio that just does part of a bvec.
*/
struct bio *clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
- clone = bio_alloc(GFP_NOIO, 1);
- memcpy(clone->bi_io_vec, bv, sizeof(*bv));
+ clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
+ clone->bi_destructor = dm_bio_destructor;
+ *clone->bi_io_vec = *bv;
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
} else {
/*
- * Create two copy bios to deal with io that has
- * been split across a target.
+ * Handle a bvec that must be split between two or more targets.
*/
struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+ sector_t remaining = to_sector(bv->bv_len);
+ unsigned int offset = 0;
- clone = split_bvec(bio, ci->sector, ci->idx,
- bv->bv_offset, max);
- __map_bio(ti, clone, tio);
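+ /*
+ * Walk the bvec, mapping min(remaining, max) sectors to each
+ * successive target until the whole bvec has been issued.
+ */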
+ do {
+ if (offset) {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ max = max_io_len(ci->md, ci->sector, ti);
- ci->sector += max;
- ci->sector_count -= max;
- ti = dm_table_find_target(ci->map, ci->sector);
-
- len = to_sector(bv->bv_len) - max;
- clone = split_bvec(bio, ci->sector, ci->idx,
- bv->bv_offset + to_bytes(max), len);
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
- __map_bio(ti, clone, tio);
+ tio = alloc_tio(ci->md);
+ tio->io = ci->io;
+ tio->ti = ti;
+ memset(&tio->info, 0, sizeof(tio->info));
+ }
+
+ len = min(remaining, max);
+
+ clone = split_bvec(bio, ci->sector, ci->idx,
+ bv->bv_offset + offset, len);
+
+ __map_bio(ti, clone, tio);
+
+ ci->sector += len;
+ ci->sector_count -= len;
+ offset += to_bytes(len);
+ } while (remaining -= len);
- ci->sector += len;
- ci->sector_count -= len;
ci->idx++;
}
}
ci.sector_count = bio_sectors(bio);
ci.idx = bio->bi_idx;
- atomic_inc(&md->pending);
+ start_io_acct(ci.io);
while (ci.sector_count)
__clone_and_map(&ci);
static int dm_request(request_queue_t *q, struct bio *bio)
{
int r;
+ int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
- down_read(&md->lock);
+ down_read(&md->io_lock);
+
+ disk_stat_inc(dm_disk(md), ios[rw]);
+ disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
/*
* If we're suspended we have to queue
* this io for later.
*/
while (test_bit(DMF_BLOCK_IO, &md->flags)) {
- up_read(&md->lock);
+ up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
bio_io_error(bio, bio->bi_size);
* We're in a while loop, because someone could suspend
* before we get to the following read lock.
*/
- down_read(&md->lock);
+ down_read(&md->io_lock);
}
__split_bio(md, bio);
- up_read(&md->lock);
+ up_read(&md->io_lock);
return 0;
}
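+/*
+ * Hand a flush request off to the live table; wired up below as the
+ * queue's issue_flush_fn.
+ */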
+static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ int ret = -ENXIO;
+
+ if (map) {
+ ret = dm_table_flush_all(map);
+ dm_table_put(map);
+ }
+
+ return ret;
+}
+
static void dm_unplug_all(request_queue_t *q)
{
struct mapped_device *md = q->queuedata;
}
/*-----------------------------------------------------------------
- * A bitset is used to keep track of allocated minor numbers.
+ * An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static spinlock_t _minor_lock = SPIN_LOCK_UNLOCKED;
-static unsigned long _minor_bits[MAX_DEVICES / BITS_PER_LONG];
+static DECLARE_MUTEX(_minor_lock);
+static DEFINE_IDR(_minor_idr);
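+/*
+ * The IDR maps each allocated minor to its mapped_device, which lets
+ * dm_find_md() translate a dev_t straight back to the md.
+ */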
static void free_minor(unsigned int minor)
{
- spin_lock(&_minor_lock);
- clear_bit(minor, _minor_bits);
- spin_unlock(&_minor_lock);
+ down(&_minor_lock);
+ idr_remove(&_minor_idr, minor);
+ up(&_minor_lock);
}
/*
* See if the device with a specific minor # is free.
*/
-static int specific_minor(unsigned int minor)
+static int specific_minor(struct mapped_device *md, unsigned int minor)
{
- int r = -EBUSY;
+ int r, m;
- if (minor >= MAX_DEVICES) {
- DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)",
- MAX_DEVICES);
+ if (minor >= (1 << MINORBITS))
return -EINVAL;
+
+ down(&_minor_lock);
+
+ if (idr_find(&_minor_idr, minor)) {
+ r = -EBUSY;
+ goto out;
}
- spin_lock(&_minor_lock);
- if (!test_and_set_bit(minor, _minor_bits))
- r = 0;
- spin_unlock(&_minor_lock);
+ r = idr_pre_get(&_minor_idr, GFP_KERNEL);
+ if (!r) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = idr_get_new_above(&_minor_idr, md, minor, &m);
+ if (r) {
+ goto out;
+ }
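+ /*
+ * idr_get_new_above() allocates the lowest free id >= minor; if that
+ * isn't the requested minor, undo the allocation and report busy.
+ */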
+ if (m != minor) {
+ idr_remove(&_minor_idr, m);
+ r = -EBUSY;
+ goto out;
+ }
+
+out:
+ up(&_minor_lock);
return r;
}
-static int next_free_minor(unsigned int *minor)
+static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
- int r = -EBUSY;
+ int r;
unsigned int m;
- spin_lock(&_minor_lock);
- m = find_first_zero_bit(_minor_bits, MAX_DEVICES);
- if (m != MAX_DEVICES) {
- set_bit(m, _minor_bits);
- *minor = m;
- r = 0;
+ down(&_minor_lock);
+
+ r = idr_pre_get(&_minor_idr, GFP_KERNEL);
+ if (!r) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = idr_get_new(&_minor_idr, md, &m);
+ if (r) {
+ goto out;
+ }
+
+ if (m >= (1 << MINORBITS)) {
+ idr_remove(&_minor_idr, m);
+ r = -ENOSPC;
+ goto out;
}
- spin_unlock(&_minor_lock);
+ *minor = m;
+
+out:
+ up(&_minor_lock);
return r;
}
+static struct block_device_operations dm_blk_dops;
+
/*
* Allocate and initialise a blank device with a given minor.
*/
return NULL;
}
+ if (!try_module_get(THIS_MODULE))
+ goto bad0;
+
/* get a minor number for the dev */
- r = persistent ? specific_minor(minor) : next_free_minor(&minor);
+ r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
if (r < 0)
goto bad1;
memset(md, 0, sizeof(*md));
- init_rwsem(&md->lock);
+ init_rwsem(&md->io_lock);
+ init_MUTEX(&md->suspend_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
+ atomic_set(&md->event_nr, 0);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
+ blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
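+ /* any page is acceptable here; leave bouncing to the underlying devices */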
md->queue->unplug_fn = dm_unplug_all;
+ md->queue->issue_flush_fn = dm_flush_all;
md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
mempool_free_slab, _io_cache);
blk_put_queue(md->queue);
free_minor(minor);
bad1:
+ module_put(THIS_MODULE);
+ bad0:
kfree(md);
return NULL;
}
static void free_dev(struct mapped_device *md)
{
- free_minor(md->disk->first_minor);
+ unsigned int minor = md->disk->first_minor;
+
+ if (md->suspended_bdev) {
+ thaw_bdev(md->suspended_bdev, NULL);
+ bdput(md->suspended_bdev);
+ }
mempool_destroy(md->tio_pool);
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
+ free_minor(minor);
put_disk(md->disk);
blk_put_queue(md->queue);
+ module_put(THIS_MODULE);
kfree(md);
}
{
struct mapped_device *md = (struct mapped_device *) context;
- down_write(&md->lock);
- md->event_nr++;
+ atomic_inc(&md->event_nr);
wake_up(&md->eventq);
- up_write(&md->lock);
}
-static void __set_size(struct gendisk *disk, sector_t size)
+static void __set_size(struct mapped_device *md, sector_t size)
{
- struct block_device *bdev;
-
- set_capacity(disk, size);
- bdev = bdget_disk(disk, 0);
- if (bdev) {
- down(&bdev->bd_inode->i_sem);
- i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- up(&bdev->bd_inode->i_sem);
- bdput(bdev);
- }
+ set_capacity(md->disk, size);
+
+ mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
+ i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
sector_t size;
size = dm_table_get_size(t);
- __set_size(md->disk, size);
+ __set_size(md, size);
if (size == 0)
return 0;
+ dm_table_get(t);
+ dm_table_event_callback(t, event_callback, md);
+
write_lock(&md->map_lock);
md->map = t;
+ dm_table_set_restrictions(t, q);
write_unlock(&md->map_lock);
- dm_table_get(t);
- dm_table_event_callback(md->map, event_callback, md);
- dm_table_set_restrictions(t, q);
return 0;
}
return create_aux(minor, 1, result);
}
+static struct mapped_device *dm_find_md(dev_t dev)
+{
+ struct mapped_device *md;
+ unsigned minor = MINOR(dev);
+
+ if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
+ return NULL;
+
+ down(&_minor_lock);
+
+ md = idr_find(&_minor_idr, minor);
+ if (!md || (dm_disk(md)->first_minor != minor))
+ md = NULL;
+
+ up(&_minor_lock);
+
+ return md;
+}
+
+struct mapped_device *dm_get_md(dev_t dev)
+{
+ struct mapped_device *md = dm_find_md(dev);
+
+ if (md)
+ dm_get(md);
+
+ return md;
+}
+
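+/*
+ * interface_ptr gives interface code (e.g. dm-ioctl) somewhere to hang
+ * per-device data off the mapped_device; the helpers below just wrap it.
+ */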
+void *dm_get_mdptr(dev_t dev)
+{
+ struct mapped_device *md;
+ void *mdptr = NULL;
+
+ md = dm_find_md(dev);
+ if (md)
+ mdptr = md->interface_ptr;
+ return mdptr;
+}
+
+void dm_set_mdptr(struct mapped_device *md, void *ptr)
+{
+ md->interface_ptr = ptr;
+}
+
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
struct dm_table *map = dm_get_table(md);
if (atomic_dec_and_test(&md->holders)) {
- if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
- dm_table_suspend_targets(map);
+ if (!dm_suspended(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
+ }
__unbind(md);
free_dev(md);
}
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- int r;
+ int r = -EINVAL;
- down_write(&md->lock);
+ down(&md->suspend_lock);
/* device must be suspended */
- if (!test_bit(DMF_SUSPENDED, &md->flags)) {
- up_write(&md->lock);
- return -EPERM;
- }
+ if (!dm_suspended(md))
+ goto out;
__unbind(md);
r = __bind(md, table);
- if (r)
- return r;
- up_write(&md->lock);
- return 0;
+out:
+ up(&md->suspend_lock);
+ return r;
}
/*
* Functions to lock and unlock any filesystem running on the
* device.
*/
-static int __lock_fs(struct mapped_device *md)
+static int lock_fs(struct mapped_device *md)
{
- struct block_device *bdev;
+ int r;
- if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
- return 0;
+ WARN_ON(md->frozen_sb);
- bdev = bdget_disk(md->disk, 0);
- if (!bdev) {
- DMWARN("bdget failed in __lock_fs");
- return -ENOMEM;
+ md->frozen_sb = freeze_bdev(md->suspended_bdev);
+ if (IS_ERR(md->frozen_sb)) {
+ r = PTR_ERR(md->frozen_sb);
+ md->frozen_sb = NULL;
+ return r;
}
- WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(bdev);
+ set_bit(DMF_FROZEN, &md->flags);
+
/* don't bdput right now, we don't want the bdev
- * to go away while it is locked. We'll bdput
- * in __unlock_fs
+ * to go away while it is locked.
*/
return 0;
}
-static int __unlock_fs(struct mapped_device *md)
+static void unlock_fs(struct mapped_device *md)
{
- struct block_device *bdev;
-
- if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
- return 0;
-
- bdev = bdget_disk(md->disk, 0);
- if (!bdev) {
- DMWARN("bdget failed in __unlock_fs");
- return -ENOMEM;
- }
+ if (!test_bit(DMF_FROZEN, &md->flags))
+ return;
- thaw_bdev(bdev, md->frozen_sb);
+ thaw_bdev(md->suspended_bdev, md->frozen_sb);
md->frozen_sb = NULL;
- bdput(bdev);
- bdput(bdev);
- return 0;
+ clear_bit(DMF_FROZEN, &md->flags);
}
/*
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
-int dm_suspend(struct mapped_device *md)
+int dm_suspend(struct mapped_device *md, int do_lockfs)
{
- struct dm_table *map;
+ struct dm_table *map = NULL;
DECLARE_WAITQUEUE(wait, current);
+ struct bio *def;
+ int r = -EINVAL;
- /* Flush I/O to the device. */
- down_read(&md->lock);
- if (test_bit(DMF_BLOCK_IO, &md->flags)) {
- up_read(&md->lock);
- return -EINVAL;
+ down(&md->suspend_lock);
+
+ if (dm_suspended(md))
+ goto out;
+
+ map = dm_get_table(md);
+
+ /* This does not get reverted if there's an error later. */
+ dm_table_presuspend_targets(map);
+
+ md->suspended_bdev = bdget_disk(md->disk, 0);
+ if (!md->suspended_bdev) {
+ DMWARN("bdget failed in dm_suspend");
+ r = -ENOMEM;
+ goto out;
}
- __lock_fs(md);
- up_read(&md->lock);
+ /* Flush I/O to the device. */
+ if (do_lockfs) {
+ r = lock_fs(md);
+ if (r)
+ goto out;
+ }
/*
- * First we set the BLOCK_IO flag so no more ios will be
- * mapped.
+ * First we set the BLOCK_IO flag so no more ios will be mapped.
*/
- down_write(&md->lock);
- if (test_bit(DMF_BLOCK_IO, &md->flags)) {
- /*
- * If we get here we know another thread is
- * trying to suspend as well, so we leave the fs
- * locked for this thread.
- */
- up_write(&md->lock);
- return -EINVAL;
- }
-
+ down_write(&md->io_lock);
set_bit(DMF_BLOCK_IO, &md->flags);
+
add_wait_queue(&md->wait, &wait);
- up_write(&md->lock);
+ up_write(&md->io_lock);
/* unplug */
- map = dm_get_table(md);
- if (map) {
+ if (map)
dm_table_unplug_all(map);
- dm_table_put(map);
- }
/*
* Then we wait for the already mapped ios to
}
set_current_state(TASK_RUNNING);
- down_write(&md->lock);
+ down_write(&md->io_lock);
remove_wait_queue(&md->wait, &wait);
/* were we interrupted ? */
+ r = -EINTR;
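+ /* interrupted: resubmit the deferred bios, thaw the fs and give up */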
if (atomic_read(&md->pending)) {
- __unlock_fs(md);
clear_bit(DMF_BLOCK_IO, &md->flags);
- up_write(&md->lock);
- return -EINTR;
+ def = bio_list_get(&md->deferred);
+ __flush_deferred_io(md, def);
+ up_write(&md->io_lock);
+ unlock_fs(md);
+ goto out;
}
+ up_write(&md->io_lock);
+
+ dm_table_postsuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
- map = dm_get_table(md);
- if (map)
- dm_table_suspend_targets(map);
- dm_table_put(map);
- up_write(&md->lock);
+ r = 0;
- return 0;
+out:
+ if (r && md->suspended_bdev) {
+ bdput(md->suspended_bdev);
+ md->suspended_bdev = NULL;
+ }
+
+ dm_table_put(map);
+ up(&md->suspend_lock);
+ return r;
}
int dm_resume(struct mapped_device *md)
{
+ int r = -EINVAL;
struct bio *def;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = NULL;
- down_write(&md->lock);
- if (!map ||
- !test_bit(DMF_SUSPENDED, &md->flags) ||
- !dm_table_get_size(map)) {
- up_write(&md->lock);
- dm_table_put(map);
- return -EINVAL;
- }
+ down(&md->suspend_lock);
+ if (!dm_suspended(md))
+ goto out;
+
+ map = dm_get_table(md);
+ if (!map || !dm_table_get_size(map))
+ goto out;
dm_table_resume_targets(map);
- clear_bit(DMF_SUSPENDED, &md->flags);
+
+ down_write(&md->io_lock);
clear_bit(DMF_BLOCK_IO, &md->flags);
def = bio_list_get(&md->deferred);
__flush_deferred_io(md, def);
- up_write(&md->lock);
- __unlock_fs(md);
+ up_write(&md->io_lock);
+
+ unlock_fs(md);
+
+ bdput(md->suspended_bdev);
+ md->suspended_bdev = NULL;
+
+ clear_bit(DMF_SUSPENDED, &md->flags);
+
dm_table_unplug_all(map);
+
+ r = 0;
+
+out:
dm_table_put(map);
+ up(&md->suspend_lock);
- return 0;
+ return r;
}
/*-----------------------------------------------------------------
*---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
- uint32_t r;
-
- down_read(&md->lock);
- r = md->event_nr;
- up_read(&md->lock);
-
- return r;
-}
-
-int dm_add_wait_queue(struct mapped_device *md, wait_queue_t *wq,
- uint32_t event_nr)
-{
- down_write(&md->lock);
- if (event_nr != md->event_nr) {
- up_write(&md->lock);
- return 1;
- }
-
- add_wait_queue(&md->eventq, wq);
- up_write(&md->lock);
-
- return 0;
+ return atomic_read(&md->event_nr);
}
-void dm_remove_wait_queue(struct mapped_device *md, wait_queue_t *wq)
+int dm_wait_event(struct mapped_device *md, int event_nr)
{
- down_write(&md->lock);
- remove_wait_queue(&md->eventq, wq);
- up_write(&md->lock);
+ return wait_event_interruptible(md->eventq,
+ (event_nr != atomic_read(&md->event_nr)));
}
/*
return test_bit(DMF_SUSPENDED, &md->flags);
}
-struct block_device_operations dm_blk_dops = {
+static struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.owner = THIS_MODULE
};
+EXPORT_SYMBOL(dm_get_mapinfo);
+
/*
* module hooks
*/
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
-MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");