X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-table.c;fp=drivers%2Fmd%2Fdm-table.c;h=9b1e2f5ca63049dbb1dd2dad9e642382374fbbf5;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=75fe9493e6af47059dbe79819b16683455304be8;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 75fe9493e..9b1e2f5ca 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -14,18 +14,14 @@
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/mutex.h>
 #include <asm/atomic.h>
 
-#define DM_MSG_PREFIX "table"
-
 #define MAX_DEPTH 16
 #define NODE_SIZE L1_CACHE_BYTES
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
 struct dm_table {
-	struct mapped_device *md;
 	atomic_t holders;
 
 	/* btree table */
@@ -101,8 +97,6 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-
-	lhs->no_cluster |= rhs->no_cluster;
 }
 
 /*
@@ -210,8 +204,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	return 0;
 }
 
-int dm_table_create(struct dm_table **result, int mode,
-		    unsigned num_targets, struct mapped_device *md)
+int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
 {
 	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
 
@@ -234,49 +227,10 @@ int dm_table_create(struct dm_table **result, int mode,
 	}
 
 	t->mode = mode;
-	t->md = md;
 	*result = t;
 	return 0;
 }
 
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
-{
-	struct dm_table *t;
-	sector_t dev_size = 1;
-	int r;
-
-	/*
-	 * Find current size of device.
-	 * Default to 1 sector if inactive.
-	 */
-	t = dm_get_table(md);
-	if (t) {
-		dev_size = dm_table_get_size(t);
-		dm_table_put(t);
-	}
-
-	r = dm_table_create(&t, FMODE_READ, 1, md);
-	if (r)
-		return r;
-
-	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
-	if (r)
-		goto out;
-
-	r = dm_table_complete(t);
-	if (r)
-		goto out;
-
-	*result = t;
-
-out:
-	if (r)
-		dm_table_put(t);
-
-	return r;
-}
-EXPORT_SYMBOL_GPL(dm_create_error_table);
-
 static void free_devices(struct list_head *devices)
 {
 	struct list_head *tmp, *next;
@@ -391,19 +345,20 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev *d, dev_t dev)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
-	BUG_ON(d->bdev);
+	if (d->bdev)
+		BUG();
 
 	bdev = open_by_devnum(dev, d->mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
+	r = bd_claim(bdev, _claim_ptr);
 	if (r)
 		blkdev_put(bdev);
 	else
@@ -414,12 +369,12 @@ static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev *d)
 {
 	if (!d->bdev)
 		return;
 
-	bd_release_from_disk(d->bdev, dm_disk(md));
+	bd_release(d->bdev);
 	blkdev_put(d->bdev);
 	d->bdev = NULL;
 }
@@ -440,7 +395,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev *dd, int new_mode)
 {
 	int r;
 	struct dm_dev dd_copy;
@@ -450,9 +405,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *m
 
 	dd->mode |= new_mode;
 	dd->bdev = NULL;
-	r = open_dev(dd, dev, md);
+	r = open_dev(dd, dev);
 	if (!r)
-		close_dev(&dd_copy, md);
+		close_dev(&dd_copy);
 	else
 		*dd = dd_copy;
 
@@ -472,7 +427,8 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	struct dm_dev *dd;
 	unsigned int major, minor;
 
-	BUG_ON(!t);
+	if (!t)
+		BUG();
 
 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
 		/* Extract the major/minor numbers */
@@ -494,7 +450,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		dd->mode = mode;
 		dd->bdev = NULL;
 
-		if ((r = open_dev(dd, dev, t->md))) {
+		if ((r = open_dev(dd, dev))) {
 			kfree(dd);
 			return r;
 		}
@@ -505,7 +461,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		list_add(&dd->list, &t->devices);
 
 	} else if (dd->mode != (mode | dd->mode)) {
-		r = upgrade_mode(dd, mode, t->md);
+		r = upgrade_mode(dd, mode);
 		if (r)
 			return r;
 	}
@@ -569,8 +525,6 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 		rs->seg_boundary_mask =
 			min_not_zero(rs->seg_boundary_mask,
 				     q->seg_boundary_mask);
-
-		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	}
 
 	return r;
@@ -582,7 +536,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
 {
 	if (atomic_dec_and_test(&dd->count)) {
-		close_dev(dd, ti->table->md);
+		close_dev(dd);
 		list_del(&dd->list);
 		kfree(dd);
 	}
@@ -630,12 +584,6 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 	unsigned array_size = 0;
 
 	*argc = 0;
-
-	if (!input) {
-		*argvp = NULL;
-		return 0;
-	}
-
 	argv = realloc_argv(&array_size, argv);
 	if (!argv)
 		return -ENOMEM;
@@ -717,14 +665,15 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	memset(tgt, 0, sizeof(*tgt));
 
 	if (!len) {
-		DMERR("%s: zero-length target", dm_device_name(t->md));
+		tgt->error = "zero-length target";
+		DMERR("%s", tgt->error);
 		return -EINVAL;
 	}
 
 	tgt->type = dm_get_target_type(type);
 	if (!tgt->type) {
-		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
-		      type);
+		tgt->error = "unknown target type";
+		DMERR("%s", tgt->error);
 		return -EINVAL;
 	}
 
@@ -761,7 +710,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return 0;
 
  bad:
-	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
+	DMERR("%s", tgt->error);
 	dm_put_target_type(tgt->type);
 	return r;
 }
@@ -816,14 +765,14 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
 
-static DEFINE_MUTEX(_event_lock);
+static DECLARE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context)
 {
-	mutex_lock(&_event_lock);
+	down(&_event_lock);
 	t->event_fn = fn;
 	t->event_context = context;
-	mutex_unlock(&_event_lock);
+	up(&_event_lock);
 }
 
 void dm_table_event(struct dm_table *t)
@@ -834,10 +783,10 @@ void dm_table_event(struct dm_table *t)
 	 */
 	BUG_ON(in_interrupt());
 
-	mutex_lock(&_event_lock);
+	down(&_event_lock);
 	if (t->event_fn)
 		t->event_fn(t->event_context);
-	mutex_unlock(&_event_lock);
+	up(&_event_lock);
 }
 
 sector_t dm_table_get_size(struct dm_table *t)
@@ -847,7 +796,7 @@ sector_t dm_table_get_size(struct dm_table *t)
 
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 {
-	if (index >= t->num_targets)
+	if (index > t->num_targets)
 		return NULL;
 
 	return t->targets + index;
@@ -885,11 +834,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	if (t->limits.no_cluster)
-		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
-	else
-		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
-
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -1001,20 +945,12 @@ int dm_table_flush_all(struct dm_table *t)
 	return ret;
 }
 
-struct mapped_device *dm_table_get_md(struct dm_table *t)
-{
-	dm_get(t->md);
-
-	return t->md;
-}
-
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
 EXPORT_SYMBOL(dm_table_event);
 EXPORT_SYMBOL(dm_table_get_size);
 EXPORT_SYMBOL(dm_table_get_mode);
-EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
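
Note on the _event_lock hunks above: the backported (+) side swaps the newer DEFINE_MUTEX/mutex_lock/mutex_unlock API for the older DECLARE_MUTEX semaphore used with down()/up(), but the locking pattern itself is unchanged: one lock serialises registration of the event callback against delivery of the event, so event_fn and event_context are always seen as a consistent pair. The sketch below is not part of the diff; it is a minimal userspace analogue using POSIX threads, and the names table_event_callback, table_event and print_event are illustrative only, chosen to mirror the kernel functions.

/* Minimal userspace analogue of the _event_lock pattern in the hunks above.
 * Not from the kernel tree: pthread_mutex_t stands in for the kernel mutex
 * (or for the DECLARE_MUTEX semaphore on the older side of the diff). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*event_fn)(void *);   /* registered callback, NULL until set */
static void *event_context;        /* opaque pointer handed back to the callback */

/* Analogue of dm_table_event_callback(): publish fn/context under the lock. */
static void table_event_callback(void (*fn)(void *), void *context)
{
	pthread_mutex_lock(&event_lock);
	event_fn = fn;
	event_context = context;
	pthread_mutex_unlock(&event_lock);
}

/* Analogue of dm_table_event(): deliver the event while holding the lock,
 * so a concurrent re-registration cannot be observed half-done. */
static void table_event(void)
{
	pthread_mutex_lock(&event_lock);
	if (event_fn)
		event_fn(event_context);
	pthread_mutex_unlock(&event_lock);
}

static void print_event(void *context)
{
	printf("event: %s\n", (const char *)context);
}

int main(void)
{
	table_event();                               /* nothing registered yet: no-op */
	table_event_callback(print_event, "table changed");
	table_event();                               /* prints "event: table changed" */
	return 0;
}

Builds with something like cc -pthread; the point is only the shape of the critical sections, not the kernel API itself.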