/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
};
#define MIN_IOS 256	/* mempool reserve size used by alloc_dev() below */
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
	local_init,
	/* ... remaining subsystem init functions elided ... */
};

void (*_exits[])(void) = {
	local_exit,
	/* ... matching subsystem exit functions elided ... */
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

 bad:
	while (i--)
		_exits[i]();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
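/*
 * GFP_NOIO is used above because these allocations happen on the I/O
 * path: letting the allocator recurse into page reclaim (which may
 * itself issue I/O, possibly to this very device) could deadlock.
 * The mempools guarantee forward progress under memory pressure.
 */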
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
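/*
 * Typical caller pattern (illustrative sketch only):
 *
 *	struct dm_table *map = dm_get_table(md);
 *	if (map) {
 *		... inspect or use the table ...
 *		dm_table_put(map);
 *	}
 */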
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;
		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
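/*
 * Note on the return convention: in this era of the block layer an
 * end_io handler may be called with bytes still outstanding; returning
 * 1 while bio->bi_size is non-zero tells the caller the bio is not yet
 * complete.  The same convention lets a target's end_io hook request a
 * retry by returning a positive value.
 */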
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = dm_round_up(offset + 1, ti->split_io) - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
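/*
 * Worked example (hypothetical numbers): with ti->begin = 0,
 * ti->len = 1024 and ti->split_io = 64, a bio starting at sector 100
 * gives offset = 100 and len = 924, but the chunk boundary is
 * dm_round_up(101, 64) - 100 = 28, so the io is clipped to 28 sectors
 * and never crosses a 64-sector chunk.
 */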
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
		bio_put(clone);
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}
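/*
 * To summarise the three cases above: (1) the remainder of the bio fits
 * inside the current target and goes out as a single clone; (2) it does
 * not fit, but some whole bvecs do, so as many as possible are cloned
 * together; (3) a single bvec itself straddles the target boundary and
 * must be carved up with split_bvec().
 */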
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
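/*
 * ci.io->io_count starts at 1 so that the original bio cannot be
 * completed while clones are still being created; the dec_pending()
 * above drops that extra reference once every clone has been mapped.
 */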
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/
/*
 * The request function that just remaps the bio built up by
 * the block layer and hands it to the targets.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->lock);
	}

	__split_bio(md, bio);
	up_read(&md->lock);
	return 0;
}
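/*
 * Note that read-ahead bios are failed rather than deferred while the
 * device is blocked: READA is advisory, and callers treat its failure
 * as a soft error, so there is no point holding readahead across a
 * suspend.
 */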
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	if (map)
		dm_table_put(map);

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
	}

 out:
	up(&_minor_lock);
	return r;
}
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r, m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

 out:
	up(&_minor_lock);
	return r;
}
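/*
 * In this era of the IDR API, idr_pre_get() pre-allocates memory
 * (returning 0 on failure) and idr_get_new()/idr_get_new_above() then
 * consume it; _minor_lock serialises the two steps against other
 * allocators of minor numbers.
 */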
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct gendisk *disk, sector_t size)
{
	struct block_device *bdev;

	set_capacity(disk, size);
	bdev = bdget_disk(disk, 0);
	if (bdev) {
		down(&bdev->bd_inode->i_sem);
		i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
		up(&bdev->bd_inode->i_sem);
		bdput(bdev);
	}
}
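/*
 * i_size_write() must be serialised against concurrent readers of
 * i_size; taking i_sem on the block device inode above provides that
 * exclusion, mirroring what the VFS does for regular files.
 */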
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md->disk, size);
	if (size == 0)
		return 0;

	write_lock(&md->map_lock);
	md->map = t;
	write_unlock(&md->map_lock);

	dm_table_get(t);
	dm_table_event_callback(md->map, event_callback, md);
	dm_table_set_restrictions(t, q);
	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);

	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
void *dm_get_mdptr(dev_t dev)
{
	struct mapped_device *md;
	void *mdptr = NULL;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);

	if (md && (dm_disk(md)->first_minor == minor))
		mdptr = md->interface_ptr;

	up(&_minor_lock);

	return mdptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
			dm_table_suspend_targets(map);
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r;

	down_write(&md->lock);

	/* device must be suspended */
	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
		up_write(&md->lock);
		return -EPERM;
	}

	__unbind(md);
	r = __bind(md, table);

	up_write(&md->lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __lock_fs");
		return -ENOMEM;
	}

	WARN_ON(md->frozen_sb);
	md->frozen_sb = freeze_bdev(bdev);
	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in __unlock_fs.
	 */
	return 0;
}

static int __unlock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __unlock_fs");
		return -ENOMEM;
	}

	thaw_bdev(bdev, md->frozen_sb);
	md->frozen_sb = NULL;

	/* once for this bdget, once for the one in __lock_fs */
	bdput(bdev);
	bdput(bdev);

	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);
		return -EINVAL;
	}

	__lock_fs(md);
	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be
	 * mapped.
	 */
	down_write(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		/*
		 * If we get here we know another thread is
		 * trying to suspend as well, so we leave the fs
		 * locked for this thread.
		 */
		up_write(&md->lock);
		return -EINVAL;
	}

	set_bit(DMF_BLOCK_IO, &md->flags);
	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	map = dm_get_table(md);
	if (map)
		dm_table_unplug_all(map);
	dm_table_put(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	if (atomic_read(&md->pending)) {
		__unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		up_write(&md->lock);
		return -EINTR;
	}

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map)
		dm_table_suspend_targets(map);
	dm_table_put(map);
	up_write(&md->lock);

	return 0;
}
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		dm_table_put(map);
		return -EINVAL;
	}

	dm_table_resume_targets(map);
	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);
	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
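/*
 * Callers sample dm_get_event_nr() before an operation and later pass
 * the value to dm_wait_event(), which sleeps until event_callback()
 * has bumped event_nr (for instance on a table event); the ioctl
 * interface uses this pair to implement waiting for device events.
 */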
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");