/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static int realloc_minor_bits(unsigned long requested_minor);
static void free_minor_bits(void);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block.
	 */
	struct super_block *frozen_sb;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	r = realloc_minor_bits(1024);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		free_minor_bits();
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	free_minor_bits();

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
/*
 * We have a lot of init/exit functions, so it seems easier to
 * store them in an array.  The disposable macro 'xx'
 * expands a prefix into a pair of function names.
 */
static struct {
	int (*init) (void);
	void (*exit) (void);
} _inits[] = {
#define xx(n) {n ## _init, n ## _exit},
	xx(local)
	xx(dm_target)
	xx(dm_linear)
	xx(dm_stripe)
	xx(dm_interface)
#undef xx
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i].init();
		if (r)
			goto bad;
	}

	return 0;

 bad:
	while (i--)
		_inits[i].exit();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_inits);

	while (i--)
		_inits[i].exit();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
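/*
 * Note: mempool_alloc() with a blocking mask such as GFP_NOIO may
 * sleep but does not fail (it waits for an element to be returned to
 * the pool), which is why the callers above never check for NULL.
 */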
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
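/*
 * A minimal sketch (not part of the original file) of the access
 * pattern the comment above prescribes; 'example_table_user' is a
 * hypothetical name.
 */
static inline sector_t example_table_user(struct mapped_device *md)
{
	sector_t size = 0;
	struct dm_table *map = dm_get_table(md);	/* take a reference */

	if (map) {
		size = dm_table_get_size(map);
		dm_table_put(map);	/* always drop the reference */
	}

	return size;
}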
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge function; unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	/* not done until the whole clone has completed */
	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;
		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = dm_round_up(offset + 1, ti->split_io) - offset;

		if (len > boundary)
			len = boundary;
	}

	return len;
}
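/*
 * Worked example of the boundary arithmetic above: with offset = 5 and
 * ti->split_io = 8, dm_round_up(6, 8) = 8, so boundary = 8 - 5 = 3 and
 * the io is clipped to 3 sectors, ending exactly on the next
 * split_io-aligned boundary.
 */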
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
		bio_put(clone);
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
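/*
 * Note on units in split_bvec(): 'len' is in sectors (hence the
 * to_bytes() conversion) while 'offset' is a byte offset into the
 * original bvec's page, which is why the second half of a split below
 * starts at bv_offset + to_bytes(max).
 */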
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}
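/*
 * Summary of the three cases above: (1) the rest of the bio fits in
 * the current target, so one clone_bio() finishes the io; (2) at least
 * the next bvec fits, so whole bvecs are cloned up to the target
 * boundary; (3) a single bvec itself straddles the boundary, so it is
 * cut in two with split_bvec(), each half getting its own target_io.
 */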
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
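/*
 * Reference-counting protocol: io_count starts at 1 (the "extra"
 * reference above), __map_bio() takes one reference per clone it
 * issues, and each completion or error path drops one.  The final
 * dec_pending(ci.io, 0) drops the initial reference, so the original
 * bio only completes once every clone has finished.
 */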
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/
/*
 * The request function: remaps the bio and dispatches it, deferring
 * the io if the device is suspended.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->lock);
	}

	__split_bio(md, bio);
	up_read(&md->lock);
	return 0;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	if (map)
		dm_table_put(map);

	return r;
}
/*-----------------------------------------------------------------
 * A bitset is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static unsigned long *_minor_bits = NULL;
static unsigned long _max_minors = 0;

#define MINORS_SIZE(minors) (((minors) / BITS_PER_LONG) * sizeof(unsigned long))
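/*
 * Worked example: with BITS_PER_LONG = 64, MINORS_SIZE(1024) =
 * (1024 / 64) * 8 = 128 bytes.  The plain division is safe only
 * because _max_minors is always kept a power of two >= 1024, and
 * therefore a multiple of BITS_PER_LONG.
 */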
static int realloc_minor_bits(unsigned long requested_minor)
{
	unsigned long max_minors;
	unsigned long *minor_bits, *tmp;

	if (requested_minor < _max_minors)
		return 0;

	/* Round up the requested minor to the next power-of-2. */
	max_minors = 1 << fls(requested_minor - 1);
	if (max_minors > (1 << MINORBITS))
		return -EINVAL;

	minor_bits = kmalloc(MINORS_SIZE(max_minors), GFP_KERNEL);
	if (!minor_bits)
		return -ENOMEM;
	memset(minor_bits, 0, MINORS_SIZE(max_minors));

	/* Copy the existing bit-set to the new one. */
	if (_minor_bits)
		memcpy(minor_bits, _minor_bits, MINORS_SIZE(_max_minors));

	tmp = _minor_bits;
	_minor_bits = minor_bits;
	_max_minors = max_minors;
	kfree(tmp);

	return 0;
}
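/*
 * Rounding example: realloc_minor_bits(1000) computes
 * 1 << fls(999) = 1 << 10 = 1024, so the bitset always grows in
 * power-of-two steps.
 */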
static void free_minor_bits(void)
{
	down(&_minor_lock);
	kfree(_minor_bits);
	_minor_bits = NULL;
	_max_minors = 0;
	up(&_minor_lock);
}
static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	if (minor < _max_minors)
		clear_bit(minor, _minor_bits);
	up(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(unsigned int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);
	if (minor >= _max_minors) {
		/* grow enough to cover bit 'minor' (hence minor + 1) */
		r = realloc_minor_bits(minor + 1);
		if (r) {
			up(&_minor_lock);
			return r;
		}
	}

	if (test_and_set_bit(minor, _minor_bits))
		r = -EBUSY;
	else
		r = 0;

	up(&_minor_lock);
	return r;
}
static int next_free_minor(unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);
	m = find_first_zero_bit(_minor_bits, _max_minors);
	if (m >= _max_minors) {
		/* all minors in use, double the bitset */
		r = realloc_minor_bits(_max_minors * 2);
		if (r) {
			up(&_minor_lock);
			return r;
		}
		m = find_first_zero_bit(_minor_bits, _max_minors);
	}

	set_bit(m, _minor_bits);
	*minor = m;
	up(&_minor_lock);

	return 0;
}
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(minor) : next_free_minor(&minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue) {
		free_minor(minor);
		goto bad1;
	}

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct gendisk *disk, sector_t size)
{
	struct block_device *bdev;

	set_capacity(disk, size);
	bdev = bdget_disk(disk, 0);
	if (bdev) {
		down(&bdev->bd_inode->i_sem);
		i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
		up(&bdev->bd_inode->i_sem);
		bdput(bdev);
	}
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md->disk, size);
	if (size == 0)
		return 0;

	write_lock(&md->map_lock);
	md->map = t;
	write_unlock(&md->map_lock);

	dm_table_get(t);
	dm_table_event_callback(md->map, event_callback, md);
	dm_table_set_restrictions(t, q);
	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
			dm_table_suspend_targets(map);
		__unbind(md);
		free_dev(md);
	}

	if (map)
		dm_table_put(map);
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r;

	down_write(&md->lock);

	/* device must be suspended */
	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
		up_write(&md->lock);
		return -EPERM;
	}

	__unbind(md);
	r = __bind(md, table);

	up_write(&md->lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __lock_fs");
		return -ENOMEM;
	}

	WARN_ON(md->frozen_sb);
	md->frozen_sb = freeze_bdev(bdev);
	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in __unlock_fs.
	 */
	return 0;
}
static int __unlock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __unlock_fs");
		return -ENOMEM;
	}

	thaw_bdev(bdev, md->frozen_sb);
	md->frozen_sb = NULL;

	/* once for this bdget, once for the one left over by __lock_fs */
	bdput(bdev);
	bdput(bdev);

	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);
		return -EINVAL;
	}

	__lock_fs(md);
	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be
	 * mapped.
	 */
	down_write(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		/*
		 * If we get here we know another thread is
		 * trying to suspend as well, so we leave the fs
		 * locked for this thread.
		 */
		up_write(&md->lock);
		return -EINVAL;
	}

	set_bit(DMF_BLOCK_IO, &md->flags);
	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	map = dm_get_table(md);
	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	if (atomic_read(&md->pending)) {
		__unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		up_write(&md->lock);
		return -EINTR;
	}

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map) {
		dm_table_suspend_targets(map);
		dm_table_put(map);
	}
	up_write(&md->lock);

	return 0;
}
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		if (map)
			dm_table_put(map);
		return -EINVAL;
	}

	dm_table_resume_targets(map);
	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);
	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
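/*
 * A minimal sketch (not part of the original file) of the sequence the
 * comment above dm_suspend() describes for replacing a live table;
 * 'example_replace_table' is a hypothetical name.
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	/* defer new io and wait for in-flight io to drain */
	int r = dm_suspend(md);
	if (r)
		return r;

	/* bind the new mapping; requires the device to be suspended */
	r = dm_swap_table(md, new_table);
	if (r)
		return r;

	/* resume: replay deferred io against the new table */
	return dm_resume(md);
}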
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
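/*
 * Sketch (assumption, not in the original) of how a caller is expected
 * to use the event counter: sample it first, then sleep until it moves.
 */
static inline int example_wait_for_table_event(struct mapped_device *md)
{
	uint32_t ev = dm_get_event_nr(md);	/* sample current counter */

	/* returns 0 once event_callback() has bumped event_nr */
	return dm_wait_event(md, ev);
}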
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
MODULE_LICENSE("GPL");