/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;

static inline void wake(void)
{
        queue_work(_kmirrord_wq, &_kmirrord_work);
}

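/*
 * Note (added for clarity): a single kmirrord workqueue services all
 * active mirror sets; do_work() below walks the global _mirror_sets
 * list, so wake() is a cheap "something changed" kick rather than a
 * per-set signal.
 */
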
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/

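/*
 * Reading aid (not in the original source): the state transitions
 * implied above, using the RH_* states defined below.
 *
 *   RH_CLEAN  --first write--> RH_DIRTY --last pending write--> RH_CLEAN
 *   RH_NOSYNC --get_resync_work--> RH_RECOVERING --kcopyd done-->
 *       recovered_regions --rh_update_states--> in-sync, region freed
 */
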
struct region_hash {
        struct mirror_set *ms;
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned int mask;
        unsigned int nr_buckets;
        struct list_head *buckets;

        spinlock_t region_lock;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
};

enum {
        RH_CLEAN,
        RH_DIRTY,
        RH_NOSYNC,
        RH_RECOVERING
};

struct region {
        struct region_hash *rh; /* FIXME: can we get rid of this ? */
        region_t key;
        int state;

        struct list_head hash_list;
        struct list_head list;

        atomic_t pending;
        struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
        atomic_t error_count;
        struct dm_dev *dev;
        sector_t offset;
};

struct mirror_set {
        struct dm_target *ti;
        struct list_head list;
        struct region_hash rh;
        struct kcopyd_client *kcopyd_client;

        spinlock_t lock;        /* protects the next two lists */
        struct bio_list reads;
        struct bio_list writes;

        /* recovery */
        region_t nr_regions;
        int in_sync;

        struct mirror *default_mirror;  /* Default mirror */

        unsigned int nr_mirrors;
        struct mirror mirror[0];
};

static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
        return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
        return region << rh->region_shift;
}

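/*
 * Worked example (illustrative values, not from the original source):
 * with region_size = 1024 sectors (512 KiB), region_shift is
 * ffs(1024) - 1 = 10.  A bio at sector 5000 on a target beginning at
 * sector 0 falls in region 5000 >> 10 = 4, whose first sector is
 * region_to_sector(rh, 4) = 4 << 10 = 4096.
 */
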
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

static void *region_alloc(gfp_t gfp_mask, void *pool_data)
{
        return kmalloc(sizeof(struct region), gfp_mask);
}

static void region_free(void *element, void *pool_data)
{
        kfree(element);
}

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
                   struct dirty_log *log, uint32_t region_size,
                   region_t nr_regions)
{
        unsigned int nr_buckets, max_buckets;
        size_t i;

        /*
         * Calculate a suitable number of buckets for our hash
         * table.
         */
        max_buckets = nr_regions >> 6;
        for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
                ;
        nr_buckets >>= 1;

        rh->ms = ms;
        rh->log = log;
        rh->region_size = region_size;
        rh->region_shift = ffs(region_size) - 1;
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;

        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash memory");
                return -ENOMEM;
        }

        for (i = 0; i < nr_buckets; i++)
                INIT_LIST_HEAD(rh->buckets + i);

        spin_lock_init(&rh->region_lock);
        sema_init(&rh->recovery_count, 0);
        INIT_LIST_HEAD(&rh->clean_regions);
        INIT_LIST_HEAD(&rh->quiesced_regions);
        INIT_LIST_HEAD(&rh->recovered_regions);

        rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
                                         region_free, NULL);
        if (!rh->region_pool) {
                vfree(rh->buckets);
                rh->buckets = NULL;
                return -ENOMEM;
        }

        return 0;
}

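/*
 * Sizing sketch (illustrative figures, not from the original source):
 * a 1 TiB target with 512 KiB regions has 2^21 regions, so
 * max_buckets = 2^21 >> 6 = 32768 and the loop above settles on
 * 16384 buckets after the final halving.  The table stays sparse in
 * practice, since rh_update_states() keeps pruning clean regions.
 */
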
static void rh_exit(struct region_hash *rh)
{
        unsigned int h;
        struct region *reg, *nreg;

        BUG_ON(!list_empty(&rh->quiesced_regions));
        for (h = 0; h < rh->nr_buckets; h++) {
                list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
                        BUG_ON(atomic_read(&reg->pending));
                        mempool_free(reg, rh->region_pool);
                }
        }

        if (rh->log)
                dm_destroy_dirty_log(rh->log);
        if (rh->region_pool)
                mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
        return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

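/*
 * Note (added for clarity): this is multiplicative hashing; the large
 * odd constant scrambles the key, >> 12 drops low-order bits so that
 * runs of adjacent region numbers spread out, and masking with
 * rh->mask works because nr_buckets is always a power of two.
 */
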
static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
        struct region *reg;

        list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
                if (reg->key == region)
                        return reg;

        return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
        unsigned int h = rh_hash(rh, reg->key);
        list_add(&reg->hash_list, rh->buckets + h);
}

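/*
 * Note (added for clarity): __rh_alloc() is entered with hash_lock
 * held for read.  It drops the lock to allocate, retakes it in write
 * mode and repeats the lookup, because another cpu may have inserted
 * the same region in the window; the loser of that race frees its
 * copy and uses the winner's.
 */
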
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
        struct region *reg, *nreg;

        read_unlock(&rh->hash_lock);
        nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
        if (unlikely(!nreg))
                nreg = kmalloc(sizeof(struct region), GFP_NOIO);
        nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
                RH_CLEAN : RH_NOSYNC;
        nreg->rh = rh;
        nreg->key = region;

        INIT_LIST_HEAD(&nreg->list);

        atomic_set(&nreg->pending, 0);
        bio_list_init(&nreg->delayed_bios);
        write_lock_irq(&rh->hash_lock);

        reg = __rh_lookup(rh, region);
        if (reg)
                /* we lost the race */
                mempool_free(nreg, rh->region_pool);
        else {
                __rh_insert(rh, nreg);
                if (nreg->state == RH_CLEAN) {
                        spin_lock(&rh->region_lock);
                        list_add(&nreg->list, &rh->clean_regions);
                        spin_unlock(&rh->region_lock);
                }
                reg = nreg;
        }
        write_unlock_irq(&rh->hash_lock);
        read_lock(&rh->hash_lock);

        return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
        struct region *reg;

        reg = __rh_lookup(rh, region);
        if (!reg)
                reg = __rh_alloc(rh, region);

        return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
        int r;
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        if (reg)
                return reg->state;

        /*
         * The region wasn't in the hash, so we fall back to the
         * dirty log.
         */
        r = rh->log->type->in_sync(rh->log, region, may_block);

        /*
         * Any error from the dirty log (eg. -EWOULDBLOCK) gets
         * taken as a RH_NOSYNC
         */
        return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
                             region_t region, int may_block)
{
        int state = rh_state(rh, region, may_block);
        return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list))) {
                queue_bio(ms, bio, WRITE);
        }
}

static void rh_update_states(struct region_hash *rh)
{
        struct region *reg, *next;

        LIST_HEAD(clean);
        LIST_HEAD(recovered);

        /*
         * Quickly grab the lists.
         */
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
                list_splice(&rh->clean_regions, &clean);
                INIT_LIST_HEAD(&rh->clean_regions);

                list_for_each_entry (reg, &clean, list) {
                        rh->log->type->clear_region(rh->log, reg->key);
                        list_del(&reg->hash_list);
                }
        }

        if (!list_empty(&rh->recovered_regions)) {
                list_splice(&rh->recovered_regions, &recovered);
                INIT_LIST_HEAD(&rh->recovered_regions);

                list_for_each_entry (reg, &recovered, list)
                        list_del(&reg->hash_list);
        }
        spin_unlock(&rh->region_lock);
        write_unlock_irq(&rh->hash_lock);

        /*
         * All the regions on the recovered and clean lists have
         * now been pulled out of the system, so no need to do
         * any locking.
         */
        list_for_each_entry_safe (reg, next, &recovered, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                rh->log->type->complete_resync_work(rh->log, reg->key, 1);
                dispatch_bios(rh->ms, &reg->delayed_bios);
                up(&rh->recovery_count);
                mempool_free(reg, rh->region_pool);
        }

        if (!list_empty(&recovered))
                rh->log->type->flush(rh->log);

        list_for_each_entry_safe (reg, next, &clean, list)
                mempool_free(reg, rh->region_pool);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);

        spin_lock_irq(&rh->region_lock);
        atomic_inc(&reg->pending);

        if (reg->state == RH_CLEAN) {
                reg->state = RH_DIRTY;
                list_del_init(&reg->list);      /* take off the clean list */
                spin_unlock_irq(&rh->region_lock);

                rh->log->type->mark_region(rh->log, reg->key);
        } else
                spin_unlock_irq(&rh->region_lock);

        read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
        struct bio *bio;

        for (bio = bios->head; bio; bio = bio->bi_next)
                rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
        unsigned long flags;
        struct region *reg;
        int should_wake = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irqsave(&rh->region_lock, flags);
        if (atomic_dec_and_test(&reg->pending)) {
                if (reg->state == RH_RECOVERING) {
                        list_add_tail(&reg->list, &rh->quiesced_regions);
                } else {
                        reg->state = RH_CLEAN;
                        list_add(&reg->list, &rh->clean_regions);
                }
                should_wake = 1;
        }
        spin_unlock_irqrestore(&rh->region_lock, flags);

        if (should_wake)
                wake();
}

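/*
 * Note (added for clarity): rh_inc()/rh_dec() bracket every write.
 * The first write to a clean region marks it dirty in the log; when
 * the last pending write drains, rh_dec() either parks the region on
 * quiesced_regions (recovery was waiting for it) or puts it back on
 * clean_regions, and wakes kmirrord to act on the change.
 */
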
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
        int r;
        struct region *reg;
        region_t region;

        /*
         * Ask the dirty log what's next.
         */
        r = rh->log->type->get_resync_work(rh->log, &region);
        if (r <= 0)
                return r;

        /*
         * Get this region, and start it quiescing by setting the
         * recovering flag.
         */
        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irq(&rh->region_lock);
        reg->state = RH_RECOVERING;

        /* Already quiesced ? */
        if (atomic_read(&reg->pending))
                list_del_init(&reg->list);
        else {
                list_del_init(&reg->list);
                list_add(&reg->list, &rh->quiesced_regions);
        }
        spin_unlock_irq(&rh->region_lock);

        return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
        while (!down_trylock(&rh->recovery_count))
                if (__rh_recovery_prepare(rh) <= 0) {
                        up(&rh->recovery_count);
                        break;
                }
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
        struct region *reg = NULL;

        spin_lock_irq(&rh->region_lock);
        if (!list_empty(&rh->quiesced_regions)) {
                reg = list_entry(rh->quiesced_regions.next,
                                 struct region, list);
                list_del_init(&reg->list);      /* remove from the quiesced list */
        }
        spin_unlock_irq(&rh->region_lock);

        return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        spin_lock_irq(&rh->region_lock);
        list_add(&reg->list, &reg->rh->recovered_regions);
        spin_unlock_irq(&rh->region_lock);

        wake();
}

static void rh_flush(struct region_hash *rh)
{
        rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, bio_to_region(rh, bio));
        bio_list_add(&reg->delayed_bios, bio);
        read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
        int i;

        /* wait for any recovering regions */
        for (i = 0; i < MAX_RECOVERY; i++)
                down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
        int i;

        for (i = 0; i < MAX_RECOVERY; i++)
                up(&rh->recovery_count);

        wake();
}

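/*
 * Note (added for clarity): recovery_count is a counting throttle.
 * It starts at zero; rh_start_recovery() ups it MAX_RECOVERY times,
 * each in-flight recovery holds one count via the down_trylock() in
 * rh_recovery_prepare(), and rh_stop_recovery() drains all
 * MAX_RECOVERY counts, blocking until outstanding recoveries finish.
 */
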
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
        return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
        bio->bi_next = (struct bio *) ms;
}

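/*
 * Note (added for clarity, based on the comment above): bi_next is
 * only meaningful while a bio is chained on a list.  These write bios
 * are held privately until write_callback() runs, and that callback
 * restores bi_next to NULL via bio_set_ms(bio, NULL) before the bio
 * is completed, so nothing downstream ever sees the stashed pointer.
 */
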
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
                              void *context)
{
        struct region *reg = (struct region *) context;

        /* FIXME: better error handling */
        rh_recovery_end(reg, read_err || write_err);
}

static int recover(struct mirror_set *ms, struct region *reg)
{
        int r;
        unsigned int i;
        struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;

        /* fill in the source */
        m = ms->default_mirror;
        from.bdev = m->dev->bdev;
        from.sector = m->offset + region_to_sector(reg->rh, reg->key);
        if (reg->key == (ms->nr_regions - 1)) {
                /*
                 * The final region may be smaller than
                 * region_size.
                 */
                from.count = ms->ti->len & (reg->rh->region_size - 1);
                if (!from.count)
                        from.count = reg->rh->region_size;
        } else
                from.count = reg->rh->region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == ms->default_mirror)
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
                dest->count = from.count;
                dest++;
        }

        /* hand to kcopyd */
        set_bit(KCOPYD_IGNORE_ERROR, &flags);
        r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
                        recovery_complete, reg);

        return r;
}

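/*
 * Note (added for clarity): one kcopyd job reads the region from the
 * default mirror and fans it out to the other nr_mirrors - 1 legs in
 * a single pass.  recovery_complete() runs in kcopyd's context and
 * simply moves the region to recovered_regions via rh_recovery_end();
 * rh_update_states() later marks it in-sync and releases any writes
 * that were delayed against it.
 */
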
static void do_recovery(struct mirror_set *ms)
{
        int r;
        struct region *reg;
        struct dirty_log *log = ms->rh.log;

        /*
         * Start quiescing some regions.
         */
        rh_recovery_prepare(&ms->rh);

        /*
         * Copy any already quiesced regions.
         */
        while ((reg = rh_recovery_start(&ms->rh))) {
                r = recover(ms, reg);
                if (r)
                        rh_recovery_end(reg, 0);
        }

        /*
         * Update the in sync flag.
         */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* the sync is complete */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
        }
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
        /* FIXME: add read balancing */
        return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
        bio->bi_bdev = m->dev->bdev;
        bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = bio_to_region(&ms->rh, bio);

                /*
                 * We can only read balance if the region is in sync.
                 */
                if (rh_in_sync(&ms->rh, region, 0))
                        m = choose_mirror(ms, bio->bi_sector);
                else
                        m = ms->default_mirror;

                map_bio(ms, m, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:        increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:  delay the io until recovery completes
 * NOSYNC:      increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
        unsigned int i;
        int uptodate = 1;
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;

        ms = bio_get_ms(bio);
        bio_set_ms(bio, NULL);

        /*
         * NOTE: We don't decrement the pending count here,
         * instead it is done by the targets endio function.
         * This way we handle both writes to SYNC and NOSYNC
         * regions with the same code.
         */

        if (error) {
                /*
                 * only error the io if all mirrors failed.
                 * FIXME: bogus
                 */
                uptodate = 0;
                for (i = 0; i < ms->nr_mirrors; i++)
                        if (!test_bit(i, &error)) {
                                uptodate = 1;
                                break;
                        }
        }
        bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct io_region io[KCOPYD_MAX_REGIONS+1];
        struct mirror *m;

        for (i = 0; i < ms->nr_mirrors; i++) {
                m = ms->mirror + i;

                io[i].bdev = m->dev->bdev;
                io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
                io[i].count = bio->bi_size >> 9;
        }

        bio_set_ms(bio, ms);
        dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
                         bio->bi_io_vec + bio->bi_idx,
                         write_callback, bio);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        int state;
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list = NULL;

        if (!writes->head)
                return;

        /*
         * Classify each write.
         */
        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);

        while ((bio = bio_list_pop(writes))) {
                state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
                switch (state) {
                case RH_CLEAN:
                case RH_DIRTY:
                        this_list = &sync;
                        break;

                case RH_NOSYNC:
                        this_list = &nosync;
                        break;

                case RH_RECOVERING:
                        this_list = &recover;
                        break;
                }

                bio_list_add(this_list, bio);
        }

        /*
         * Increment the pending counts for any regions that will
         * be written to (writes to recover regions are going to
         * be delayed).
         */
        rh_inc_pending(&ms->rh, &sync);
        rh_inc_pending(&ms->rh, &nosync);
        rh_flush(&ms->rh);

        /*
         * Dispatch io.
         */
        while ((bio = bio_list_pop(&sync)))
                do_write(ms, bio);

        while ((bio = bio_list_pop(&recover)))
                rh_delay(&ms->rh, bio);

        while ((bio = bio_list_pop(&nosync))) {
                map_bio(ms, ms->default_mirror, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static LIST_HEAD(_mirror_sets);
static DECLARE_RWSEM(_mirror_sets_lock);

static void do_mirror(struct mirror_set *ms)
{
        struct bio_list reads, writes;

        spin_lock(&ms->lock);
        reads = ms->reads;
        writes = ms->writes;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        spin_unlock(&ms->lock);

        rh_update_states(&ms->rh);
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
}

static void do_work(void *ignored)
{
        struct mirror_set *ms;

        down_read(&_mirror_sets_lock);
        list_for_each_entry (ms, &_mirror_sets, list)
                do_mirror(ms);
        up_read(&_mirror_sets_lock);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
                                        struct dirty_log *dl)
{
        size_t len;
        struct mirror_set *ms = NULL;

        if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
                return NULL;

        len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

        ms = kmalloc(len, GFP_KERNEL);
        if (!ms) {
                ti->error = "dm-mirror: Cannot allocate mirror context";
                return NULL;
        }

        memset(ms, 0, len);
        spin_lock_init(&ms->lock);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

        if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
                ti->error = "dm-mirror: Error creating dirty region hash";
                kfree(ms);
                return NULL;
        }

        return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
                         unsigned int m)
{
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        rh_exit(&ms->rh);
        kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
        return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
                 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        sector_t offset;

        if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) {
                ti->error = "dm-mirror: Invalid offset";
                return -EINVAL;
        }

        if (dm_get_device(ti, argv[0], offset, ti->len,
                          dm_table_get_mode(ti->table),
                          &ms->mirror[mirror].dev)) {
                ti->error = "dm-mirror: Device lookup failure";
                return -ENXIO;
        }

        ms->mirror[mirror].offset = offset;

        return 0;
}

static int add_mirror_set(struct mirror_set *ms)
{
        down_write(&_mirror_sets_lock);
        list_add_tail(&ms->list, &_mirror_sets);
        up_write(&_mirror_sets_lock);
        wake();

        return 0;
}

static void del_mirror_set(struct mirror_set *ms)
{
        down_write(&_mirror_sets_lock);
        list_del(&ms->list);
        up_write(&_mirror_sets_lock);
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
                                          unsigned int argc, char **argv,
                                          unsigned int *args_used)
{
        unsigned int param_count;
        struct dirty_log *dl;

        if (argc < 2) {
                ti->error = "dm-mirror: Insufficient mirror log arguments";
                return NULL;
        }

        if (sscanf(argv[1], "%u", &param_count) != 1) {
                ti->error = "dm-mirror: Invalid mirror log argument count";
                return NULL;
        }

        *args_used = 2 + param_count;

        if (argc < *args_used) {
                ti->error = "dm-mirror: Insufficient mirror log arguments";
                return NULL;
        }

        dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
        if (!dl) {
                ti->error = "dm-mirror: Error creating mirror dirty log";
                return NULL;
        }

        if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
                ti->error = "dm-mirror: Invalid region size";
                dm_destroy_dirty_log(dl);
                return NULL;
        }

        return dl;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 */
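/*
 * Illustrative example (not from the original source): a two-way
 * mirror of 1 GiB using a core log with 512 KiB regions could be
 * built from a table line such as
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0
 *
 * i.e. log_type "core" with one log parameter (the region size in
 * sectors), then two mirrors, each given as "path offset".
 */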
#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
        struct dirty_log *dl;

        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
                return -EINVAL;

        argv += args_used;
        argc -= args_used;

        if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
            nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
                ti->error = "dm-mirror: Invalid number of mirrors";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        argv++, argc--;

        if (argc != nr_mirrors * 2) {
                ti->error = "dm-mirror: Wrong number of mirror arguments";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_destroy_dirty_log(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        for (m = 0; m < nr_mirrors; m++) {
                r = get_mirror(ms, ti, m, argv);
                if (r) {
                        free_context(ms, ti, m);
                        return r;
                }
                argv += 2;
                argc -= 2;
        }

        ti->private = ms;
        ti->split_io = ms->rh.region_size;

        r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
        if (r) {
                free_context(ms, ti, ms->nr_mirrors);
                return r;
        }

        add_mirror_set(ms);
        return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        del_mirror_set(ms);
        kcopyd_client_destroy(ms->kcopyd_client);
        free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
        int should_wake = 0;
        struct bio_list *bl;

        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock(&ms->lock);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock(&ms->lock);

        if (should_wake)
                wake();
}

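/*
 * Note (added for clarity): should_wake is only set when the list was
 * empty beforehand, so kmirrord gets one wakeup per burst of queued
 * io rather than one per bio.
 */
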
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        int r, rw = bio_rw(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;

        map_context->ll = bio_to_region(&ms->rh, bio);

        if (rw == WRITE) {
                queue_bio(ms, bio, rw);
                return 0;
        }

        r = ms->rh.log->type->in_sync(ms->rh.log,
                                      bio_to_region(&ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return r;

        if (r == -EWOULDBLOCK)  /* FIXME: ugly */
                r = 0;

        /*
         * We don't want to fast track a recovery just for a read
         * ahead.  So we just let it silently fail.
         * FIXME: get rid of this.
         */
        if (!r && rw == READA)
                return -EIO;

        if (!r) {
                /* Pass this io over to the daemon */
                queue_bio(ms, bio, rw);
                return 0;
        }

        m = choose_mirror(ms, bio->bi_sector);
        if (!m)
                return -EIO;

        map_bio(ms, m, bio);
        return 1;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                         int error, union map_info *map_context)
{
        int rw = bio_rw(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        region_t region = map_context->ll;

        /*
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE)
                rh_dec(&ms->rh, region);

        return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;

        rh_stop_recovery(&ms->rh);
        if (log->type->suspend && log->type->suspend(log))
                /* FIXME: need better error handling */
                DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;
        if (log->type->resume && log->type->resume(log))
                /* FIXME: need better error handling */
                DMWARN("log resume failed");
        rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
                         char *result, unsigned int maxlen)
{
        unsigned int m, sz;
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT("%s ", ms->mirror[m].dev->name);

                DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT,
                       ms->rh.log->type->get_sync_count(ms->rh.log),
                       ms->nr_regions);
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT("%s " SECTOR_FORMAT " ",
                               ms->mirror[m].dev->name, ms->mirror[m].offset);
        }

        return 0;
}

static struct target_type mirror_target = {
        .name        = "mirror",
        .version     = {1, 0, 1},
        .module      = THIS_MODULE,
        .ctr         = mirror_ctr,
        .dtr         = mirror_dtr,
        .map         = mirror_map,
        .end_io      = mirror_end_io,
        .postsuspend = mirror_postsuspend,
        .resume      = mirror_resume,
        .status      = mirror_status,
};

static int __init dm_mirror_init(void)
{
        int r;

        r = dm_dirty_log_init();
        if (r)
                return r;

        _kmirrord_wq = create_singlethread_workqueue("kmirrord");
        if (!_kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                dm_dirty_log_exit();
                return -ENOMEM;
        }
        INIT_WORK(&_kmirrord_work, do_work, NULL);

        r = dm_register_target(&mirror_target);
        if (r < 0) {
                DMERR("%s: Failed to register mirror target",
                      mirror_target.name);
                dm_dirty_log_exit();
                destroy_workqueue(_kmirrord_wq);
        }

        return r;
}

static void __exit dm_mirror_exit(void)
{
        int r;

        r = dm_unregister_target(&mirror_target);
        if (r < 0)
                DMERR("%s: unregister failed %d", mirror_target.name, r);

        destroy_workqueue(_kmirrord_wq);
        dm_dirty_log_exit();
}

module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");