*/
#include <linux/blkdev.h>
-#include <linux/config.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include "dm-bio-list.h"
#include "kcopyd.h"
+#define DM_MSG_PREFIX "snapshots"
+
/*
* The percentage increment we will wake up users at
*/
#define SNAPSHOT_PAGES 256
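+/* ksnapd runs flush_queued_bios() to resubmit bios deferred by the snapshot */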
+static struct workqueue_struct *ksnapd;
+static void flush_queued_bios(struct work_struct *work);
+
struct pending_exception {
struct exception e;
struct bio_list snapshot_bios;
/*
- * Other pending_exceptions that are processing this
- * chunk. When this list is empty, we know we can
- * complete the origins.
+ * Short-term queue of pending exceptions prior to submission.
*/
- struct list_head siblings;
+ struct list_head list;
+
+ /*
+ * The primary pending_exception is the one that holds
+ * the ref_count and the list of origin_bios for a
+ * group of pending_exceptions. It is always last to get freed.
+ * These fields get set up when writing to the origin.
+ */
+ struct pending_exception *primary_pe;
+
+ /*
+ * Number of pending_exceptions processing this chunk.
+ * When this drops to zero we must complete the origin bios.
+	 * When incrementing or decrementing this, hold pe->snap->lock
+	 * for the sibling concerned, not pe->primary_pe->snap->lock,
+	 * unless they are the same.
+ */
+ atomic_t ref_count;
/* Pointer back to snapshot context */
struct dm_snapshot *snap;
* Hash table mapping origin volumes to lists of snapshots and
* a lock to protect it
*/
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL);
if (!_origins) {
- DMERR("Device mapper: Snapshot: unable to allocate memory");
+ DMERR("unable to allocate memory");
return -ENOMEM;
}
return 0;
}
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et,
+				 struct kmem_cache *mem)
{
struct list_head *slot;
struct exception *ex, *next;
return (n + size) & ~size;
}
-static void read_snapshot_metadata(struct dm_snapshot *s)
+static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
+ char **error)
{
- if (s->store.read_metadata(&s->store)) {
- down_write(&s->lock);
- s->valid = 0;
- up_write(&s->lock);
+ unsigned long chunk_size;
+ char *value;
+
+ chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
+ if (*chunk_size_arg == '\0' || *value != '\0') {
+ *error = "Invalid chunk size";
+ return -EINVAL;
+ }
+
+ if (!chunk_size) {
+ s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
+ return 0;
+ }
+
+ /*
+ * Chunk size must be multiple of page size. Silently
+ * round up if it's not.
+ */
+ chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
+
+ /* Check chunk_size is a power of 2 */
+ if (chunk_size & (chunk_size - 1)) {
+ *error = "Chunk size is not a power of 2";
+ return -EINVAL;
}
+
+ /* Validate the chunk size against the device block size */
+ if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
+ *error = "Chunk size is not a multiple of device blocksize";
+ return -EINVAL;
+ }
+
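+	/* chunk_size is now a power of 2, so mask and shift follow directly */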
+ s->chunk_size = chunk_size;
+ s->chunk_mask = chunk_size - 1;
+ s->chunk_shift = ffs(chunk_size) - 1;
+
+ return 0;
}
/*
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_snapshot *s;
- unsigned long chunk_size;
int r = -EINVAL;
char persistent;
char *origin_path;
char *cow_path;
- char *value;
- int blocksize;
- if (argc < 4) {
- ti->error = "dm-snapshot: requires exactly 4 arguments";
+ if (argc != 4) {
+ ti->error = "requires exactly 4 arguments";
r = -EINVAL;
goto bad1;
}
goto bad1;
}
- chunk_size = simple_strtoul(argv[3], &value, 10);
- if (chunk_size == 0 || value == NULL) {
- ti->error = "Invalid chunk size";
- r = -EINVAL;
- goto bad1;
- }
-
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL) {
ti->error = "Cannot allocate snapshot context private "
goto bad2;
}
- /*
- * Chunk size must be multiple of page size. Silently
- * round up if it's not.
- */
- chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
-
- /* Validate the chunk size against the device block size */
- blocksize = s->cow->bdev->bd_disk->queue->hardsect_size;
- if (chunk_size % (blocksize >> 9)) {
- ti->error = "Chunk size is not a multiple of device blocksize";
- r = -EINVAL;
- goto bad3;
- }
-
- /* Check chunk_size is a power of 2 */
- if (chunk_size & (chunk_size - 1)) {
- ti->error = "Chunk size is not a power of 2";
- r = -EINVAL;
+ r = set_chunk_size(s, argv[3], &ti->error);
+ if (r)
goto bad3;
- }
- s->chunk_size = chunk_size;
- s->chunk_mask = chunk_size - 1;
s->type = persistent;
- s->chunk_shift = ffs(chunk_size) - 1;
s->valid = 1;
s->active = 0;
s->last_percent = 0;
init_rwsem(&s->lock);
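+	/* pe_lock protects the deferred queued_bios list */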
+ spin_lock_init(&s->pe_lock);
s->table = ti->table;
/* Allocate hash table for COW data */
goto bad3;
}
- /*
- * Check the persistent flag - done here because we need the iobuf
- * to check the LV header
- */
s->store.snap = s;
if (persistent == 'P')
- r = dm_create_persistent(&s->store, chunk_size);
+ r = dm_create_persistent(&s->store);
else
- r = dm_create_transient(&s->store, s, blocksize);
+ r = dm_create_transient(&s->store);
if (r) {
ti->error = "Couldn't create exception store";
}
/* Metadata must only be loaded into one table at once */
- read_snapshot_metadata(s);
+ r = s->store.read_metadata(&s->store);
+ if (r) {
+ ti->error = "Failed to read snapshot metadata";
+ goto bad6;
+ }
+
+ bio_list_init(&s->queued_bios);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
return r;
}
+static void __free_exceptions(struct dm_snapshot *s)
+{
+ kcopyd_client_destroy(s->kcopyd_client);
+ s->kcopyd_client = NULL;
+
+ exit_exception_table(&s->pending, pending_cache);
+ exit_exception_table(&s->complete, exception_cache);
+
+ s->store.destroy(&s->store);
+}
+
static void snapshot_dtr(struct dm_target *ti)
{
struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
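+	/* Wait for any deferred bio submissions to finish */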
+ flush_workqueue(ksnapd);
+
/* Prevent further origin writes from using this snapshot. */
/* After this returns there can be no new kcopyd jobs. */
unregister_snapshot(s);
- kcopyd_client_destroy(s->kcopyd_client);
-
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
-
- /* Deallocate memory used */
- s->store.destroy(&s->store);
+ __free_exceptions(s);
dm_put_device(ti, s->origin);
dm_put_device(ti, s->cow);
}
}
+static void flush_queued_bios(struct work_struct *work)
+{
+ struct dm_snapshot *s =
+ container_of(work, struct dm_snapshot, queued_bios_work);
+ struct bio *queued_bios;
+ unsigned long flags;
+
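+	/* Detach the queued bios under the lock; submit them after dropping it */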
+ spin_lock_irqsave(&s->pe_lock, flags);
+ queued_bios = bio_list_get(&s->queued_bios);
+ spin_unlock_irqrestore(&s->pe_lock, flags);
+
+ flush_bios(queued_bios);
+}
+
/*
* Error a list of buffers.
*/
}
}
-static struct bio *__flush_bios(struct pending_exception *pe)
+static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
- struct pending_exception *sibling;
+ if (!s->valid)
+ return;
- if (list_empty(&pe->siblings))
- return bio_list_get(&pe->origin_bios);
+ if (err == -EIO)
+ DMERR("Invalidating snapshot: Error reading/writing.");
+ else if (err == -ENOMEM)
+ DMERR("Invalidating snapshot: Unable to allocate exception.");
- sibling = list_entry(pe->siblings.next,
- struct pending_exception, siblings);
+ if (s->store.drop_snapshot)
+ s->store.drop_snapshot(&s->store);
- list_del(&pe->siblings);
+ s->valid = 0;
- /* This is fine as long as kcopyd is single-threaded. If kcopyd
- * becomes multi-threaded, we'll need some locking here.
+ dm_table_event(s->table);
+}
+
+static void get_pending_exception(struct pending_exception *pe)
+{
+ atomic_inc(&pe->ref_count);
+}
+
+static struct bio *put_pending_exception(struct pending_exception *pe)
+{
+ struct pending_exception *primary_pe;
+ struct bio *origin_bios = NULL;
+
+ primary_pe = pe->primary_pe;
+
+ /*
+ * If this pe is involved in a write to the origin and
+	 * it is the last sibling to complete, then release
+ * the bios for the original write to the origin.
*/
- bio_list_merge(&sibling->origin_bios, &pe->origin_bios);
+ if (primary_pe &&
+ atomic_dec_and_test(&primary_pe->ref_count))
+ origin_bios = bio_list_get(&primary_pe->origin_bios);
- return NULL;
+ /*
+ * Free the pe if it's not linked to an origin write or if
+ * it's not itself a primary pe.
+ */
+ if (!primary_pe || primary_pe != pe)
+ free_pending_exception(pe);
+
+ /*
+ * Free the primary pe if nothing references it.
+ */
+ if (primary_pe && !atomic_read(&primary_pe->ref_count))
+ free_pending_exception(primary_pe);
+
+ return origin_bios;
}
static void pending_complete(struct pending_exception *pe, int success)
{
struct exception *e;
struct dm_snapshot *s = pe->snap;
- struct bio *flush = NULL;
-
- if (success) {
- e = alloc_exception();
- if (!e) {
- DMWARN("Unable to allocate exception.");
- down_write(&s->lock);
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- flush = __flush_bios(pe);
- up_write(&s->lock);
-
- error_bios(bio_list_get(&pe->snapshot_bios));
- goto out;
- }
- *e = pe->e;
-
- /*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
- */
- down_write(&s->lock);
- insert_exception(&s->complete, e);
- remove_exception(&pe->e);
- flush = __flush_bios(pe);
-
- /* Submit any pending write bios */
- up_write(&s->lock);
+ struct bio *origin_bios = NULL;
+ struct bio *snapshot_bios = NULL;
+ int error = 0;
- flush_bios(bio_list_get(&pe->snapshot_bios));
- } else {
+ if (!success) {
/* Read/write error - snapshot is unusable */
down_write(&s->lock);
- if (s->valid)
- DMERR("Error reading/writing snapshot");
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- remove_exception(&pe->e);
- flush = __flush_bios(pe);
- up_write(&s->lock);
+ __invalidate_snapshot(s, -EIO);
+ error = 1;
+ goto out;
+ }
- error_bios(bio_list_get(&pe->snapshot_bios));
+ e = alloc_exception();
+ if (!e) {
+ down_write(&s->lock);
+ __invalidate_snapshot(s, -ENOMEM);
+ error = 1;
+ goto out;
+ }
+ *e = pe->e;
- dm_table_event(s->table);
+ down_write(&s->lock);
+ if (!s->valid) {
+ free_exception(e);
+ error = 1;
+ goto out;
}
+ /*
+ * Add a proper exception, and remove the
+ * in-flight exception from the list.
+ */
+ insert_exception(&s->complete, e);
+
out:
- free_pending_exception(pe);
+ remove_exception(&pe->e);
+ snapshot_bios = bio_list_get(&pe->snapshot_bios);
+ origin_bios = put_pending_exception(pe);
- if (flush)
- flush_bios(flush);
+ up_write(&s->lock);
+
+ /* Submit any pending write bios */
+ if (error)
+ error_bios(snapshot_bios);
+ else
+ flush_bios(snapshot_bios);
+
+ flush_bios(origin_bios);
}
static void commit_callback(void *context, int success)
if (e) {
/* cast the exception to a pending exception */
pe = container_of(e, struct pending_exception, e);
+ goto out;
+ }
- } else {
- /*
- * Create a new pending exception, we don't want
- * to hold the lock while we do this.
- */
- up_write(&s->lock);
- pe = alloc_pending_exception();
- down_write(&s->lock);
+ /*
+ * Create a new pending exception, we don't want
+ * to hold the lock while we do this.
+ */
+ up_write(&s->lock);
+ pe = alloc_pending_exception();
+ down_write(&s->lock);
- e = lookup_exception(&s->pending, chunk);
- if (e) {
- free_pending_exception(pe);
- pe = container_of(e, struct pending_exception, e);
- } else {
- pe->e.old_chunk = chunk;
- bio_list_init(&pe->origin_bios);
- bio_list_init(&pe->snapshot_bios);
- INIT_LIST_HEAD(&pe->siblings);
- pe->snap = s;
- pe->started = 0;
-
- if (s->store.prepare_exception(&s->store, &pe->e)) {
- free_pending_exception(pe);
- s->valid = 0;
- return NULL;
- }
+ if (!s->valid) {
+ free_pending_exception(pe);
+ return NULL;
+ }
- insert_exception(&s->pending, &pe->e);
- }
+ e = lookup_exception(&s->pending, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ pe = container_of(e, struct pending_exception, e);
+ goto out;
+ }
+
+ pe->e.old_chunk = chunk;
+ bio_list_init(&pe->origin_bios);
+ bio_list_init(&pe->snapshot_bios);
+ pe->primary_pe = NULL;
+ atomic_set(&pe->ref_count, 0);
+ pe->snap = s;
+ pe->started = 0;
+
+ if (s->store.prepare_exception(&s->store, &pe->e)) {
+ free_pending_exception(pe);
+ return NULL;
}
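+	/* Take the pe's initial reference; only a primary_pe's ref_count is used */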
+ get_pending_exception(pe);
+ insert_exception(&s->pending, &pe->e);
+
+ out:
return pe;
}
{
struct exception *e;
struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
- struct pending_exception *pe;
+ struct pending_exception *pe = NULL;
chunk = sector_to_chunk(s, bio->bi_sector);
/* Full snapshots are not usable */
+ /* To get here the table must be live so s->active is always set. */
if (!s->valid)
return -EIO;
if (unlikely(bio_barrier(bio)))
return -EOPNOTSUPP;
+ /* FIXME: should only take write lock if we need
+ * to copy an exception */
+ down_write(&s->lock);
+
+ if (!s->valid) {
+ r = -EIO;
+ goto out_unlock;
+ }
+
+ /* If the block is already remapped - use that, else remap it */
+ e = lookup_exception(&s->complete, chunk);
+ if (e) {
+ remap_exception(s, e, bio);
+ goto out_unlock;
+ }
+
/*
* Write to snapshot - higher level takes care of RW/RO
* flags so we should only get this if we are
* writeable.
*/
if (bio_rw(bio) == WRITE) {
+ pe = __find_pending_exception(s, bio);
+ if (!pe) {
+ __invalidate_snapshot(s, -ENOMEM);
+ r = -EIO;
+ goto out_unlock;
+ }
- /* FIXME: should only take write lock if we need
- * to copy an exception */
- down_write(&s->lock);
+ remap_exception(s, &pe->e, bio);
+ bio_list_add(&pe->snapshot_bios, bio);
- /* If the block is already remapped - use that, else remap it */
- e = lookup_exception(&s->complete, chunk);
- if (e) {
- remap_exception(s, e, bio);
- up_write(&s->lock);
+ r = DM_MAPIO_SUBMITTED;
- } else {
- pe = __find_pending_exception(s, bio);
-
- if (!pe) {
- if (s->store.drop_snapshot)
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- r = -EIO;
- up_write(&s->lock);
- } else {
- remap_exception(s, &pe->e, bio);
- bio_list_add(&pe->snapshot_bios, bio);
-
- if (!pe->started) {
- /* this is protected by snap->lock */
- pe->started = 1;
- up_write(&s->lock);
- start_copy(pe);
- } else
- up_write(&s->lock);
- r = 0;
- }
+ if (!pe->started) {
+ /* this is protected by snap->lock */
+ pe->started = 1;
+ up_write(&s->lock);
+ start_copy(pe);
+ goto out;
}
-
- } else {
+ } else
/*
* FIXME: this read path scares me because we
* always use the origin when we have a pending
* exception. However I can't think of a
* situation where this is wrong - ejt.
*/
+ bio->bi_bdev = s->origin->bdev;
- /* Do reads */
- down_read(&s->lock);
-
- /* See if it it has been remapped */
- e = lookup_exception(&s->complete, chunk);
- if (e)
- remap_exception(s, e, bio);
- else
- bio->bi_bdev = s->origin->bdev;
-
- up_read(&s->lock);
- }
-
+ out_unlock:
+ up_write(&s->lock);
+ out:
return r;
}
snap->store.fraction_full(&snap->store,
&numerator,
&denominator);
- snprintf(result, maxlen,
- SECTOR_FORMAT "/" SECTOR_FORMAT,
- numerator, denominator);
+ snprintf(result, maxlen, "%llu/%llu",
+ (unsigned long long)numerator,
+ (unsigned long long)denominator);
}
else
snprintf(result, maxlen, "Unknown");
* to make private copies if the output is to
* make sense.
*/
- snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT,
+ snprintf(result, maxlen, "%s %s %c %llu",
snap->origin->name, snap->cow->name,
- snap->type, snap->chunk_size);
+ snap->type,
+ (unsigned long long)snap->chunk_size);
break;
}
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
-static void list_merge(struct list_head *l1, struct list_head *l2)
-{
- struct list_head *l1_n, *l2_p;
-
- l1_n = l1->next;
- l2_p = l2->prev;
-
- l1->next = l2;
- l2->prev = l1;
-
- l2_p->next = l1_n;
- l1_n->prev = l2_p;
-}
-
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
- int r = 1, first = 1;
+ int r = DM_MAPIO_REMAPPED, first = 0;
struct dm_snapshot *snap;
struct exception *e;
- struct pending_exception *pe, *last = NULL;
+ struct pending_exception *pe, *next_pe, *primary_pe = NULL;
chunk_t chunk;
+ LIST_HEAD(pe_queue);
/* Do all the snapshots on this origin */
list_for_each_entry (snap, snapshots, list) {
+ down_write(&snap->lock);
+
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
- continue;
+ goto next_snapshot;
/* Nothing to do if writing beyond end of snapshot */
if (bio->bi_sector >= dm_table_get_size(snap->table))
- continue;
-
- down_write(&snap->lock);
+ goto next_snapshot;
/*
* Remember, different snapshots can have
* Check exception table to see if block
* is already remapped in this snapshot
* and trigger an exception if not.
+ *
+ * ref_count is initialised to 1 so pending_complete()
+ * won't destroy the primary_pe while we're inside this loop.
*/
e = lookup_exception(&snap->complete, chunk);
- if (!e) {
- pe = __find_pending_exception(snap, bio);
- if (!pe) {
- snap->store.drop_snapshot(&snap->store);
- snap->valid = 0;
-
- } else {
- if (last)
- list_merge(&pe->siblings,
- &last->siblings);
-
- last = pe;
- r = 0;
+ if (e)
+ goto next_snapshot;
+
+ pe = __find_pending_exception(snap, bio);
+ if (!pe) {
+ __invalidate_snapshot(snap, -ENOMEM);
+ goto next_snapshot;
+ }
+
+ if (!primary_pe) {
+ /*
+		 * Either every pe here has the same
+		 * primary_pe or none has one yet.
+ */
+ if (pe->primary_pe)
+ primary_pe = pe->primary_pe;
+ else {
+ primary_pe = pe;
+ first = 1;
}
+
+ bio_list_add(&primary_pe->origin_bios, bio);
+
+ r = DM_MAPIO_SUBMITTED;
}
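+	/* Link this pe to the group's primary_pe and hold a reference on it */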
+ if (!pe->primary_pe) {
+ pe->primary_pe = primary_pe;
+ get_pending_exception(primary_pe);
+ }
+
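+	/* Queue the pe; copying starts once every snapshot lock has been dropped */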
+ if (!pe->started) {
+ pe->started = 1;
+ list_add_tail(&pe->list, &pe_queue);
+ }
+
+ next_snapshot:
up_write(&snap->lock);
}
+ if (!primary_pe)
+ return r;
+
/*
- * Now that we have a complete pe list we can start the copying.
+ * If this is the first time we're processing this chunk and
+	 * ref_count is now 1, it means all the pending exceptions
+ * got completed while we were in the loop above, so it falls to
+ * us here to remove the primary_pe and submit any origin_bios.
*/
- if (last) {
- pe = last;
- do {
- down_write(&pe->snap->lock);
- if (first)
- bio_list_add(&pe->origin_bios, bio);
- if (!pe->started) {
- pe->started = 1;
- up_write(&pe->snap->lock);
- start_copy(pe);
- } else
- up_write(&pe->snap->lock);
- first = 0;
- pe = list_entry(pe->siblings.next,
- struct pending_exception, siblings);
-
- } while (pe != last);
+
+ if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
+ flush_bios(bio_list_get(&primary_pe->origin_bios));
+ free_pending_exception(primary_pe);
+ /* If we got here, pe_queue is necessarily empty. */
+ return r;
}
+ /*
+ * Now that we have a complete pe list we can start the copying.
+ */
+ list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
+ start_copy(pe);
+
return r;
}
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
struct origin *o;
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
struct dm_dev *dev;
if (argc != 1) {
- ti->error = "dm-origin: incorrect number of arguments";
+ ti->error = "origin: incorrect number of arguments";
return -EINVAL;
}
return -EOPNOTSUPP;
/* Only tell snapshots if this is a write */
- return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
+ return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
r = dm_register_target(&origin_target);
if (r < 0) {
- DMERR("Device mapper: Origin: register failed %d\n", r);
+ DMERR("Origin target register failed %d", r);
goto bad1;
}
goto bad4;
}
- pending_pool = mempool_create(128, mempool_alloc_slab,
- mempool_free_slab, pending_cache);
+ pending_pool = mempool_create_slab_pool(128, pending_cache);
if (!pending_pool) {
DMERR("Couldn't create pending pool.");
r = -ENOMEM;
goto bad5;
}
+ ksnapd = create_singlethread_workqueue("ksnapd");
+ if (!ksnapd) {
+ DMERR("Failed to create ksnapd workqueue.");
+ r = -ENOMEM;
+ goto bad6;
+ }
+
return 0;
+ bad6:
+ mempool_destroy(pending_pool);
bad5:
kmem_cache_destroy(pending_cache);
bad4:
{
int r;
+ destroy_workqueue(ksnapd);
+
r = dm_unregister_target(&snapshot_target);
if (r)
DMERR("snapshot unregister failed %d", r);