* unsigned short
*/
-#define BV(x) { .nr_vecs = x, .name = "biovec-" #x }
+#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_pool bvec_array[BIOVEC_NR_POOLS] = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
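/*
 * Expansion sketch, not from the patch itself: __stringify() expands its
 * argument before stringifying it, so an entry like
 *
 *	BV(BIO_MAX_PAGES)
 *
 * becomes { .nr_vecs = BIO_MAX_PAGES, .name = "biovec-256" } (assuming
 * BIO_MAX_PAGES is 256), whereas the old "biovec-" #x form would have
 * produced the literal slab name "biovec-BIO_MAX_PAGES".
 */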
/*
* default destructor for a bio allocated with bio_alloc()
*/
-void bio_destructor(struct bio *bio)
+static void bio_destructor(struct bio *bio)
{
const int pool_idx = BIO_POOL_IDX(bio);
struct biovec_pool *bp = bvec_array + pool_idx;
BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
- /*
- * cloned bio doesn't own the veclist
- */
- if (!bio_flagged(bio, BIO_CLONED))
- mempool_free(bio->bi_io_vec, bp->pool);
-
+ mempool_free(bio->bi_io_vec, bp->pool);
mempool_free(bio, bio_pool);
}
bio->bi_idx = 0;
bio->bi_phys_segments = 0;
bio->bi_hw_segments = 0;
+ bio->bi_hw_front_size = 0;
+ bio->bi_hw_back_size = 0;
bio->bi_size = 0;
bio->bi_max_vecs = 0;
bio->bi_end_io = NULL;
**/
struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
{
- struct bio_vec *bvl = NULL;
- unsigned long idx;
- struct bio *bio;
-
- bio = mempool_alloc(bio_pool, gfp_mask);
- if (unlikely(!bio))
- goto out;
-
- bio_init(bio);
-
- if (unlikely(!nr_iovecs))
- goto noiovec;
-
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
- if (bvl) {
- bio->bi_flags |= idx << BIO_POOL_OFFSET;
- bio->bi_max_vecs = bvec_array[idx].nr_vecs;
-noiovec:
+ struct bio *bio = mempool_alloc(bio_pool, gfp_mask);
+
+ if (likely(bio)) {
+ struct bio_vec *bvl = NULL;
+
+ bio_init(bio);
+ if (likely(nr_iovecs)) {
+ unsigned long idx;
+
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
+ if (unlikely(!bvl)) {
+ mempool_free(bio, bio_pool);
+ bio = NULL;
+ goto out;
+ }
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = bvec_array[idx].nr_vecs;
+ }
bio->bi_io_vec = bvl;
bio->bi_destructor = bio_destructor;
-out:
- return bio;
}
-
- mempool_free(bio, bio_pool);
- bio = NULL;
- goto out;
+out:
+ return bio;
}
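/*
 * A minimal usage sketch, not from this patch: allocate a bio with room
 * for two biovecs and release it again.  bio_put() drops the last
 * reference and ends up in bio_destructor() above, which now always
 * returns the veclist to its mempool.  The helper name is made up for
 * illustration.
 */
static int example_bio_roundtrip(void)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 2);

	if (unlikely(!bio))
		return -ENOMEM;

	/* ... fill in bi_bdev, bi_sector and the io_vec entries, submit ... */

	bio_put(bio);
	return 0;
}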
/**
*/
inline void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- bio->bi_io_vec = bio_src->bi_io_vec;
+ request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+
+ memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec));
bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
* for the clone
*/
bio->bi_vcnt = bio_src->bi_vcnt;
- bio->bi_idx = bio_src->bi_idx;
- if (bio_flagged(bio, BIO_SEG_VALID)) {
- bio->bi_phys_segments = bio_src->bi_phys_segments;
- bio->bi_hw_segments = bio_src->bi_hw_segments;
- bio->bi_flags |= (1 << BIO_SEG_VALID);
- }
bio->bi_size = bio_src->bi_size;
-
- /*
- * cloned bio does not own the bio_vec, so users cannot fiddle with
- * it. clear bi_max_vecs and clear the BIO_POOL_BITS to make this
- * apparent
- */
- bio->bi_max_vecs = 0;
- bio->bi_flags &= (BIO_POOL_MASK - 1);
+ bio_phys_segments(q, bio);
+ bio_hw_segments(q, bio);
}
/**
*/
struct bio *bio_clone(struct bio *bio, int gfp_mask)
{
- struct bio *b = bio_alloc(gfp_mask, 0);
+ struct bio *b = bio_alloc(gfp_mask, bio->bi_max_vecs);
if (b)
__bio_clone(b, bio);
* make this too complex.
*/
- while (bio_phys_segments(q, bio) >= q->max_phys_segments
- || bio_hw_segments(q, bio) >= q->max_hw_segments) {
+ while (bio->bi_phys_segments >= q->max_phys_segments
+ || bio->bi_hw_segments >= q->max_hw_segments
+ || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
if (retried_segments)
return 0;
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
retried_segments = 1;
+ blk_recount_segments(q, bio);
}
/*
}
}
+ /* If we may be able to merge these biovecs, force a recount */
+ if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
+ BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
bio->bi_vcnt++;
bio->bi_phys_segments++;
bio->bi_hw_segments++;
len, offset);
}
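/*
 * A usage sketch, not from this patch: bio_add_page() returns the number
 * of bytes it actually accepted, so callers must check for a short add
 * when the queue's segment or size limits get in the way.  The helper
 * below is hypothetical.
 */
static int example_add_page(struct bio *bio, struct page *page)
{
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
		return -EINVAL;	/* rejected by segment/size limits */

	return 0;
}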
+struct bio_map_data {
+ struct bio_vec *iovecs;
+ void __user *userptr;
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+{
+ memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+ bio->bi_private = bmd;
+}
+
+static void bio_free_map_data(struct bio_map_data *bmd)
+{
+ kfree(bmd->iovecs);
+ kfree(bmd);
+}
+
+static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+{
+ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+
+ if (!bmd)
+ return NULL;
+
+ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+ if (bmd->iovecs)
+ return bmd;
+
+ kfree(bmd);
+ return NULL;
+}
+
+/**
+ * bio_uncopy_user - finish a previously copied bio
+ * @bio: bio being terminated
+ *
+ * Free pages allocated from bio_copy_user() and write back data
+ * to user space in case of a read.
+ */
+int bio_uncopy_user(struct bio *bio)
+{
+ struct bio_map_data *bmd = bio->bi_private;
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_vec *bvec;
+ int i, ret = 0;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+ unsigned int len = bmd->iovecs[i].bv_len;
+
+ if (read && !ret && copy_to_user(bmd->userptr, addr, len))
+ ret = -EFAULT;
+
+ __free_page(bvec->bv_page);
+ bmd->userptr += len;
+ }
+ bio_free_map_data(bmd);
+ bio_put(bio);
+ return ret;
+}
+
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @uaddr: start of the user address range
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with a call to
+ * bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+ unsigned int len, int write_to_vm)
+{
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ struct bio_map_data *bmd;
+ struct bio_vec *bvec;
+ struct page *page;
+ struct bio *bio;
+ int i, ret;
+
+ bmd = bio_alloc_map_data(end - start);
+ if (!bmd)
+ return ERR_PTR(-ENOMEM);
+
+ bmd->userptr = (void __user *) uaddr;
+
+ ret = -ENOMEM;
+ bio = bio_alloc(GFP_KERNEL, end - start);
+ if (!bio)
+ goto out_bmd;
+
+ bio->bi_rw |= (!write_to_vm << BIO_RW);
+
+ ret = 0;
+ while (len) {
+ unsigned int bytes = PAGE_SIZE;
+
+ if (bytes > len)
+ bytes = len;
+
+ page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+ ret = -EINVAL;
+ break;
+ }
+
+ len -= bytes;
+ }
+
+ if (ret)
+ goto cleanup;
+
+ /*
+ * success
+ */
+ if (!write_to_vm) {
+ char __user *p = (char __user *) uaddr;
+
+ /*
+ * for a write, copy in data to kernel pages
+ */
+ ret = -EFAULT;
+ bio_for_each_segment(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ if (copy_from_user(addr, p, bvec->bv_len))
+ goto cleanup;
+ p += bvec->bv_len;
+ }
+ }
+
+ bio_set_map_data(bmd, bio);
+ return bio;
+cleanup:
+ bio_for_each_segment(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
+out_bmd:
+ bio_free_map_data(bmd);
+ return ERR_PTR(ret);
+}
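/*
 * A usage sketch, not from this patch: bounce a user buffer through
 * kernel pages for a WRITE, then finish with bio_uncopy_user(), which
 * frees the bounce pages (and, for a READ, copies the data back to user
 * space).  Assumes a valid request_queue; submission and completion
 * handling are elided.
 */
static int example_copy_user_write(request_queue_t *q, unsigned long uaddr,
				   unsigned int len)
{
	struct bio *bio = bio_copy_user(q, uaddr, len, 0);	/* 0 == write */

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for it to complete ... */

	return bio_uncopy_user(bio);
}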
+
static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len,
int write_to_vm)
* size for now, in the future we can relax this restriction
*/
if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
- return NULL;
+ return ERR_PTR(-EINVAL);
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
- blk_queue_bounce(q, &bio);
+ bio->bi_flags |= (1 << BIO_USER_MAPPED);
return bio;
out:
kfree(pages);
bio_put(bio);
- return NULL;
+ return ERR_PTR(ret);
}
/**
* @write_to_vm: bool indicating writing to pages or not
*
* Map the user space address into a bio suitable for io to a block
- * device.
+ * device. Returns an error pointer in case of error.
*/
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
- if (bio) {
- /*
- * subtle -- if __bio_map_user() ended up bouncing a bio,
- * it would normally disappear when its bi_end_io is run.
- * however, we need it for the unmap, so grab an extra
- * reference to it
- */
- bio_get(bio);
+ if (IS_ERR(bio))
+ return bio;
- if (bio->bi_size < len) {
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio, 0);
- return NULL;
- }
- }
+ /*
+ * subtle -- if __bio_map_user() ended up bouncing a bio,
+ * it would normally disappear when its bi_end_io is run.
+ * however, we need it for the unmap, so grab an extra
+ * reference to it
+ */
+ bio_get(bio);
- return bio;
+ if (bio->bi_size == len)
+ return bio;
+
+ /*
+ * don't support partial mappings
+ */
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio);
+ return ERR_PTR(-EINVAL);
}
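/*
 * A caller sketch, not from this patch: bio_map_user() now reports
 * failure through ERR_PTR() instead of NULL, and bio_unmap_user() no
 * longer takes the write_to_vm flag since the direction is taken from
 * the bio itself.  Hypothetical helper, submission elided.
 */
static int example_map_user(request_queue_t *q, struct block_device *bdev,
			    unsigned long uaddr, unsigned int len)
{
	struct bio *bio = bio_map_user(q, bdev, uaddr, len, 1); /* read */

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for it to complete ... */

	bio_unmap_user(bio);
	return 0;
}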
-static void __bio_unmap_user(struct bio *bio, int write_to_vm)
+static void __bio_unmap_user(struct bio *bio)
{
struct bio_vec *bvec;
int i;
- /*
- * find original bio if it was bounced
- */
- if (bio->bi_private) {
- /*
- * someone stole our bio, must not happen
- */
- BUG_ON(!bio_flagged(bio, BIO_BOUNCED));
-
- bio = bio->bi_private;
- }
-
/*
* make sure we dirty pages we wrote to
*/
__bio_for_each_segment(bvec, bio, i, 0) {
- if (write_to_vm)
+ if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
- * @write_to_vm: bool indicating whether pages were written to
*
- * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
- * must be the same as passed into bio_map_user(). Must be called with
+ * Unmap a bio previously mapped by bio_map_user(). Must be called from
* a process context.
*
* bio_unmap_user() may sleep.
*/
-void bio_unmap_user(struct bio *bio, int write_to_vm)
+void bio_unmap_user(struct bio *bio)
{
- __bio_unmap_user(bio, write_to_vm);
+ __bio_unmap_user(bio);
bio_put(bio);
}
static void bio_dirty_fn(void *data);
static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
-static spinlock_t bio_dirty_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
size = bp->nr_vecs * sizeof(struct bio_vec);
bp->slab = kmem_cache_create(bp->name, size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!bp->slab)
- panic("biovec: can't init slab cache\n");
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
if (i >= scale)
pool_entries >>= 1;
static int __init init_bio(void)
{
bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!bio_slab)
- panic("bio: can't create slab cache\n");
- bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab, mempool_free_slab, bio_slab);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab,
+ mempool_free_slab, bio_slab);
if (!bio_pool)
panic("bio: can't create mempool\n");
biovec_init_pools();
- bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, bio_pair_alloc, bio_pair_free, NULL);
+ bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
+ bio_pair_alloc, bio_pair_free, NULL);
if (!bio_split_pool)
panic("bio: can't create split pool\n");
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
+EXPORT_SYMBOL(bio_copy_user);
+EXPORT_SYMBOL(bio_uncopy_user);