/*
* default destructor for a bio allocated with bio_alloc()
*/
-void bio_destructor(struct bio *bio)
+static void bio_destructor(struct bio *bio)
{
const int pool_idx = BIO_POOL_IDX(bio);
struct biovec_pool *bp = bvec_array + pool_idx;
BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
- /*
- * cloned bio doesn't own the veclist
- */
- if (!bio_flagged(bio, BIO_CLONED))
- mempool_free(bio->bi_io_vec, bp->pool);
-
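+ /*
+ * every bio allocated here owns its io_vec now (a clone gets its own
+ * copy in __bio_clone()), so it is freed here unconditionally
+ */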
+ mempool_free(bio->bi_io_vec, bp->pool);
mempool_free(bio, bio_pool);
}
**/
struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
{
- struct bio_vec *bvl = NULL;
- unsigned long idx;
- struct bio *bio;
+ struct bio *bio = mempool_alloc(bio_pool, gfp_mask);
- bio = mempool_alloc(bio_pool, gfp_mask);
- if (unlikely(!bio))
- goto out;
+ if (likely(bio)) {
+ struct bio_vec *bvl = NULL;
- bio_init(bio);
+ bio_init(bio);
+ if (likely(nr_iovecs)) {
+ unsigned long idx;
- if (unlikely(!nr_iovecs))
- goto noiovec;
-
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
- if (bvl) {
- bio->bi_flags |= idx << BIO_POOL_OFFSET;
- bio->bi_max_vecs = bvec_array[idx].nr_vecs;
-noiovec:
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
+ if (unlikely(!bvl)) {
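+ /* no bvec pool entry: release the bio and fail the allocation */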
+ mempool_free(bio, bio_pool);
+ bio = NULL;
+ goto out;
+ }
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = bvec_array[idx].nr_vecs;
+ }
bio->bi_io_vec = bvl;
bio->bi_destructor = bio_destructor;
-out:
- return bio;
}
-
- mempool_free(bio, bio_pool);
- bio = NULL;
- goto out;
+out:
+ return bio;
}
/**
*/
inline void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- bio->bi_io_vec = bio_src->bi_io_vec;
+ request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+
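+ /*
+ * the clone gets its own copy of the source's io_vec rather than
+ * sharing the source's vec list
+ */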
+ memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec));
bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
* for the clone
*/
bio->bi_vcnt = bio_src->bi_vcnt;
- bio->bi_idx = bio_src->bi_idx;
- if (bio_flagged(bio, BIO_SEG_VALID)) {
- bio->bi_phys_segments = bio_src->bi_phys_segments;
- bio->bi_hw_segments = bio_src->bi_hw_segments;
- bio->bi_flags |= (1 << BIO_SEG_VALID);
- }
bio->bi_size = bio_src->bi_size;
-
- /*
- * cloned bio does not own the bio_vec, so users cannot fiddle with
- * it. clear bi_max_vecs and clear the BIO_POOL_BITS to make this
- * apparent
- */
- bio->bi_max_vecs = 0;
- bio->bi_flags &= (BIO_POOL_MASK - 1);
+ bio_phys_segments(q, bio);
+ bio_hw_segments(q, bio);
}
/**
*/
struct bio *bio_clone(struct bio *bio, int gfp_mask)
{
- struct bio *b = bio_alloc(gfp_mask, 0);
+ struct bio *b = bio_alloc(gfp_mask, bio->bi_max_vecs);
if (b)
__bio_clone(b, bio);
return b;
}
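+/*
+ * bio_map_data holds the state bio_copy_user() needs at completion time:
+ * the user-space buffer and a copy of the iovecs taken when the bio was
+ * set up, so bio_uncopy_user() can copy data back and free the pages
+ */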
+struct bio_map_data {
+ struct bio_vec *iovecs;
+ void __user *userptr;
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+{
+ memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+ bio->bi_private = bmd;
+}
+
+static void bio_free_map_data(struct bio_map_data *bmd)
+{
+ kfree(bmd->iovecs);
+ kfree(bmd);
+}
+
+static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+{
+ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+
+ if (!bmd)
+ return NULL;
+
+ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+ if (bmd->iovecs)
+ return bmd;
+
+ kfree(bmd);
+ return NULL;
+}
+
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
*/
int bio_uncopy_user(struct bio *bio)
{
+ struct bio_map_data *bmd = bio->bi_private;
+ const int read = bio_data_dir(bio) == READ;
struct bio_vec *bvec;
int i, ret = 0;
- if (bio_data_dir(bio) == READ) {
- char *uaddr = bio->bi_private;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- char *addr = page_address(bvec->bv_page);
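+ /*
+ * for a READ, copy each kernel page back to the user buffer; the
+ * pages are freed in either direction
+ */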
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+ unsigned int len = bmd->iovecs[i].bv_len;
- if (!ret && copy_to_user(uaddr, addr, bvec->bv_len))
- ret = -EFAULT;
+ if (read && !ret && copy_to_user(bmd->userptr, addr, len))
+ ret = -EFAULT;
- __free_page(bvec->bv_page);
- uaddr += bvec->bv_len;
- }
+ __free_page(bvec->bv_page);
+ bmd->userptr += len;
}
-
+ bio_free_map_data(bmd);
bio_put(bio);
return ret;
}
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
+ struct bio_map_data *bmd;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
int i, ret;
+ bmd = bio_alloc_map_data(end - start);
+ if (!bmd)
+ return ERR_PTR(-ENOMEM);
+
+ bmd->userptr = (void __user *) uaddr;
+
+ ret = -ENOMEM;
bio = bio_alloc(GFP_KERNEL, end - start);
if (!bio)
- return ERR_PTR(-ENOMEM);
+ goto out_bmd;
+
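+ /* data going to the device (i.e. not into the vm) makes this a WRITE */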
+ bio->bi_rw |= (!write_to_vm << BIO_RW);
ret = 0;
while (len) {
len -= bytes;
}
+ if (ret)
+ goto cleanup;
+
/*
* success
*/
- if (!ret) {
- if (!write_to_vm) {
- bio->bi_rw |= (1 << BIO_RW);
- /*
- * for a write, copy in data to kernel pages
- */
- ret = -EFAULT;
- bio_for_each_segment(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
+ if (!write_to_vm) {
+ char __user *p = (char __user *) uaddr;
- if (copy_from_user(addr, (char *) uaddr, bvec->bv_len))
- goto cleanup;
- }
- }
+ /*
+ * for a write, copy in data to kernel pages
+ */
+ ret = -EFAULT;
+ bio_for_each_segment(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
- bio->bi_private = (void *) uaddr;
- return bio;
+ if (copy_from_user(addr, p, bvec->bv_len))
+ goto cleanup;
+ p += bvec->bv_len;
+ }
}
- /*
- * cleanup
- */
+ bio_set_map_data(bmd, bio);
+ return bio;
cleanup:
bio_for_each_segment(bvec, bio, i)
__free_page(bvec->bv_page);
bio_put(bio);
+out_bmd:
+ bio_free_map_data(bmd);
return ERR_PTR(ret);
}
struct bio_vec *bvec;
int i;
- /*
- * find original bio if it was bounced
- */
- if (bio->bi_private) {
- /*
- * someone stole our bio, must not happen
- */
- BUG_ON(!bio_flagged(bio, BIO_BOUNCED));
-
- bio = bio->bi_private;
- }
-
/*
* make sure we dirty pages we wrote to
*/
static void bio_dirty_fn(void *data);
static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
-static spinlock_t bio_dirty_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*