X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fbio.c;h=3a1472acc361ef0e95f03843cac07f5b92f23e61;hb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;hp=6284147b20dc77f2c2ff9ed9f9829666aac767d6;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/fs/bio.c b/fs/bio.c index 6284147b2..3a1472acc 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -28,7 +28,6 @@ #define BIO_POOL_SIZE 256 -static mempool_t *bio_pool; static kmem_cache_t *bio_slab; #define BIOVEC_NR_POOLS 6 @@ -40,11 +39,10 @@ static kmem_cache_t *bio_slab; #define BIO_SPLIT_ENTRIES 8 mempool_t *bio_split_pool; -struct biovec_pool { +struct biovec_slab { int nr_vecs; char *name; kmem_cache_t *slab; - mempool_t *pool; }; /* @@ -53,16 +51,33 @@ struct biovec_pool { * unsigned short */ -#define BV(x) { .nr_vecs = x, .name = "biovec-" #x } -static struct biovec_pool bvec_array[BIOVEC_NR_POOLS] = { +#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] = { BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), }; #undef BV -static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, unsigned long *idx) +/* + * bio_set is used to allow other portions of the IO system to + * allocate their own private memory pools for bio and iovec structures. + * These memory pools in turn all allocate from the bio_slab + * and the bvec_slabs[]. + */ +struct bio_set { + mempool_t *bio_pool; + mempool_t *bvec_pools[BIOVEC_NR_POOLS]; +}; + +/* + * fs_bio_set is the bio_set containing bio and iovec memory pools used by + * IO code that does not need private memory pools. + */ +static struct bio_set *fs_bio_set; + +static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) { - struct biovec_pool *bp; struct bio_vec *bvl; + struct biovec_slab *bp; /* * see comment near bvec_array define! 
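[note] The hunk above introduces struct bio_set and the private fs_bio_set. A minimal sketch of how another IO subsystem might use this private-pool interface, relying on bioset_create(), bio_alloc_bioset() and bioset_free() as added further down in this patch; the pool sizes, the scale argument and all my_* names below are illustrative only, not values taken from the patch:

        #include <linux/init.h>
        #include <linux/errno.h>
        #include <linux/bio.h>

        static struct bio_set *my_bio_set;      /* hypothetical private pool */

        static int __init my_subsys_init(void)
        {
                /* 32 bios, 32 biovecs per pool; pools past index 2 are progressively halved */
                my_bio_set = bioset_create(32, 32, 2);
                if (!my_bio_set)
                        return -ENOMEM;
                return 0;
        }

        static struct bio *my_alloc_bio(int nr_vecs)
        {
                /*
                 * bio and iovecs come from my_bio_set instead of fs_bio_set;
                 * bio_put() returns them to the same pools via bi_destructor
                 */
                return bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);
        }

        static void __exit my_subsys_exit(void)
        {
                bioset_free(my_bio_set);
        }

bio_alloc() itself becomes a thin wrapper that passes fs_bio_set to bio_alloc_bioset(), so existing callers are unaffected.
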
@@ -80,31 +95,27 @@ static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, unsigned long *id /* * idx now points to the pool we want to allocate from */ - bp = bvec_array + *idx; - bvl = mempool_alloc(bp->pool, gfp_mask); + bp = bvec_slabs + *idx; + bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask); if (bvl) memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec)); + return bvl; } /* - * default destructor for a bio allocated with bio_alloc() + * default destructor for a bio allocated with bio_alloc_bioset() */ -void bio_destructor(struct bio *bio) +static void bio_destructor(struct bio *bio) { const int pool_idx = BIO_POOL_IDX(bio); - struct biovec_pool *bp = bvec_array + pool_idx; + struct bio_set *bs = bio->bi_set; BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); - /* - * cloned bio doesn't own the veclist - */ - if (!bio_flagged(bio, BIO_CLONED)) - mempool_free(bio->bi_io_vec, bp->pool); - - mempool_free(bio, bio_pool); + mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]); + mempool_free(bio, bs->bio_pool); } inline void bio_init(struct bio *bio) @@ -116,6 +127,8 @@ inline void bio_init(struct bio *bio) bio->bi_idx = 0; bio->bi_phys_segments = 0; bio->bi_hw_segments = 0; + bio->bi_hw_front_size = 0; + bio->bi_hw_back_size = 0; bio->bi_size = 0; bio->bi_max_vecs = 0; bio->bi_end_io = NULL; @@ -124,45 +137,66 @@ inline void bio_init(struct bio *bio) } /** - * bio_alloc - allocate a bio for I/O + * bio_alloc_bioset - allocate a bio for I/O * @gfp_mask: the GFP_ mask given to the slab allocator * @nr_iovecs: number of iovecs to pre-allocate + * @bs: the bio_set to allocate from * * Description: - * bio_alloc will first try it's on mempool to satisfy the allocation. + * bio_alloc_bioset will first try it's on mempool to satisfy the allocation. * If %__GFP_WAIT is set then we will block on the internal pool waiting * for a &struct bio to become free. + * + * allocate bio and iovecs from the memory pools specified by the + * bio_set structure. 
**/ -struct bio *bio_alloc(int gfp_mask, int nr_iovecs) +struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs) { - struct bio_vec *bvl = NULL; - unsigned long idx; - struct bio *bio; - - bio = mempool_alloc(bio_pool, gfp_mask); - if (unlikely(!bio)) - goto out; - - bio_init(bio); - - if (unlikely(!nr_iovecs)) - goto noiovec; - - bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx); - if (bvl) { - bio->bi_flags |= idx << BIO_POOL_OFFSET; - bio->bi_max_vecs = bvec_array[idx].nr_vecs; -noiovec: + struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); + + if (likely(bio)) { + struct bio_vec *bvl = NULL; + + bio_init(bio); + if (likely(nr_iovecs)) { + unsigned long idx; + + bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); + if (unlikely(!bvl)) { + mempool_free(bio, bs->bio_pool); + bio = NULL; + goto out; + } + bio->bi_flags |= idx << BIO_POOL_OFFSET; + bio->bi_max_vecs = bvec_slabs[idx].nr_vecs; + } bio->bi_io_vec = bvl; bio->bi_destructor = bio_destructor; -out: - return bio; + bio->bi_set = bs; } +out: + return bio; +} + +struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); +} - mempool_free(bio, bio_pool); - bio = NULL; - goto out; +void zero_fill_bio(struct bio *bio) +{ + unsigned long flags; + struct bio_vec *bv; + int i; + + bio_for_each_segment(bv, bio, i) { + char *data = bvec_kmap_irq(bv, &flags); + memset(data, 0, bv->bv_len); + flush_dcache_page(bv->bv_page); + bvec_kunmap_irq(data, &flags); + } } +EXPORT_SYMBOL(zero_fill_bio); /** * bio_put - release a reference to a bio @@ -212,7 +246,9 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio) */ inline void __bio_clone(struct bio *bio, struct bio *bio_src) { - bio->bi_io_vec = bio_src->bi_io_vec; + request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); + + memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec)); bio->bi_sector = bio_src->bi_sector; bio->bi_bdev = bio_src->bi_bdev; @@ -224,21 +260,9 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src) * for the clone */ bio->bi_vcnt = bio_src->bi_vcnt; - bio->bi_idx = bio_src->bi_idx; - if (bio_flagged(bio, BIO_SEG_VALID)) { - bio->bi_phys_segments = bio_src->bi_phys_segments; - bio->bi_hw_segments = bio_src->bi_hw_segments; - bio->bi_flags |= (1 << BIO_SEG_VALID); - } bio->bi_size = bio_src->bi_size; - - /* - * cloned bio does not own the bio_vec, so users cannot fiddle with - * it. clear bi_max_vecs and clear the BIO_POOL_BITS to make this - * apparent - */ - bio->bi_max_vecs = 0; - bio->bi_flags &= (BIO_POOL_MASK - 1); + bio_phys_segments(q, bio); + bio_hw_segments(q, bio); } /** @@ -248,9 +272,9 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src) * * Like __bio_clone, only also allocates the returned bio */ -struct bio *bio_clone(struct bio *bio, int gfp_mask) +struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask) { - struct bio *b = bio_alloc(gfp_mask, 0); + struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); if (b) __bio_clone(b, bio); @@ -304,14 +328,15 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page * make this too complex. 
*/ - while (bio_phys_segments(q, bio) >= q->max_phys_segments - || bio_hw_segments(q, bio) >= q->max_hw_segments) { + while (bio->bi_phys_segments >= q->max_phys_segments + || bio->bi_hw_segments >= q->max_hw_segments + || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) { if (retried_segments) return 0; - bio->bi_flags &= ~(1 << BIO_SEG_VALID); retried_segments = 1; + blk_recount_segments(q, bio); } /* @@ -341,6 +366,11 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page } } + /* If we may be able to merge these biovecs, force a recount */ + if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) || + BIOVEC_VIRT_MERGEABLE(bvec-1, bvec))) + bio->bi_flags &= ~(1 << BIO_SEG_VALID); + bio->bi_vcnt++; bio->bi_phys_segments++; bio->bi_hw_segments++; @@ -368,6 +398,157 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, len, offset); } +struct bio_map_data { + struct bio_vec *iovecs; + void __user *userptr; +}; + +static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio) +{ + memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt); + bio->bi_private = bmd; +} + +static void bio_free_map_data(struct bio_map_data *bmd) +{ + kfree(bmd->iovecs); + kfree(bmd); +} + +static struct bio_map_data *bio_alloc_map_data(int nr_segs) +{ + struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL); + + if (!bmd) + return NULL; + + bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL); + if (bmd->iovecs) + return bmd; + + kfree(bmd); + return NULL; +} + +/** + * bio_uncopy_user - finish previously mapped bio + * @bio: bio being terminated + * + * Free pages allocated from bio_copy_user() and write back data + * to user space in case of a read. + */ +int bio_uncopy_user(struct bio *bio) +{ + struct bio_map_data *bmd = bio->bi_private; + const int read = bio_data_dir(bio) == READ; + struct bio_vec *bvec; + int i, ret = 0; + + __bio_for_each_segment(bvec, bio, i, 0) { + char *addr = page_address(bvec->bv_page); + unsigned int len = bmd->iovecs[i].bv_len; + + if (read && !ret && copy_to_user(bmd->userptr, addr, len)) + ret = -EFAULT; + + __free_page(bvec->bv_page); + bmd->userptr += len; + } + bio_free_map_data(bmd); + bio_put(bio); + return ret; +} + +/** + * bio_copy_user - copy user data to bio + * @q: destination block queue + * @uaddr: start of user address + * @len: length in bytes + * @write_to_vm: bool indicating writing to pages or not + * + * Prepares and returns a bio for indirect user io, bouncing data + * to/from kernel pages as necessary. Must be paired with + * call bio_uncopy_user() on io completion. 
+ */ +struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, + unsigned int len, int write_to_vm) +{ + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + struct bio_map_data *bmd; + struct bio_vec *bvec; + struct page *page; + struct bio *bio; + int i, ret; + + bmd = bio_alloc_map_data(end - start); + if (!bmd) + return ERR_PTR(-ENOMEM); + + bmd->userptr = (void __user *) uaddr; + + ret = -ENOMEM; + bio = bio_alloc(GFP_KERNEL, end - start); + if (!bio) + goto out_bmd; + + bio->bi_rw |= (!write_to_vm << BIO_RW); + + ret = 0; + while (len) { + unsigned int bytes = PAGE_SIZE; + + if (bytes > len) + bytes = len; + + page = alloc_page(q->bounce_gfp | GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + break; + } + + if (__bio_add_page(q, bio, page, bytes, 0) < bytes) { + ret = -EINVAL; + break; + } + + len -= bytes; + } + + if (ret) + goto cleanup; + + /* + * success + */ + if (!write_to_vm) { + char __user *p = (char __user *) uaddr; + + /* + * for a write, copy in data to kernel pages + */ + ret = -EFAULT; + bio_for_each_segment(bvec, bio, i) { + char *addr = page_address(bvec->bv_page); + + if (copy_from_user(addr, p, bvec->bv_len)) + goto cleanup; + p += bvec->bv_len; + } + } + + bio_set_map_data(bmd, bio); + return bio; +cleanup: + bio_for_each_segment(bvec, bio, i) + __free_page(bvec->bv_page); + + bio_put(bio); +out_bmd: + bio_free_map_data(bmd); + return ERR_PTR(ret); +} + static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev, unsigned long uaddr, unsigned int len, int write_to_vm) @@ -384,12 +565,13 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev, * size for now, in the future we can relax this restriction */ if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q))) - return NULL; + return ERR_PTR(-EINVAL); bio = bio_alloc(GFP_KERNEL, nr_pages); if (!bio) - return NULL; + return ERR_PTR(-ENOMEM); + ret = -ENOMEM; pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); if (!pages) goto out; @@ -438,23 +620,24 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev, if (!write_to_vm) bio->bi_rw |= (1 << BIO_RW); - blk_queue_bounce(q, &bio); + bio->bi_flags |= (1 << BIO_USER_MAPPED); return bio; out: kfree(pages); bio_put(bio); - return NULL; + return ERR_PTR(ret); } /** * bio_map_user - map user address into bio + * @q: the request_queue_t for the bio * @bdev: destination block device * @uaddr: start of user address * @len: length in bytes * @write_to_vm: bool indicating writing to pages or not * * Map the user space address into a bio suitable for io to a block - * device. + * device. Returns an error pointer in case of error. */ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, unsigned long uaddr, unsigned int len, int write_to_vm) @@ -463,47 +646,38 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm); - if (bio) { - /* - * subtle -- if __bio_map_user() ended up bouncing a bio, - * it would normally disappear when its bi_end_io is run. - * however, we need it for the unmap, so grab an extra - * reference to it - */ - bio_get(bio); + if (IS_ERR(bio)) + return bio; - if (bio->bi_size < len) { - bio_endio(bio, bio->bi_size, 0); - bio_unmap_user(bio, 0); - return NULL; - } - } + /* + * subtle -- if __bio_map_user() ended up bouncing a bio, + * it would normally disappear when its bi_end_io is run. 
+ * however, we need it for the unmap, so grab an extra + * reference to it + */ + bio_get(bio); - return bio; + if (bio->bi_size == len) + return bio; + + /* + * don't support partial mappings + */ + bio_endio(bio, bio->bi_size, 0); + bio_unmap_user(bio); + return ERR_PTR(-EINVAL); } -static void __bio_unmap_user(struct bio *bio, int write_to_vm) +static void __bio_unmap_user(struct bio *bio) { struct bio_vec *bvec; int i; - /* - * find original bio if it was bounced - */ - if (bio->bi_private) { - /* - * someone stole our bio, must not happen - */ - BUG_ON(!bio_flagged(bio, BIO_BOUNCED)); - - bio = bio->bi_private; - } - /* * make sure we dirty pages we wrote to */ __bio_for_each_segment(bvec, bio, i, 0) { - if (write_to_vm) + if (bio_data_dir(bio) == READ) set_page_dirty_lock(bvec->bv_page); page_cache_release(bvec->bv_page); @@ -515,17 +689,15 @@ static void __bio_unmap_user(struct bio *bio, int write_to_vm) /** * bio_unmap_user - unmap a bio * @bio: the bio being unmapped - * @write_to_vm: bool indicating whether pages were written to * - * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm - * must be the same as passed into bio_map_user(). Must be called with + * Unmap a bio previously mapped by bio_map_user(). Must be called with * a process context. * * bio_unmap_user() may sleep. */ -void bio_unmap_user(struct bio *bio, int write_to_vm) +void bio_unmap_user(struct bio *bio) { - __bio_unmap_user(bio, write_to_vm); + __bio_unmap_user(bio); bio_put(bio); } @@ -598,7 +770,7 @@ static void bio_release_pages(struct bio *bio) static void bio_dirty_fn(void *data); static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); -static spinlock_t bio_dirty_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(bio_dirty_lock); static struct bio *bio_dirty_list; /* @@ -764,7 +936,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) return bp; } -static void *bio_pair_alloc(int gfp_flags, void *data) +static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data) { return kmalloc(sizeof(struct bio_pair), gfp_flags); } @@ -774,11 +946,99 @@ static void bio_pair_free(void *bp, void *data) kfree(bp); } -static void __init biovec_init_pools(void) + +/* + * create memory pools for biovec's in a bio_set. + * use the global biovec slabs created for general use. 
+ */ +static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) +{ + int i; + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + struct biovec_slab *bp = bvec_slabs + i; + mempool_t **bvp = bs->bvec_pools + i; + + if (i >= scale) + pool_entries >>= 1; + + *bvp = mempool_create(pool_entries, mempool_alloc_slab, + mempool_free_slab, bp->slab); + if (!*bvp) + return -ENOMEM; + } + return 0; +} + +static void biovec_free_pools(struct bio_set *bs) { - int i, size, megabytes, pool_entries = BIO_POOL_SIZE; + int i; + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + mempool_t *bvp = bs->bvec_pools[i]; + + if (bvp) + mempool_destroy(bvp); + } + +} + +void bioset_free(struct bio_set *bs) +{ + if (bs->bio_pool) + mempool_destroy(bs->bio_pool); + + biovec_free_pools(bs); + + kfree(bs); +} + +struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) +{ + struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL); + + if (!bs) + return NULL; + + memset(bs, 0, sizeof(*bs)); + bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab, + mempool_free_slab, bio_slab); + + if (!bs->bio_pool) + goto bad; + + if (!biovec_create_pools(bs, bvec_pool_size, scale)) + return bs; + +bad: + bioset_free(bs); + return NULL; +} + +static void __init biovec_init_slabs(void) +{ + int i; + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + int size; + struct biovec_slab *bvs = bvec_slabs + i; + + size = bvs->nr_vecs * sizeof(struct bio_vec); + bvs->slab = kmem_cache_create(bvs->name, size, 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + } +} + +static int __init init_bio(void) +{ + int megabytes, bvec_pool_entries; int scale = BIOVEC_NR_POOLS; + bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + + biovec_init_slabs(); + megabytes = nr_free_pages() >> (20 - PAGE_SHIFT); /* @@ -798,43 +1058,16 @@ static void __init biovec_init_pools(void) /* * scale number of entries */ - pool_entries = megabytes * 2; - if (pool_entries > 256) - pool_entries = 256; - - for (i = 0; i < BIOVEC_NR_POOLS; i++) { - struct biovec_pool *bp = bvec_array + i; - - size = bp->nr_vecs * sizeof(struct bio_vec); - - bp->slab = kmem_cache_create(bp->name, size, 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); - if (!bp->slab) - panic("biovec: can't init slab cache\n"); - - if (i >= scale) - pool_entries >>= 1; - - bp->pool = mempool_create(pool_entries, mempool_alloc_slab, - mempool_free_slab, bp->slab); - if (!bp->pool) - panic("biovec: can't init mempool\n"); - } -} - -static int __init init_bio(void) -{ - bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); - if (!bio_slab) - panic("bio: can't create slab cache\n"); - bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab, mempool_free_slab, bio_slab); - if (!bio_pool) - panic("bio: can't create mempool\n"); + bvec_pool_entries = megabytes * 2; + if (bvec_pool_entries > 256) + bvec_pool_entries = 256; - biovec_init_pools(); + fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); + if (!fs_bio_set) + panic("bio: can't allocate bios\n"); - bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, bio_pair_alloc, bio_pair_free, NULL); + bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, + bio_pair_alloc, bio_pair_free, NULL); if (!bio_split_pool) panic("bio: can't create split pool\n"); @@ -858,3 +1091,8 @@ EXPORT_SYMBOL(bio_unmap_user); EXPORT_SYMBOL(bio_pair_release); EXPORT_SYMBOL(bio_split); EXPORT_SYMBOL(bio_split_pool); +EXPORT_SYMBOL(bio_copy_user); 
+EXPORT_SYMBOL(bio_uncopy_user); +EXPORT_SYMBOL(bioset_create); +EXPORT_SYMBOL(bioset_free); +EXPORT_SYMBOL(bio_alloc_bioset);
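
[note] For callers, the new bio_copy_user()/bio_uncopy_user() pair is meant to be used roughly as sketched below; q, uaddr, len and reading are stand-ins for the caller's own queue, user address, length and data direction, and the submission step is elided:

        /* q, uaddr, len and reading are placeholders declared by the caller */
        struct bio *bio;
        int ret;

        /* bounce the user buffer into freshly allocated kernel pages */
        bio = bio_copy_user(q, uaddr, len, reading);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* ... submit the bio and wait for completion ... */

        /*
         * for a read this copies the data back to user space; in all
         * cases it frees the bounce pages and drops the bio reference
         */
        ret = bio_uncopy_user(bio);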
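
[note] bio_map_user() likewise now returns an ERR_PTR value rather than NULL, and bio_unmap_user() has lost its write_to_vm argument since the direction is taken from the bio itself; an updated caller would follow a pattern like this sketch, again with q, bdev, uaddr, len and write_to_vm as placeholders:

        struct bio *bio;

        bio = bio_map_user(q, bdev, uaddr, len, write_to_vm);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* ... submit the bio and wait for completion ... */

        /* pages are dirtied based on bio_data_dir(bio) before being released */
        bio_unmap_user(bio);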