X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fbio.c;h=6a0b9ad8f8c9d031d4c32fb32e1c53e0677d1ef2;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=2d1ec65361a5ea47add50a1caf6441ec59266b42;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/fs/bio.c b/fs/bio.c
index 2d1ec6536..6a0b9ad8f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,11 +25,12 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
-static mempool_t *bio_pool;
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -38,13 +39,12 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
 
-struct biovec_pool {
+struct biovec_slab {
 	int nr_vecs;
 	char *name;
 	kmem_cache_t *slab;
-	mempool_t *pool;
 };
 
 /*
@@ -54,15 +54,32 @@ struct biovec_pool {
  */
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
 
-static struct biovec_pool bvec_array[BIOVEC_NR_POOLS] = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
 
-static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, unsigned long *idx)
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+struct bio_set {
+	mempool_t *bio_pool;
+	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+};
+
+/*
+ * fs_bio_set is the bio_set containing bio and iovec memory pools used by
+ * IO code that does not need private memory pools.
+ */
+static struct bio_set *fs_bio_set;
+
+static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
-	struct biovec_pool *bp;
 	struct bio_vec *bvl;
+	struct biovec_slab *bp;
 
 	/*
	 * see comment near bvec_array define!
@@ -80,42 +97,45 @@ static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, unsigned long *id
 	/*
 	 * idx now points to the pool we want to allocate from
 	 */
-	bp = bvec_array + *idx;
-	bvl = mempool_alloc(bp->pool, gfp_mask);
+	bp = bvec_slabs + *idx;
+	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
 	if (bvl)
 		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+
 	return bvl;
 }
 
-/*
- * default destructor for a bio allocated with bio_alloc()
- */
-void bio_destructor(struct bio *bio)
+void bio_free(struct bio *bio, struct bio_set *bio_set)
 {
 	const int pool_idx = BIO_POOL_IDX(bio);
-	struct biovec_pool *bp = bvec_array + pool_idx;
 
 	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
 
-	/*
-	 * cloned bio doesn't own the veclist
-	 */
-	if (!bio_flagged(bio, BIO_CLONED))
-		mempool_free(bio->bi_io_vec, bp->pool);
+	mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+	mempool_free(bio, bio_set->bio_pool);
+}
 
-	mempool_free(bio, bio_pool);
+/*
+ * default destructor for a bio allocated with bio_alloc_bioset()
+ */
+static void bio_fs_destructor(struct bio *bio)
+{
+	bio_free(bio, fs_bio_set);
 }
 
-inline void bio_init(struct bio *bio)
+void bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
+	bio->bi_bdev = NULL;
 	bio->bi_flags = 1 << BIO_UPTODATE;
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
 	bio->bi_idx = 0;
 	bio->bi_phys_segments = 0;
 	bio->bi_hw_segments = 0;
+	bio->bi_hw_front_size = 0;
+	bio->bi_hw_back_size = 0;
 	bio->bi_size = 0;
 	bio->bi_max_vecs = 0;
 	bio->bi_end_io = NULL;
@@ -124,45 +144,69 @@ inline void bio_init(struct bio *bio)
 }
 
 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
+ * @bs: the bio_set to allocate from
 *
 * Description:
- *   bio_alloc will first try it's on mempool to satisfy the allocation.
+ *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
+ *
+ *   allocate bio and iovecs from the memory pools specified by the
+ *   bio_set structure.
 **/
-struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio_vec *bvl = NULL;
-	unsigned long idx;
-	struct bio *bio;
+	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
+
+	if (likely(bio)) {
+		struct bio_vec *bvl = NULL;
+
+		bio_init(bio);
+		if (likely(nr_iovecs)) {
+			unsigned long idx;
+
+			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+			if (unlikely(!bvl)) {
+				mempool_free(bio, bs->bio_pool);
+				bio = NULL;
+				goto out;
+			}
+			bio->bi_flags |= idx << BIO_POOL_OFFSET;
+			bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
+		}
+		bio->bi_io_vec = bvl;
+	}
+out:
+	return bio;
+}
 
-	bio = mempool_alloc(bio_pool, gfp_mask);
-	if (unlikely(!bio))
-		goto out;
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+{
+	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 
-	bio_init(bio);
+	if (bio)
+		bio->bi_destructor = bio_fs_destructor;
 
-	if (unlikely(!nr_iovecs))
-		goto noiovec;
+	return bio;
+}
 
-	bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
-	if (bvl) {
-		bio->bi_flags |= idx << BIO_POOL_OFFSET;
-		bio->bi_max_vecs = bvec_array[idx].nr_vecs;
-noiovec:
-		bio->bi_io_vec = bvl;
-		bio->bi_destructor = bio_destructor;
-out:
-		return bio;
-	}
+void zero_fill_bio(struct bio *bio)
+{
+	unsigned long flags;
+	struct bio_vec *bv;
+	int i;
 
-	mempool_free(bio, bio_pool);
-	bio = NULL;
-	goto out;
+	bio_for_each_segment(bv, bio, i) {
+		char *data = bvec_kmap_irq(bv, &flags);
+		memset(data, 0, bv->bv_len);
+		flush_dcache_page(bv->bv_page);
+		bvec_kunmap_irq(data, &flags);
+	}
 }
+EXPORT_SYMBOL(zero_fill_bio);
 
 /**
 * bio_put - release a reference to a bio
@@ -210,35 +254,22 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
 * the actual data it points to. Reference count of returned
 * bio will be one.
 */
-inline void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	bio->bi_io_vec = bio_src->bi_io_vec;
+	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+
+	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
+		bio_src->bi_max_vecs * sizeof(struct bio_vec));
 
 	bio->bi_sector = bio_src->bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
 	bio->bi_rw = bio_src->bi_rw;
-
-	/*
-	 * notes -- maybe just leave bi_idx alone. assume identical mapping
-	 * for the clone
-	 */
 	bio->bi_vcnt = bio_src->bi_vcnt;
-	bio->bi_idx = bio_src->bi_idx;
-	if (bio_flagged(bio, BIO_SEG_VALID)) {
-		bio->bi_phys_segments = bio_src->bi_phys_segments;
-		bio->bi_hw_segments = bio_src->bi_hw_segments;
-		bio->bi_flags |= (1 << BIO_SEG_VALID);
-	}
 	bio->bi_size = bio_src->bi_size;
-
-	/*
-	 * cloned bio does not own the bio_vec, so users cannot fiddle with
-	 * it. clear bi_max_vecs and clear the BIO_POOL_BITS to make this
-	 * apparent
-	 */
-	bio->bi_max_vecs = 0;
-	bio->bi_flags &= (BIO_POOL_MASK - 1);
+	bio->bi_idx = bio_src->bi_idx;
+	bio_phys_segments(q, bio);
+	bio_hw_segments(q, bio);
 }
 
 /**
@@ -248,12 +279,14 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
 *
 *	Like __bio_clone, only also allocates the returned bio
 */
-struct bio *bio_clone(struct bio *bio, int gfp_mask)
+struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 {
-	struct bio *b = bio_alloc(gfp_mask, 0);
+	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
 
-	if (b)
+	if (b) {
+		b->bi_destructor = bio_fs_destructor;
 		__bio_clone(b, bio);
+	}
 
 	return b;
 }
@@ -282,7 +315,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -293,10 +327,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (bio->bi_vcnt >= bio->bi_max_vecs)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	/*
+	 * For filesystems with a blocksize smaller than the pagesize
+	 * we will often be called with the same page as last time and
+	 * a consecutive offset. Optimize this special case.
+	 */
+	if (bio->bi_vcnt > 0) {
+		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+		if (page == prev->bv_page &&
+		    offset == prev->bv_offset + prev->bv_len) {
+			prev->bv_len += len;
+			if (q->merge_bvec_fn &&
+			    q->merge_bvec_fn(q, bio, prev) < len) {
+				prev->bv_len -= len;
+				return 0;
+			}
+
+			goto done;
+		}
+	}
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
 	/*
@@ -304,14 +359,15 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio_phys_segments(q, bio) >= q->max_phys_segments
-	    || bio_hw_segments(q, bio) >= q->max_hw_segments) {
+	while (bio->bi_phys_segments >= q->max_phys_segments
+	    || bio->bi_hw_segments >= q->max_hw_segments
+	    || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
 
 		if (retried_segments)
 			return 0;
 
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 		retried_segments = 1;
+		blk_recount_segments(q, bio);
 	}
 
 	/*
@@ -341,13 +397,39 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 		}
 	}
 
+	/* If we may be able to merge these biovecs, force a recount */
+	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
+	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
+		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
+ done:
 	bio->bi_size += len;
 	return len;
 }
 
+/**
+ * bio_add_pc_page - attempt to add page to bio
+ * @q: the target queue
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block
+ * device limitations. The target block device must allow bio's
+ * smaller than PAGE_SIZE, so it is always possible to add a single
+ * page to an empty bio. This should only be used by REQ_PC bios.
+ */
+int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+		    unsigned int len, unsigned int offset)
+{
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+}
+
 /**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
@@ -364,71 +446,247 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+struct bio_map_data {
+	struct bio_vec *iovecs;
+	void __user *userptr;
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
-	struct page **pages;
-	struct bio *bio;
+	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+	bio->bi_private = bmd;
+}
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
-		return NULL;
+static void bio_free_map_data(struct bio_map_data *bmd)
+{
+	kfree(bmd->iovecs);
+	kfree(bmd);
+}
 
-	bio = bio_alloc(GFP_KERNEL, nr_pages);
-	if (!bio)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+{
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+
+	if (!bmd)
 		return NULL;
 
-	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto out;
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	if (bmd->iovecs)
+		return bmd;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-			     write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
+	kfree(bmd);
+	return NULL;
+}
 
-	if (ret < nr_pages)
-		goto out;
+/**
+ * bio_uncopy_user - finish previously mapped bio
+ * @bio: bio being terminated
+ *
+ * Free pages allocated from bio_copy_user() and write back data
+ * to user space in case of a read.
+ */
+int bio_uncopy_user(struct bio *bio)
+{
+	struct bio_map_data *bmd = bio->bi_private;
+	const int read = bio_data_dir(bio) == READ;
+	struct bio_vec *bvec;
+	int i, ret = 0;
 
-	bio->bi_bdev = bdev;
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *addr = page_address(bvec->bv_page);
+		unsigned int len = bmd->iovecs[i].bv_len;
 
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
+		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
+			ret = -EFAULT;
 
-		if (len <= 0)
-			break;
+		__free_page(bvec->bv_page);
+		bmd->userptr += len;
+	}
+	bio_free_map_data(bmd);
+	bio_put(bio);
+	return ret;
+}
+
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with a
+ * call to bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+			  unsigned int len, int write_to_vm)
+{
+	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = uaddr >> PAGE_SHIFT;
+	struct bio_map_data *bmd;
+	struct bio_vec *bvec;
+	struct page *page;
+	struct bio *bio;
+	int i, ret;
+
+	bmd = bio_alloc_map_data(end - start);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
+
+	bmd->userptr = (void __user *) uaddr;
+
+	ret = -ENOMEM;
+	bio = bio_alloc(GFP_KERNEL, end - start);
+	if (!bio)
+		goto out_bmd;
+
+	bio->bi_rw |= (!write_to_vm << BIO_RW);
+
+	ret = 0;
+	while (len) {
+		unsigned int bytes = PAGE_SIZE;
 
 		if (bytes > len)
 			bytes = len;
 
-		/*
-		 * sorry...
-		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
+		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+		if (!page) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+			ret = -EINVAL;
 			break;
+		}
 
 		len -= bytes;
-		offset = 0;
 	}
 
+	if (ret)
+		goto cleanup;
+
 	/*
-	 * release the pages we didn't map into the bio, if any
+	 * success
 	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
+	if (!write_to_vm) {
+		char __user *p = (char __user *) uaddr;
+
+		/*
+		 * for a write, copy in data to kernel pages
+		 */
+		ret = -EFAULT;
+		bio_for_each_segment(bvec, bio, i) {
+			char *addr = page_address(bvec->bv_page);
+
+			if (copy_from_user(addr, p, bvec->bv_len))
+				goto cleanup;
+			p += bvec->bv_len;
+		}
+	}
+
+	bio_set_map_data(bmd, bio);
+	return bio;
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+out_bmd:
+	bio_free_map_data(bmd);
+	return ERR_PTR(ret);
+}
+
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
+{
+	int i, j;
+	int nr_pages = 0;
+	struct page **pages;
+	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
+		return ERR_PTR(-EINVAL);
+
+	bio = bio_alloc(GFP_KERNEL, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	ret = -ENOMEM;
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto out;
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
+			goto out_unmap;
+		}
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
+
+			/*
+			 * sorry...
+			 */
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+			    bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
+
+		cur_page = j;
+		/*
+		 * release the pages we didn't map into the bio, if any
+		 */
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
+	}
 
 	kfree(pages);
 
@@ -438,72 +696,99 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
-	blk_queue_bounce(q, &bio);
+	bio->bi_bdev = bdev;
+	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if(!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 /**
 * bio_map_user - map user address into bio
+ * @q: the request_queue_t for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 *
 * Map the user space address into a bio suitable for io to a block
- * device.
+ * device. Returns an error pointer in case of error.
 */
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm)
+{
+	struct sg_iovec iov;
+
+	iov.iov_base = (void __user *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
 {
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
-	if (bio) {
-		/*
-		 * subtle -- if __bio_map_user() ended up bouncing a bio,
-		 * it would normally disappear when its bi_end_io is run.
-		 * however, we need it for the unmap, so grab an extra
-		 * reference to it
-		 */
-		bio_get(bio);
+	if (IS_ERR(bio))
+		return bio;
 
-		if (bio->bi_size < len) {
-			bio_endio(bio, bio->bi_size, 0);
-			bio_unmap_user(bio, 0);
-			return NULL;
-		}
-	}
+	/*
+	 * subtle -- if __bio_map_user() ended up bouncing a bio,
+	 * it would normally disappear when its bi_end_io is run.
+	 * however, we need it for the unmap, so grab an extra
+	 * reference to it
+	 */
+	bio_get(bio);
 
-	return bio;
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * don't support partial mappings
+	 */
+	bio_endio(bio, bio->bi_size, 0);
+	bio_unmap_user(bio);
+	return ERR_PTR(-EINVAL);
 }
 
-static void __bio_unmap_user(struct bio *bio, int write_to_vm)
+static void __bio_unmap_user(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	int i;
 
-	/*
-	 * find original bio if it was bounced
-	 */
-	if (bio->bi_private) {
-		/*
-		 * someone stole our bio, must not happen
-		 */
-		BUG_ON(!bio_flagged(bio, BIO_BOUNCED));
-
-		bio = bio->bi_private;
-	}
-
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
 	__bio_for_each_segment(bvec, bio, i, 0) {
-		if (write_to_vm)
+		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
 
 		page_cache_release(bvec->bv_page);
@@ -515,18 +800,92 @@ static void __bio_unmap_user(struct bio *bio, int write_to_vm)
 /**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
- * @write_to_vm: bool indicating whether pages were written to
 *
- * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
- * must be the same as passed into bio_map_user(). Must be called with
+ * Unmap a bio previously mapped by bio_map_user(). Must be called with
 * a process context.
 *
 * bio_unmap_user() may sleep.
 */
-void bio_unmap_user(struct bio *bio, int write_to_vm)
+void bio_unmap_user(struct bio *bio)
+{
+	__bio_unmap_user(bio);
+	bio_put(bio);
+}
+
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
 {
-	__bio_unmap_user(bio, write_to_vm);
+	if (bio->bi_size)
+		return 1;
+
 	bio_put(bio);
+	return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, gfp_t gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 gfp_t gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
 }
 
 /*
@@ -598,7 +957,7 @@ static void bio_release_pages(struct bio *bio)
 static void bio_dirty_fn(void *data);
 
 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
-static spinlock_t bio_dirty_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
@@ -736,6 +1095,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
@@ -755,6 +1117,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	bp->bio1.bi_io_vec = &bp->bv1;
 	bp->bio2.bi_io_vec = &bp->bv2;
 
+	bp->bio1.bi_max_vecs = 1;
+	bp->bio2.bi_max_vecs = 1;
+
 	bp->bio1.bi_end_io = bio_pair_end_1;
 	bp->bio2.bi_end_io = bio_pair_end_2;
 
@@ -764,21 +1129,95 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(int gfp_flags, void *data)
+
+/*
+ * create memory pools for biovec's in a bio_set.
+ * use the global biovec slabs created for general use.
+ */
+static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 {
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		struct biovec_slab *bp = bvec_slabs + i;
+		mempool_t **bvp = bs->bvec_pools + i;
+
+		if (i >= scale)
+			pool_entries >>= 1;
+
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
+		if (!*bvp)
+			return -ENOMEM;
+	}
+	return 0;
 }
 
-static void bio_pair_free(void *bp, void *data)
+static void biovec_free_pools(struct bio_set *bs)
 {
-	kfree(bp);
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		mempool_t *bvp = bs->bvec_pools[i];
+
+		if (bvp)
+			mempool_destroy(bvp);
+	}
+
 }
 
-static void __init biovec_init_pools(void)
+void bioset_free(struct bio_set *bs)
 {
-	int i, size, megabytes, pool_entries = BIO_POOL_SIZE;
+	if (bs->bio_pool)
+		mempool_destroy(bs->bio_pool);
+
+	biovec_free_pools(bs);
+
+	kfree(bs);
+}
+
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+{
+	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+
+	if (!bs)
+		return NULL;
+
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
+	if (!bs->bio_pool)
+		goto bad;
+
+	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+		return bs;
+
+bad:
+	bioset_free(bs);
+	return NULL;
+}
+
+static void __init biovec_init_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+		int size;
+		struct biovec_slab *bvs = bvec_slabs + i;
+
+		size = bvs->nr_vecs * sizeof(struct bio_vec);
+		bvs->slab = kmem_cache_create(bvs->name, size, 0,
+				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	}
+}
+
+static int __init init_bio(void)
+{
+	int megabytes, bvec_pool_entries;
 	int scale = BIOVEC_NR_POOLS;
 
+	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
+				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+
+	biovec_init_slabs();
+
 	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
 
 	/*
@@ -796,43 +1235,18 @@ static void __init biovec_init_pools(void)
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	pool_entries = megabytes * 2;
-	if (pool_entries > 256)
-		pool_entries = 256;
-
-	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-		struct biovec_pool *bp = bvec_array + i;
-
-		size = bp->nr_vecs * sizeof(struct bio_vec);
-
-		bp->slab = kmem_cache_create(bp->name, size, 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-
-		if (i >= scale)
-			pool_entries >>= 1;
-
-		bp->pool = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
-		if (!bp->pool)
-			panic("biovec: can't init mempool\n");
-	}
-}
-
-static int __init init_bio(void)
-{
-	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab,
-				mempool_free_slab, bio_slab);
-	if (!bio_pool)
-		panic("bio: can't create mempool\n");
+	bvec_pool_entries = 1 + scale;
 
-	biovec_init_pools();
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	if (!fs_bio_set)
+		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+						     sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
 
@@ -843,6 +1257,7 @@ subsys_initcall(init_bio);
 
 EXPORT_SYMBOL(bio_alloc);
 EXPORT_SYMBOL(bio_put);
+EXPORT_SYMBOL(bio_free);
 EXPORT_SYMBOL(bio_endio);
 EXPORT_SYMBOL(bio_init);
 EXPORT_SYMBOL(__bio_clone);
@@ -850,9 +1265,16 @@ EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
 EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
+EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
+EXPORT_SYMBOL(bio_copy_user);
+EXPORT_SYMBOL(bio_uncopy_user);
+EXPORT_SYMBOL(bioset_create);
+EXPORT_SYMBOL(bioset_free);
+EXPORT_SYMBOL(bio_alloc_bioset);
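
The sketches below are editorial illustrations of the interfaces introduced or reworked in the diff above; they are not part of the patch. All names prefixed with my_ are invented, the pool sizes are arbitrary, and the snippets assume the 2.6-era block APIs visible in the diff (request_queue_t, the three-argument bi_end_io, submit_bio(rw, bio)).

This first sketch shows the bio_set contract: a driver creates its own pools with bioset_create(), allocates with bio_alloc_bioset(), and supplies a destructor that returns the bio via bio_free() -- the same pairing that bio_alloc()/bio_fs_destructor use for fs_bio_set above.

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set *my_bio_set;	/* hypothetical private pool */

/*
 * bio_alloc_bioset() does not install a destructor; the caller provides
 * one that hands the bio back to the same bio_set it came from.
 */
static void my_bio_destructor(struct bio *bio)
{
	bio_free(bio, my_bio_set);
}

static int my_pools_init(void)
{
	my_bio_set = bioset_create(16, 4, 4);	/* sizes illustrative only */
	if (!my_bio_set)
		return -ENOMEM;
	return 0;
}

static struct bio *my_alloc_bio(unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

	if (bio)
		bio->bi_destructor = my_bio_destructor;
	return bio;
}

static void my_pools_exit(void)
{
	bioset_free(my_bio_set);
}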
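A second sketch, again with invented my_ names: how a simple stacking driver might use bio_clone() after this change. Because __bio_clone() now copies the biovec array (bi_max_vecs entries) instead of sharing it, the clone can be remapped and completed independently of the original.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* 2.6-era completion handler: return 1 until the whole bio has completed */
static int my_remap_end_io(struct bio *clone, unsigned int bytes_done, int err)
{
	struct bio *orig = clone->bi_private;

	if (clone->bi_size)
		return 1;

	bio_endio(orig, orig->bi_size, err);	/* pass completion upward */
	bio_put(clone);
	return 0;
}

static void my_remap_and_submit(struct bio *bio, struct block_device *lower,
				sector_t offset)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);

	if (!clone) {
		bio_endio(bio, bio->bi_size, -ENOMEM);
		return;
	}

	clone->bi_bdev = lower;			/* redirect to the lower device */
	clone->bi_sector += offset;
	clone->bi_private = bio;
	clone->bi_end_io = my_remap_end_io;

	generic_make_request(clone);
}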
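Next, a hedged sketch of the normal filesystem-side pattern that bio_add_page() (and the merge fast path added above) serves: allocate a bio sized with bio_get_nr_vecs(), add pages until the queue limits stop accepting them, then submit what fits. The helper name and its return convention are invented.

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* see my_remap_end_io above for the completion-handler shape */
extern int my_end_io(struct bio *bio, unsigned int bytes_done, int err);

/* Queue a read of up to nr_pages pages starting at 'sector' on 'bdev'. */
static int my_submit_read(struct block_device *bdev, sector_t sector,
			  struct page **pages, int nr_pages)
{
	int i, nr_vecs = min(nr_pages, bio_get_nr_vecs(bdev));
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = my_end_io;

	for (i = 0; i < nr_pages; i++) {
		/*
		 * bio_add_page() returns less than requested once the bio is
		 * full or the queue limits (max_sectors, segment counts, or a
		 * merge_bvec_fn) would be exceeded; stop and submit what fits.
		 */
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;
	}

	submit_bio(READ, bio);
	return i;		/* number of pages actually queued */
}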
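A sketch of the bounce path added by bio_copy_user()/bio_uncopy_user(), roughly the way the request-mapping helpers drive it when a user buffer cannot be mapped directly. The surrounding submission code is elided; the function name is invented.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Bounce a user buffer through kernel pages for a write to 'q'. */
static int my_bounce_write(request_queue_t *q, unsigned long uaddr,
			   unsigned int len)
{
	struct bio *bio;

	/* write_to_vm == 0: data flows from user space toward the device */
	bio = bio_copy_user(q, uaddr, len, 0);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... attach the bio to a request, submit it, wait for completion ... */

	/*
	 * bio_uncopy_user() frees the bounce pages, copies data back to user
	 * space for a read, and drops the bio reference.
	 */
	return bio_uncopy_user(bio);
}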
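A sketch of the zero-copy variant, bio_map_user()/bio_unmap_user(), as reshaped above (error-pointer returns, no write_to_vm argument on unmap). The my_ name is invented and the actual submission is elided.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Map a user buffer directly for a read from 'bdev' (device writes memory). */
static int my_direct_read(request_queue_t *q, struct block_device *bdev,
			  unsigned long uaddr, unsigned int len)
{
	struct bio *bio;

	bio = bio_map_user(q, bdev, uaddr, len, 1);	/* write_to_vm == 1 */
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/*
	 * bio_map_user() grabs an extra reference (see the "subtle" comment in
	 * the diff) so the bio survives its completion handler until we unmap.
	 */

	/* ... submit the bio and wait for it to complete ... */

	/* dirties the pages that were read into, releases them, drops the bio */
	bio_unmap_user(bio);
	return 0;
}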
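Finally, a minimal sketch of bio_map_kern(), which wraps an existing kernel buffer; the buffer must stay allocated until the I/O completes, and partial mappings are rejected, as the function above shows. The wrapper name is invented.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *my_map_kernel_buffer(request_queue_t *q, void *buf,
					unsigned int len)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return bio;

	/*
	 * bio_map_kern() installs bio_map_kern_endio as bi_end_io, which only
	 * drops the bio; completion of the data transfer is normally observed
	 * through the request this bio gets attached to.
	 */
	return bio;
}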