*/
static kmem_cache_t *request_cachep;
+/*
+ * For queue allocation
+ */
+static kmem_cache_t *requestq_cachep;
+
+/*
+ * For io context allocations
+ */
+static kmem_cache_t *iocontext_cachep;
+
static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
*/
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
- int ret;
-
- ret = q->nr_requests - (q->nr_requests / 8) + 1;
-
- if (ret > q->nr_requests)
- ret = q->nr_requests;
-
- return ret;
+ return q->nr_congestion_on;
}
/*
*/
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
- int ret;
+ return q->nr_congestion_off;
+}
- ret = q->nr_requests - (q->nr_requests / 8) - 1;
+static void blk_queue_congestion_threshold(struct request_queue *q)
+{
+ int nr;
- if (ret < 1)
- ret = 1;
+ nr = q->nr_requests - (q->nr_requests / 8) + 1;
+ if (nr > q->nr_requests)
+ nr = q->nr_requests;
+ q->nr_congestion_on = nr;
- return ret;
+ nr = q->nr_requests - (q->nr_requests / 8) - 1;
+ if (nr < 1)
+ nr = 1;
+ q->nr_congestion_off = nr;
}
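+
+/*
+ * Worked example (illustration only): with a typical default of
+ * nr_requests = 128, the congestion-on threshold is 128 - 128/8 + 1 = 113
+ * and the congestion-off threshold is 128 - 128/8 - 1 = 111, so a request
+ * list is flagged congested once 113 requests are allocated and the flag
+ * is only cleared again once it drains below 111.
+ */
+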
/*
return ret;
}
+EXPORT_SYMBOL(blk_get_backing_dev_info);
+
void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
{
q->activity_fn = fn;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
+ blk_queue_congestion_threshold(q);
q->unplug_thresh = 4; /* hmm */
q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
EXPORT_SYMBOL(blk_queue_make_request);
+/**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q: the request queue
+ * @flag: non-zero to enable ordered writes on the queue, zero to disable
+ *
+ * Description:
+ * For journalled file systems, doing ordered writes on a commit
+ * block instead of explicitly doing wait_on_buffer (which is bad
+ * for performance) can be a big win. Block drivers supporting this
+ * feature should call this function and indicate so.
+ *
+ **/
+void blk_queue_ordered(request_queue_t *q, int flag)
+{
+ if (flag)
+ set_bit(QUEUE_FLAG_ORDERED, &q->queue_flags);
+ else
+ clear_bit(QUEUE_FLAG_ORDERED, &q->queue_flags);
+}
+
+EXPORT_SYMBOL(blk_queue_ordered);
+
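+/*
+ * Illustration only: a block driver that can honour ordered writes would
+ * flag that once its queue has been set up, e.g.:
+ *
+ *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
+ *	blk_queue_ordered(q, 1);
+ *
+ * mydrv_request_fn and mydrv_lock are hypothetical driver names.
+ */
+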
+/**
+ * blk_queue_issue_flush_fn - set function for issuing a flush
+ * @q: the request queue
+ * @iff: the function to be called when issuing the flush
+ *
+ * Description:
+ * If a driver supports issuing a flush command, it notifies the block
+ * layer of that support by registering the function through this call.
+ *
+ **/
+void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+{
+ q->issue_flush_fn = iff;
+}
+
+EXPORT_SYMBOL(blk_queue_issue_flush_fn);
+
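+/*
+ * Illustration only: a driver with a working cache flush command registers
+ * a handler with the same prototype as blkdev_scsi_issue_flush_fn() below
+ * (mydrv_issue_flush is a hypothetical driver function):
+ *
+ *	static int mydrv_issue_flush(request_queue_t *q, struct gendisk *disk,
+ *				     sector_t *error_sector)
+ *	{
+ *		...send the device flush, fill *error_sector on failure...
+ *	}
+ *
+ *	blk_queue_issue_flush_fn(q, mydrv_issue_flush);
+ */
+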
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
- unsigned long mb = dma_addr >> 20;
- static request_queue_t *last_q;
/*
* set appropriate bounce gfp mask -- unfortunately we don't have a
} else
q->bounce_gfp = GFP_NOIO;
- /*
- * keep this for debugging for now...
- */
- if (dma_addr != BLK_BOUNCE_HIGH && q != last_q) {
- printk("blk: queue %p, ", q);
- if (dma_addr == BLK_BOUNCE_ANY)
- printk("no I/O memory limit\n");
- else
- printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
- }
-
q->bounce_pfn = bounce_pfn;
- last_q = q;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
}
- q->max_sectors = max_sectors;
+ q->max_sectors = q->max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_sectors);
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
/* zero is "infinity" */
- t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+ t->max_sectors = t->max_hw_sectors =
+ min_not_zero(t->max_sectors,b->max_sectors);
t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * blk_queue_free_tags - release tag maintenance info
+ * __blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
* Notes:
* blk_cleanup_queue() will take care of calling this function, if tagging
- * has been used. So there's usually no need to call this directly, unless
- * tagging is just being disabled but the queue remains in function.
+ * has been used. So there's no need to call this directly.
**/
-void blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(request_queue_t *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * This is used to disable tagged queuing to a device, yet leave the
+ * queue functioning.
+ **/
+void blk_queue_free_tags(request_queue_t *q)
+{
+ clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+}
+
EXPORT_SYMBOL(blk_queue_free_tags);
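+
+/*
+ * Illustration only: tagged queuing is enabled with blk_queue_init_tags()
+ * and can later be switched off again without tearing down the queue
+ * (the depth of 64 is just an example value):
+ *
+ *	if (blk_queue_init_tags(q, 64, NULL))
+ *		...fall back to untagged operation...
+ *	...
+ *	blk_queue_free_tags(q);		queue keeps working, untagged
+ */
+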
static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
int bits, i;
+ struct request **tag_index;
+ unsigned long *tag_map;
if (depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
__FUNCTION__, depth);
}
- tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
- if (!tags->tag_index)
+ tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+ if (!tag_index)
goto fail;
bits = (depth / BLK_TAGS_PER_LONG) + 1;
- tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
- if (!tags->tag_map)
+ tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+ if (!tag_map)
goto fail;
- memset(tags->tag_index, 0, depth * sizeof(struct request *));
- memset(tags->tag_map, 0, bits * sizeof(unsigned long));
+ memset(tag_index, 0, depth * sizeof(struct request *));
+ memset(tag_map, 0, bits * sizeof(unsigned long));
tags->max_depth = depth;
tags->real_max_depth = bits * BITS_PER_LONG;
+ tags->tag_index = tag_index;
+ tags->tag_map = tag_map;
/*
* set the upper bits if the depth isn't a multiple of the word size
*/
for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
- __set_bit(i, tags->tag_map);
+ __set_bit(i, tag_map);
- INIT_LIST_HEAD(&tags->busy_list);
- tags->busy = 0;
- atomic_set(&tags->refcnt, 1);
return 0;
fail:
- kfree(tags->tag_index);
+ kfree(tag_index);
return -ENOMEM;
}
int blk_queue_init_tags(request_queue_t *q, int depth,
struct blk_queue_tag *tags)
{
- if (!tags) {
+ int rc;
+
+ BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+ if (!tags && !q->queue_tags) {
tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
if (!tags)
goto fail;
if (init_tag_map(q, tags, depth))
goto fail;
+
+ INIT_LIST_HEAD(&tags->busy_list);
+ tags->busy = 0;
+ atomic_set(&tags->refcnt, 1);
+ } else if (q->queue_tags) {
+ if ((rc = blk_queue_resize_tags(q, depth)))
+ return rc;
+ set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+ return 0;
} else
atomic_inc(&tags->refcnt);
return 0;
}
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
/**
* blk_queue_end_tag - end tag operations for a request
* @q: the request queue for the device
void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
- int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;
+ int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
int high, highprv = 1;
if (unlikely(!bio->bi_io_vec))
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
- seg_size = nr_phys_segs = nr_hw_segs = 0;
+ hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
/*
* the trick here is making sure that a high page is never
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
goto new_segment;
+ if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+ goto new_hw_segment;
seg_size += bv->bv_len;
+ hw_seg_size += bv->bv_len;
bvprv = bv;
continue;
}
new_segment:
- if (!BIOVEC_VIRT_MERGEABLE(bvprv, bv))
+ if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
+ !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+ hw_seg_size += bv->bv_len;
+ } else {
new_hw_segment:
+ if (hw_seg_size > bio->bi_hw_front_size)
+ bio->bi_hw_front_size = hw_seg_size;
+ hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
nr_hw_segs++;
+ }
nr_phys_segs++;
bvprv = bv;
seg_size = bv->bv_len;
highprv = high;
}
-
+ if (hw_seg_size > bio->bi_hw_back_size)
+ bio->bi_hw_back_size = hw_seg_size;
+ if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
+ bio->bi_hw_front_size = hw_seg_size;
bio->bi_phys_segments = nr_phys_segs;
bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
- if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
- return 0;
-
- if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ blk_recount_segments(q, bio);
+ if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+ blk_recount_segments(q, nxt);
+ if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+ BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
return 0;
if (bio->bi_size + nxt->bi_size > q->max_segment_size)
return 0;
- /*
- * bio and nxt are contigous in memory, check if the queue allows
- * these two to be merged into one
- */
- if (BIO_SEG_BOUNDARY(q, bio, nxt))
- return 1;
-
- return 0;
+ return 1;
}
EXPORT_SYMBOL(blk_hw_contig_segment);
if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
req->flags |= REQ_NOMERGE;
- q->last_merge = NULL;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
return 0;
}
if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
req->flags |= REQ_NOMERGE;
- q->last_merge = NULL;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
return 0;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
+ int len;
+
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
req->flags |= REQ_NOMERGE;
- q->last_merge = NULL;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
return 0;
}
-
- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)))
- return ll_new_mergeable(q, req, bio);
+ if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+ blk_recount_segments(q, req->biotail);
+ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ blk_recount_segments(q, bio);
+ len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
+ !BIOVEC_VIRT_OVERSIZE(len)) {
+ int mergeable = ll_new_mergeable(q, req, bio);
+
+ if (mergeable) {
+ if (req->nr_hw_segments == 1)
+ req->bio->bi_hw_front_size = len;
+ if (bio->bi_hw_segments == 1)
+ bio->bi_hw_back_size = len;
+ }
+ return mergeable;
+ }
return ll_new_hw_segment(q, req, bio);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
+ int len;
+
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
req->flags |= REQ_NOMERGE;
- q->last_merge = NULL;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
return 0;
}
-
- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)))
- return ll_new_mergeable(q, req, bio);
+ len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
+ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ blk_recount_segments(q, bio);
+ if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+ blk_recount_segments(q, req->bio);
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+ !BIOVEC_VIRT_OVERSIZE(len)) {
+ int mergeable = ll_new_mergeable(q, req, bio);
+
+ if (mergeable) {
+ if (bio->bi_hw_segments == 1)
+ bio->bi_hw_front_size = len;
+ if (req->nr_hw_segments == 1)
+ req->biotail->bi_hw_back_size = len;
+ }
+ return mergeable;
+ }
return ll_new_hw_segment(q, req, bio);
}
return 0;
total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
- if (blk_hw_contig_segment(q, req->biotail, next->bio))
+ if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
+ int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+ /*
+ * propagate the combined length to the end of the requests
+ */
+ if (req->nr_hw_segments == 1)
+ req->bio->bi_hw_front_size = len;
+ if (next->nr_hw_segments == 1)
+ next->biotail->bi_hw_back_size = len;
total_hw_segments--;
+ }
if (total_hw_segments > q->max_hw_segments)
return 0;
/*
* remove the plug and let it rip..
*/
-static inline void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(request_queue_t *q)
{
if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
return;
if (elv_next_request(q))
q->request_fn(q);
}
+EXPORT_SYMBOL(__generic_unplug_device);
/**
* generic_unplug_device - fire a request queue
- * @data: The &request_queue_t in question
+ * @q: The &request_queue_t in question
*
* Description:
* Linux uses plugging to build bigger requests queues before letting
}
EXPORT_SYMBOL(generic_unplug_device);
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi)
+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+ struct page *page)
{
request_queue_t *q = bdi->unplug_io_data;
clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
} else {
blk_plug_device(q);
- schedule_work(&q->unplug_work);
+ kblockd_schedule_work(&q->unplug_work);
}
}
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
- if (blk_queue_tagged(q))
- blk_queue_free_tags(q);
+ if (q->queue_tags)
+ __blk_queue_free_tags(q);
- kfree(q);
+ kmem_cache_free(requestq_cachep, q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
request_queue_t *blk_alloc_queue(int gfp_mask)
{
- request_queue_t *q = kmalloc(sizeof(*q), gfp_mask);
+ request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
if (!q)
return NULL;
printk("Using %s io scheduler\n", chosen_elevator->elevator_name);
}
- if (elevator_init(q, chosen_elevator))
- goto out_elv;
-
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- return q;
-out_elv:
+ /*
+ * all done
+ */
+ if (!elevator_init(q, chosen_elevator))
+ return q;
+
blk_cleanup_queue(q);
out_init:
- kfree(q);
+ kmem_cache_free(requestq_cachep, q);
return NULL;
}
*
* A matching blk_rq_unmap_user() must be issued at the end of io, while
* still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ * reach. It's the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
*/
struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
unsigned int len)
{
- struct request *rq = NULL;
- char *buf = NULL;
+ unsigned long uaddr;
+ struct request *rq;
struct bio *bio;
- int ret;
+
+ if (len > (q->max_sectors << 9))
+ return ERR_PTR(-EINVAL);
+ if ((!len && ubuf) || (len && !ubuf))
+ return ERR_PTR(-EINVAL);
rq = blk_get_request(q, rw, __GFP_WAIT);
if (!rq)
return ERR_PTR(-ENOMEM);
- bio = bio_map_user(q, NULL, (unsigned long) ubuf, len, rw == READ);
- if (!bio) {
- int bytes = (len + 511) & ~511;
-
- buf = kmalloc(bytes, q->bounce_gfp | GFP_USER);
- if (!buf) {
- ret = -ENOMEM;
- goto fault;
- }
-
- if (rw == WRITE) {
- if (copy_from_user(buf, ubuf, len)) {
- ret = -EFAULT;
- goto fault;
- }
- } else
- memset(buf, 0, len);
- }
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+ bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+ else
+ bio = bio_copy_user(q, uaddr, len, rw == READ);
- rq->bio = rq->biotail = bio;
- if (rq->bio)
+ if (!IS_ERR(bio)) {
+ rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = rq->data = buf;
- rq->data_len = len;
- return rq;
-fault:
- if (buf)
- kfree(buf);
- if (bio)
- bio_unmap_user(bio, 1);
- if (rq)
- blk_put_request(rq);
+ rq->buffer = rq->data = NULL;
+ rq->data_len = len;
+ return rq;
+ }
- return ERR_PTR(ret);
+ /*
+ * bio is the err-ptr
+ */
+ blk_put_request(rq);
+ return (struct request *) bio;
}
EXPORT_SYMBOL(blk_rq_map_user);
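+
+/*
+ * Illustration only, following the note above; disk, ubuf and len are
+ * assumed to come from the caller and error handling is omitted:
+ *
+ *	rq = blk_rq_map_user(q, READ, ubuf, len);
+ *	bio = rq->bio;			keep the original bio for unmapping
+ *	...run blk_queue_bounce() here if the pages may be out of reach...
+ *	blk_execute_rq(q, disk, rq);
+ *	blk_rq_unmap_user(rq, bio, len);
+ */
+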
* Description:
* Unmap a request previously mapped by blk_rq_map_user().
*/
-int blk_rq_unmap_user(struct request *rq, void __user *ubuf, struct bio *bio,
- unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
{
- const int read = rq_data_dir(rq) == READ;
int ret = 0;
- if (bio)
- bio_unmap_user(bio, read);
- if (rq->buffer) {
- if (read && copy_to_user(ubuf, rq->buffer, ulen))
- ret = -EFAULT;
- kfree(rq->buffer);
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
}
blk_put_request(rq);
}
rq->flags |= REQ_NOMERGE;
- rq->waiting = &wait;
+ if (!rq->waiting)
+ rq->waiting = &wait;
elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
generic_unplug_device(q);
- wait_for_completion(&wait);
+ wait_for_completion(rq->waiting);
+ rq->waiting = NULL;
if (rq->errors)
err = -EIO;
EXPORT_SYMBOL(blk_execute_rq);
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev: blockdev to issue flush for
+ * @error_sector: error sector
+ *
+ * Description:
+ * Issue a flush for the block device in question. Caller can supply
+ * room for storing the error offset in case of a flush error, if they
+ * wish to. Caller must run wait_for_completion() on its own.
+ */
+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+{
+ request_queue_t *q;
+
+ if (bdev->bd_disk == NULL)
+ return -ENXIO;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+ if (!q->issue_flush_fn)
+ return -EOPNOTSUPP;
+
+ return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+}
+
+EXPORT_SYMBOL(blkdev_issue_flush);
+
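+/*
+ * Illustration only: a journalled file system that wants its commit block
+ * on stable storage could do something like the following (sb is a
+ * hypothetical superblock pointer):
+ *
+ *	sector_t err_sect;
+ *
+ *	if (blkdev_issue_flush(sb->s_bdev, &err_sect) == -EOPNOTSUPP)
+ *		...device has no flush support, order writes some other way...
+ *
+ * The SCSI helper below checks @error_sector before storing to it, so NULL
+ * may be passed when the error offset is not of interest.
+ */
+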
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ * Devices understanding the SCSI command set can use this function as
+ * a helper for issuing a cache flush. Note: the driver is required to
+ * store the error offset (in case of a flush error) in ->sector of
+ * struct request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ int ret;
+
+ rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+ rq->sector = 0;
+ memset(rq->cmd, 0, sizeof(rq->cmd));
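+	/* 0x35 = SCSI SYNCHRONIZE CACHE(10) opcode */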
+ rq->cmd[0] = 0x35;
+ rq->cmd_len = 12;
+ rq->data = NULL;
+ rq->data_len = 0;
+ rq->timeout = 60 * HZ;
+
+ ret = blk_execute_rq(q, disk, rq);
+
+ if (ret && error_sector)
+ *error_sector = rq->sector;
+
+ blk_put_request(rq);
+ return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
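+/*
+ * Illustration only: a driver for a device speaking the SCSI command set
+ * can simply wire its queue to the helper above at init time:
+ *
+ *	blk_queue_issue_flush_fn(q, blkdev_scsi_issue_flush_fn);
+ *
+ * after which blkdev_issue_flush() on that device becomes a
+ * SYNCHRONIZE CACHE request.
+ */
+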
void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
- int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra;
+ int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err;
sector_t sector;
sector = bio->bi_sector;
spin_lock_prefetch(q->queue_lock);
- barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);
-
- ra = bio->bi_rw & (1 << BIO_RW_AHEAD);
+ barrier = bio_barrier(bio);
+ if (barrier && !(q->queue_flags & (1 << QUEUE_FLAG_ORDERED))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
again:
spin_lock_irq(q->queue_lock);
/*
* READA bit set
*/
- if (ra)
+ err = -EWOULDBLOCK;
+ if (bio_rw_ahead(bio))
goto end_io;
freereq = get_request_wait(q, rw);
req->flags |= REQ_CMD;
/*
- * inherit FAILFAST from bio and don't stack up
- * retries for read ahead
+ * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
- if (ra || test_bit(BIO_RW_FAILFAST, &bio->bi_rw))
+ if (bio_rw_ahead(bio) || bio_failfast(bio))
req->flags |= REQ_FAILFAST;
/*
out:
if (freereq)
__blk_put_request(q, freereq);
+ if (bio_sync(bio))
+ __generic_unplug_device(q);
- if (blk_queue_plugged(q)) {
- int nrq = q->rq.count[READ] + q->rq.count[WRITE] - q->in_flight;
-
- if (nrq == q->unplug_thresh || bio_sync(bio))
- __generic_unplug_device(q);
- }
spin_unlock_irq(q->queue_lock);
return 0;
end_io:
- bio_endio(bio, nr_sectors << 9, -EWOULDBLOCK);
+ bio_endio(bio, nr_sectors << 9, err);
return 0;
}
sector_t maxsector;
int ret, nr_sectors = bio_sectors(bio);
+ might_sleep();
/* Test device or partition size, when known. */
maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) {
break;
}
- if (unlikely(bio_sectors(bio) > q->max_sectors)) {
+ if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
printk("bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
- q->max_sectors);
+ q->max_hw_sectors);
goto end_io;
}
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
- printk("%s(%d): %s block %Lu on %s\n",
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
current->comm, current->pid,
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
void blk_recalc_rq_segments(struct request *rq)
{
- struct bio *bio;
+ struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
if (!rq->bio)
nr_phys_segs += bio_phys_segments(rq->q, bio);
nr_hw_segs += bio_hw_segments(rq->q, bio);
+ if (prevbio) {
+ if (blk_phys_contig_segment(rq->q, prevbio, bio))
+ nr_phys_segs--;
+ if (blk_hw_contig_segment(rq->q, prevbio, bio))
+ nr_hw_segs--;
+ }
+ prevbio = bio;
}
rq->nr_phys_segments = nr_phys_segs;
static int __end_that_request_first(struct request *req, int uptodate,
int nr_bytes)
{
- int total_bytes, bio_nbytes, error = 0, next_idx = 0;
+ int total_bytes, bio_nbytes, error, next_idx = 0;
struct bio *bio;
+ /*
+ * extend uptodate bool to allow < 0 value to be direct io error
+ */
+ error = 0;
+ if (end_io_error(uptodate))
+ error = !uptodate ? -EIO : uptodate;
+
/*
* for a REQ_BLOCK_PC request, we want to carry any eventual
* sense key with us all the way through
req->errors = 0;
if (!uptodate) {
- error = -EIO;
if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
printk("end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
}
total_bytes = bio_nbytes = 0;
- while ((bio = req->bio)) {
+ while ((bio = req->bio) != NULL) {
int nbytes;
if (nr_bytes >= bio->bi_size) {
/**
* end_that_request_first - end I/O on a request
* @req: the request being processed
- * @uptodate: 0 for I/O error
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
* @nr_sectors: number of sectors to end I/O on
*
* Description:
/**
* end_that_request_chunk - end I/O on a request
* @req: the request being processed
- * @uptodate: 0 for I/O error
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
* @nr_bytes: number of bytes to complete
*
* Description:
return queue_work(kblockd_workqueue, work);
}
+EXPORT_SYMBOL(kblockd_schedule_work);
+
void kblockd_flush(void)
{
flush_workqueue(kblockd_workqueue);
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, 0, NULL, NULL);
- if (!request_cachep)
- panic("Can't create request pool slab cache\n");
+ sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+
+ requestq_cachep = kmem_cache_create("blkdev_queue",
+ sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+
+ iocontext_cachep = kmem_cache_create("blkdev_ioc",
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
if (atomic_dec_and_test(&ioc->refcount)) {
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
- kfree(ioc);
+ kmem_cache_free(iocontext_cachep, ioc);
}
}
local_irq_save(flags);
ret = tsk->io_context;
if (ret == NULL) {
- ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+ ret = kmem_cache_alloc(iocontext_cachep, GFP_ATOMIC);
if (ret) {
atomic_set(&ret->refcount, 1);
ret->pid = tsk->pid;
int ret = queue_var_store(&q->nr_requests, page, count);
if (q->nr_requests < BLKDEV_MIN_RQ)
q->nr_requests = BLKDEV_MIN_RQ;
+ blk_queue_congestion_threshold(q);
if (rl->count[READ] >= queue_congestion_on_threshold(q))
set_queue_congested(q, READ);
return ret;
}
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+ int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+
+ return queue_var_show(ra_kb, (page));
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long ra_kb;
+ ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (ra_kb > (q->max_sectors >> 1))
+ ra_kb = (q->max_sectors >> 1);
+
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+ int max_sectors_kb = q->max_sectors >> 1;
+
+ return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long max_sectors_kb,
+ max_hw_sectors_kb = q->max_hw_sectors >> 1,
+ page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+ ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+ int ra_kb;
+
+ if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+ return -EINVAL;
+ /*
+ * Take the queue lock to update the readahead and max_sectors
+ * values synchronously:
+ */
+ spin_lock_irq(q->queue_lock);
+ /*
+ * Trim readahead window as well, if necessary:
+ */
+ ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+ if (ra_kb > max_sectors_kb)
+ q->backing_dev_info.ra_pages =
+ max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+ q->max_sectors = max_sectors_kb << 1;
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+ int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+ return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+
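+/*
+ * The sysfs entries below export the tunables above as files under
+ * /sys/block/<dev>/queue/ (nr_requests, read_ahead_kb, max_sectors_kb and
+ * the read-only max_hw_sectors_kb). queue_max_sectors_store() above rejects
+ * writes outside the range [PAGE_CACHE_SIZE / 1024, max_hw_sectors_kb]
+ * with -EINVAL.
+ */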
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.store = queue_requests_store,
};
+static struct queue_sysfs_entry queue_ra_entry = {
+ .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_ra_show,
+ .store = queue_ra_store,
+};
+
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+ .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_max_sectors_show,
+ .store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+ .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+ .show = queue_max_hw_sectors_show,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
+ &queue_ra_entry.attr,
+ &queue_max_hw_sectors_entry.attr,
+ &queue_max_sectors_entry.attr,
NULL,
};