*/
static kmem_cache_t *request_cachep;
+/*
+ * For queue allocation
+ */
+static kmem_cache_t *requestq_cachep;
+
+/*
+ * For io context allocations
+ */
+static kmem_cache_t *iocontext_cachep;
+
static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
*/
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
- int ret;
-
- ret = q->nr_requests - (q->nr_requests / 8) + 1;
-
- if (ret > q->nr_requests)
- ret = q->nr_requests;
-
- return ret;
+ return q->nr_congestion_on;
}
/*
*/
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
- int ret;
+ return q->nr_congestion_off;
+}
- ret = q->nr_requests - (q->nr_requests / 8) - 1;
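+/*
+ * Recompute the congestion on/off thresholds from the queue's nr_requests.
+ * The "on" threshold sits slightly above the "off" threshold so the
+ * congestion state does not flip-flop when the allocated request count
+ * hovers around the limit.
+ */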
+static void blk_queue_congestion_threshold(struct request_queue *q)
+{
+ int nr;
- if (ret < 1)
- ret = 1;
+ nr = q->nr_requests - (q->nr_requests / 8) + 1;
+ if (nr > q->nr_requests)
+ nr = q->nr_requests;
+ q->nr_congestion_on = nr;
- return ret;
+ nr = q->nr_requests - (q->nr_requests / 8) - 1;
+ if (nr < 1)
+ nr = 1;
+ q->nr_congestion_off = nr;
}
/*
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
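+	/* establish initial congestion thresholds for this queue */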
+ blk_queue_congestion_threshold(q);
q->unplug_thresh = 4; /* hmm */
q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
- unsigned long mb = dma_addr >> 20;
- static request_queue_t *last_q;
/*
* set appropriate bounce gfp mask -- unfortunately we don't have a
} else
q->bounce_gfp = GFP_NOIO;
- /*
- * keep this for debugging for now...
- */
- if (dma_addr != BLK_BOUNCE_HIGH && q != last_q) {
- printk("blk: queue %p, ", q);
- if (dma_addr == BLK_BOUNCE_ANY)
- printk("no I/O memory limit\n");
- else
- printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
- }
-
q->bounce_pfn = bounce_pfn;
- last_q = q;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
* generic_unplug_device - fire a request queue
- * @data: The &request_queue_t in question
+ * @q: The &request_queue_t in question
*
* Description:
* Linux uses plugging to build bigger requests queues before letting
}
EXPORT_SYMBOL(generic_unplug_device);
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi)
+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+ struct page *page)
{
request_queue_t *q = bdi->unplug_io_data;
clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
} else {
blk_plug_device(q);
- schedule_work(&q->unplug_work);
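+		/* run the unplug work from the block layer's own kblockd workqueue */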
+ kblockd_schedule_work(&q->unplug_work);
}
}
if (blk_queue_tagged(q))
blk_queue_free_tags(q);
- kfree(q);
+ kmem_cache_free(requestq_cachep, q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
static int __make_request(request_queue_t *, struct bio *);
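+/*
+ * Default I/O scheduler: prefer CFQ when it is built in, otherwise fall
+ * back to AS, then deadline, then noop.
+ */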
static elevator_t *chosen_elevator =
-#if defined(CONFIG_IOSCHED_AS)
+#if defined(CONFIG_IOSCHED_CFQ)
+ &iosched_cfq;
+#elif defined(CONFIG_IOSCHED_AS)
&iosched_as;
#elif defined(CONFIG_IOSCHED_DEADLINE)
&iosched_deadline;
-#elif defined(CONFIG_IOSCHED_CFQ)
- &iosched_cfq;
#elif defined(CONFIG_IOSCHED_NOOP)
&elevator_noop;
#else
request_queue_t *blk_alloc_queue(int gfp_mask)
{
- request_queue_t *q = kmalloc(sizeof(*q), gfp_mask);
+ request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
if (!q)
return NULL;
out_elv:
blk_cleanup_queue(q);
out_init:
- kfree(q);
+ kmem_cache_free(requestq_cachep, q);
return NULL;
}
elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
generic_unplug_device(q);
wait_for_completion(&wait);
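+	/* the completion is on our stack, so clear the pointer before returning */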
+ rq->waiting = NULL;
if (rq->errors)
err = -EIO;
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
- printk("%s(%d): %s block %Lu on %s\n",
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
current->comm, current->pid,
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, 0, NULL, NULL);
- if (!request_cachep)
- panic("Can't create request pool slab cache\n");
+ sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+
+ requestq_cachep = kmem_cache_create("blkdev_queue",
+ sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+
+ iocontext_cachep = kmem_cache_create("blkdev_ioc",
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
if (atomic_dec_and_test(&ioc->refcount)) {
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
- kfree(ioc);
+ kmem_cache_free(iocontext_cachep, ioc);
}
}
local_irq_save(flags);
ret = tsk->io_context;
if (ret == NULL) {
- ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+ ret = kmem_cache_alloc(iocontext_cachep, GFP_ATOMIC);
if (ret) {
atomic_set(&ret->refcount, 1);
ret->pid = tsk->pid;
int ret = queue_var_store(&q->nr_requests, page, count);
if (q->nr_requests < BLKDEV_MIN_RQ)
q->nr_requests = BLKDEV_MIN_RQ;
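+	/* nr_requests has changed, recompute the congestion thresholds */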
+ blk_queue_congestion_threshold(q);
if (rl->count[READ] >= queue_congestion_on_threshold(q))
set_queue_congested(q, READ);
return ret;
}
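+/*
+ * Report the queue's current read-ahead setting, in kilobytes.
+ */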
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+ int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+
+	return queue_var_show(ra_kb, page);
+}
+
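+/*
+ * Set the read-ahead size in kilobytes.  The value is clamped so that
+ * read-ahead never covers more than max_sectors worth of data.
+ */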
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long ra_kb;
+ ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+ if (ra_kb > (q->max_sectors >> 1))
+ ra_kb = (q->max_sectors >> 1);
+
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.store = queue_requests_store,
};
+static struct queue_sysfs_entry queue_ra_entry = {
+ .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_ra_show,
+ .store = queue_ra_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
+ &queue_ra_entry.attr,
NULL,
};