#define REQ_ASYNC 0
/*
- * See Documentation/as-iosched.txt
+ * See Documentation/block/as-iosched.txt
*/
/*
* read_batch_expire describes how long we will allow a stream of reads to
* persist before looking to see whether it is time to switch over to writes.
*/
-#define default_read_batch_expire (HZ / 4)
+#define default_read_batch_expire (HZ / 2)
/*
* write_batch_expire describes how long we want a stream of writes to run for.
* See, the problem is: we can send a lot of writes to disk cache / TCQ in
* a short amount of time...
*/
-#define default_write_batch_expire (HZ / 16)
+#define default_write_batch_expire (HZ / 8)
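/*
 * Editorial note (not part of this patch): what the new defaults mean in
 * wall-clock terms. A value of j jiffies is j * 1000 / HZ milliseconds, so:
 *
 *	HZ=1000: HZ / 2 = 500 jiffies -> 500ms,  HZ / 8 = 125 jiffies -> 125ms
 *	HZ=100:  HZ / 2 =  50 jiffies -> 500ms,  HZ / 8 =  12 jiffies -> 120ms
 *
 * i.e. both batch windows double relative to the old HZ / 4 and HZ / 16
 * defaults, independent of the HZ the kernel is built with.
 */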
/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
static void as_antic_timeout(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
WARN_ON(!list_empty(&rq->queuelist));
{
struct as_rq *arq = RQ_DATA(rq);
const int data_dir = arq->is_sync;
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
WARN_ON(arq->state != AS_RQ_QUEUED);
}
as_remove_queued_request(ad->q, rq);
+ WARN_ON(arq->state != AS_RQ_QUEUED);
+
list_add(&rq->queuelist, insert);
+ arq->state = AS_RQ_DISPATCHED;
if (arq->io_context && arq->io_context->aic)
atomic_inc(&arq->io_context->aic->nr_dispatched);
-
- WARN_ON(arq->state != AS_RQ_QUEUED);
- arq->state = AS_RQ_DISPATCHED;
-
ad->nr_dispatched++;
}
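/*
 * Editorial sketch (inferred only from the hunks in this patch): the arq
 * state transitions this change touches are
 *
 *	AS_RQ_QUEUED  -> AS_RQ_DISPATCHED   as_move_to_dispatch() above, and
 *	                                    the direct dispatch-queue inserts
 *	                                    below
 *	AS_RQ_REMOVED -> AS_RQ_DISPATCHED   as_deactivate_request()
 *
 * ad->nr_dispatched counts dispatched requests at the elevator level;
 * aic->nr_dispatched tracks them per io_context when one is attached
 * (as_account_queued_request() deliberately skips the per-ioc count, see
 * its comment).
 */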
static struct request *as_next_request(request_queue_t *q)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct request *rq = NULL;
arq->state = AS_RQ_QUEUED;
}
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (arq) {
- if (arq->state != AS_RQ_REMOVED) {
- printk("arq->state %d\n", arq->state);
- WARN_ON(1);
+ if (arq->state == AS_RQ_REMOVED) {
+ arq->state = AS_RQ_DISPATCHED;
+ if (arq->io_context && arq->io_context->aic)
+ atomic_inc(&arq->io_context->aic->nr_dispatched);
}
-
- arq->state = AS_RQ_DISPATCHED;
- if (arq->io_context && arq->io_context->aic)
- atomic_inc(&arq->io_context->aic->nr_dispatched);
} else
WARN_ON(blk_fs_request(rq)
&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
- list_add(&rq->queuelist, ad->dispatch);
-
/* Stop anticipating - let this request get through */
as_antic_stop(ad);
}
+/*
+ * requeue the request. The request has not been completed, nor is it a
+ * new request, so don't touch accounting.
+ */
+static void as_requeue_request(request_queue_t *q, struct request *rq)
+{
+ as_deactivate_request(q, rq);
+ list_add(&rq->queuelist, &q->queue_head);
+}
+
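/*
 * Assumed caller pattern (editorial sketch, not part of this patch): a
 * driver that has started a request but cannot complete it hands it back
 * under the queue lock, reaching the hook above via elv_requeue_request():
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_requeue_request(q, rq);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * list_add() onto &q->queue_head inserts at the head of the queue, so a
 * requeued request is the next one the driver sees.
 */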
+/*
+ * Account a request that is inserted directly onto the dispatch queue.
+ * arq->io_context->aic->nr_dispatched should not need to be incremented
+ * because only new requests should come through here: requeues go through
+ * our explicit requeue handler.
+ */
+static void as_account_queued_request(struct as_data *ad, struct request *rq)
+{
+ if (blk_fs_request(rq)) {
+ struct as_rq *arq = RQ_DATA(rq);
+ arq->state = AS_RQ_DISPATCHED;
+ ad->nr_dispatched++;
+ }
+}
+
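/*
 * Editorial sketch (an assumption, consistent with the increment above):
 * the elided as_completed_request() is expected to balance this accounting
 * with something like
 *
 *	WARN_ON(!ad->nr_dispatched);
 *	ad->nr_dispatched--;
 *
 * The blk_fs_request() gate exists because only fs requests take part in
 * this dispatch accounting.
 */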
static void
as_insert_request(request_queue_t *q, struct request *rq, int where)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (arq) {
as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
list_add_tail(&rq->queuelist, ad->dispatch);
+ as_account_queued_request(ad, rq);
as_antic_stop(ad);
break;
case ELEVATOR_INSERT_FRONT:
list_add(&rq->queuelist, ad->dispatch);
+ as_account_queued_request(ad, rq);
as_antic_stop(ad);
break;
case ELEVATOR_INSERT_SORT:
/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(request_queue_t *q)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
if (!list_empty(&ad->fifo_list[REQ_ASYNC])
|| !list_empty(&ad->fifo_list[REQ_SYNC])
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
struct request *__rq;
int ret;
static void as_merged_request(request_queue_t *q, struct request *req)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
static void
as_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
struct as_rq *anext = RQ_DATA(next);
static void as_put_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (!arq) {
static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
if (arq) {
static int as_may_queue(request_queue_t *q, int rw)
{
- int ret = 0;
- struct as_data *ad = q->elevator.elevator_data;
+ int ret = ELV_MQUEUE_MAY;
+ struct as_data *ad = q->elevator->elevator_data;
struct io_context *ioc;
if (ad->antic_status == ANTIC_WAIT_REQ ||
ad->antic_status == ANTIC_WAIT_NEXT) {
ioc = as_get_io_context();
if (ad->io_context == ioc)
- ret = 1;
+ ret = ELV_MQUEUE_MUST;
put_io_context(ioc);
}
return ret;
}
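/*
 * Editorial sketch (an assumption about the caller, not part of this
 * patch): the tri-state return lets the block layer's get_request()
 * distinguish "apply the normal queue-full limits" from "let this one
 * through regardless", roughly:
 *
 *	switch (elv_may_queue(q, rw)) {
 *	case ELV_MQUEUE_NO:
 *		return NULL;
 *	case ELV_MQUEUE_MUST:
 *		goto get_rq;
 *	case ELV_MQUEUE_MAY:
 *		break;
 *	}
 *	... normal queue-full and starvation checks ...
 *
 * AS returns ELV_MQUEUE_MUST for the io_context it is currently
 * anticipating, so that process's next request is never held back by a
 * full queue.
 */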
-static void as_exit(request_queue_t *q, elevator_t *e)
+static void as_exit_queue(elevator_t *e)
{
struct as_data *ad = e->elevator_data;
/*
 * initialize elevator private data (as_data), and allocate an arq for
 * each request on the free lists
 */
-static int as_init(request_queue_t *q, elevator_t *e)
+static int as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
return pos;
}
-#define SHOW_FUNCTION(__FUNC, __VAR) \
+#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct as_data *ad, char *page) \
-{ \
- return as_var_show(__VAR, (page)); \
+{ \
+ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
}
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
*(__PTR) = (MIN); \
else if (*(__PTR) > (MAX)) \
*(__PTR) = (MAX); \
+ *(__PTR) = msecs_to_jiffies(*(__PTR)); \
return ret; \
}
STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
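/*
 * Editorial note: with the jiffies_to_msecs()/msecs_to_jiffies()
 * conversions above, the sysfs tunables are read and written in
 * milliseconds rather than raw jiffies. Assuming the usual
 * /sys/block/<dev>/queue/iosched/ attribute names, a 500ms read batch on
 * a hypothetical device hda would be set with
 *
 *	echo 500 > /sys/block/hda/queue/iosched/read_batch_expire
 *
 * independent of HZ. Note that the MIN/MAX clamp in the store path is
 * applied to the millisecond value, before the conversion to jiffies.
 */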
.default_attrs = default_attrs,
};
-static int __init as_slab_setup(void)
+static struct elevator_type iosched_as = {
+ .ops = {
+ .elevator_merge_fn = as_merge,
+ .elevator_merged_fn = as_merged_request,
+ .elevator_merge_req_fn = as_merged_requests,
+ .elevator_next_req_fn = as_next_request,
+ .elevator_add_req_fn = as_insert_request,
+ .elevator_remove_req_fn = as_remove_request,
+ .elevator_requeue_req_fn = as_requeue_request,
+ .elevator_deactivate_req_fn = as_deactivate_request,
+ .elevator_queue_empty_fn = as_queue_empty,
+ .elevator_completed_req_fn = as_completed_request,
+ .elevator_former_req_fn = as_former_request,
+ .elevator_latter_req_fn = as_latter_request,
+ .elevator_set_req_fn = as_set_request,
+ .elevator_put_req_fn = as_put_request,
+ .elevator_may_queue_fn = as_may_queue,
+ .elevator_init_fn = as_init_queue,
+ .elevator_exit_fn = as_exit_queue,
+ },
+
+ .elevator_ktype = &as_ktype,
+ .elevator_name = "anticipatory",
+ .elevator_owner = THIS_MODULE,
+};
+
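/*
 * Editorial note: now that AS registers itself through elv_register()
 * instead of exporting a global elevator_t, the elevator core selects it
 * by elevator_name. On kernels of this era that typically means booting
 * with
 *
 *	elevator=anticipatory
 *
 * or, where runtime switching is supported, writing the name to
 * /sys/block/<dev>/queue/scheduler.
 */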
+static int __init as_init(void)
{
+ int ret;
+
arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
0, 0, NULL, NULL);
-
if (!arq_pool)
- panic("as: can't init slab pool\n");
+ return -ENOMEM;
- return 0;
+ ret = elv_register(&iosched_as);
+ if (!ret) {
+ /*
+ * don't allow AS to get unregistered, since we would have
+ * to browse all tasks in the system and release their
+ * as_io_context first
+ */
+ __module_get(THIS_MODULE);
+ return 0;
+ }
+
+ kmem_cache_destroy(arq_pool);
+ return ret;
}
-subsys_initcall(as_slab_setup);
-
-elevator_t iosched_as = {
- .elevator_merge_fn = as_merge,
- .elevator_merged_fn = as_merged_request,
- .elevator_merge_req_fn = as_merged_requests,
- .elevator_next_req_fn = as_next_request,
- .elevator_add_req_fn = as_insert_request,
- .elevator_remove_req_fn = as_remove_request,
- .elevator_requeue_req_fn = as_requeue_request,
- .elevator_queue_empty_fn = as_queue_empty,
- .elevator_completed_req_fn = as_completed_request,
- .elevator_former_req_fn = as_former_request,
- .elevator_latter_req_fn = as_latter_request,
- .elevator_set_req_fn = as_set_request,
- .elevator_put_req_fn = as_put_request,
- .elevator_may_queue_fn = as_may_queue,
- .elevator_init_fn = as_init,
- .elevator_exit_fn = as_exit,
-
- .elevator_ktype = &as_ktype,
- .elevator_name = "anticipatory",
-};
+static void __exit as_exit(void)
+{
+	/* unregister from the elevator core before tearing down the pool */
+	elv_unregister(&iosched_as);
+	kmem_cache_destroy(arq_pool);
+}
+
+module_init(as_init);
+module_exit(as_exit);
-EXPORT_SYMBOL(iosched_as);
+MODULE_AUTHOR("Nick Piggin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("anticipatory IO scheduler");