* read_batch_expire describes how long we will allow a stream of reads to
* persist before looking to see whether it is time to switch over to writes.
*/
-#define default_read_batch_expire (HZ / 4)
+#define default_read_batch_expire (HZ / 2)
/*
* write_batch_expire describes how long we want a stream of writes to run for.
* See, the problem is: we can send a lot of writes to disk cache / TCQ in
* a short amount of time...
*/
-#define default_write_batch_expire (HZ / 16)
+#define default_write_batch_expire (HZ / 8)
/*
* max time we may wait to anticipate a read (default around 6ms)
}
as_remove_queued_request(ad->q, rq);
+ WARN_ON(arq->state != AS_RQ_QUEUED);
+
list_add(&rq->queuelist, insert);
+ arq->state = AS_RQ_DISPATCHED;
if (arq->io_context && arq->io_context->aic)
atomic_inc(&arq->io_context->aic->nr_dispatched);
-
- WARN_ON(arq->state != AS_RQ_QUEUED);
- arq->state = AS_RQ_DISPATCHED;
-
ad->nr_dispatched++;
}
as_antic_stop(ad);
}
+/*
+ * Account a request that is inserted directly onto the dispatch queue.
+ * arq->io_context->aic->nr_dispatched should not need to be incremented
+ * because only new requests should come through here: requeues go through
+ * our explicit requeue handler.
+ */
+static void as_account_queued_request(struct as_data *ad, struct request *rq)
+{
+ /* NOTE(review): non-fs requests are deliberately not accounted here —
+  * presumably RQ_DATA() yields valid as_rq private data only for fs
+  * requests; confirm against the other RQ_DATA() call sites.
+  */
+ if (blk_fs_request(rq)) {
+ struct as_rq *arq = RQ_DATA(rq);
+ arq->state = AS_RQ_DISPATCHED;
+ ad->nr_dispatched++;
+ }
+}
+
+
static void
as_insert_request(request_queue_t *q, struct request *rq, int where)
{
as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
list_add_tail(&rq->queuelist, ad->dispatch);
+ as_account_queued_request(ad, rq);
as_antic_stop(ad);
break;
case ELEVATOR_INSERT_FRONT:
list_add(&rq->queuelist, ad->dispatch);
+ as_account_queued_request(ad, rq);
as_antic_stop(ad);
break;
case ELEVATOR_INSERT_SORT: