/*
 *  linux/drivers/block/cfq-iosched.c
 *
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>

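/*
 * tunables: cfq_quantum bounds how many requests are moved to the
 * dispatch list per round in cfq_dispatch_requests(), cfq_queued is the
 * number of requests held back when cfq_may_queue() computes the
 * per-process allocation limit. both are writable through sysfs below.
 */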
static int cfq_quantum = 4;
static int cfq_queued = 8;

#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	list_entry((entry), struct cfq_queue, cfq_hash)

#define CFQ_MHASH_SHIFT		8
#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)	(hash_long(CFQ_MHASH_BLOCK((sec)), CFQ_MHASH_SHIFT))
#define ON_MHASH(crq)		!list_empty(&(crq)->hash)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct cfq_rq, hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

#define RQ_DATA(rq)		((struct cfq_rq *) (rq)->elevator_private)

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static mempool_t *cfq_mpool;

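/*
 * per-device scheduler state: the round robin list of busy queues, the
 * dispatch list that requests are drained to, and the two hashes used
 * for per-process queue lookup and for back merge lookup by end sector.
 */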
struct cfq_data {
	struct list_head rr_list;
	struct list_head *dispatch;
	struct list_head *cfq_hash;

	struct list_head *crq_hash;

	unsigned int busy_queues;
	unsigned int max_queued;

	mempool_t *crq_pool;

	request_queue_t *queue;

	/*
	 * tunables
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_queued;
};

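/*
 * per-process queue, hashed by tgid. pending requests are kept sorted
 * by start sector in the sort_list rbtree until they are dispatched.
 */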
struct cfq_queue {
	struct list_head cfq_hash;
	struct list_head cfq_list;
	struct rb_root sort_list;
	int pid;
	int queued[2];
#if 0
	/*
	 * with a simple addition like this, we can do io priorities. almost.
	 * does need a split request free list, too.
	 */
	int io_prio
#endif
};

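/*
 * per-request state, hung off rq->elevator_private: the rbtree node and
 * key for the owning cfq_queue's sort_list, plus the merge hash linkage.
 */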
struct cfq_rq {
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	struct cfq_queue *cfq_queue;

	struct list_head hash;
};

static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			      struct cfq_rq *crq);

/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
{
	list_del_init(&crq->hash);
}

static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
	if (ON_MHASH(crq))
		__cfq_del_crq_hash(crq);
}

static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
	cfq_del_crq_hash(crq);

	if (q->last_merge == crq->request)
		q->last_merge = NULL;
}

static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	struct request *rq = crq->request;

	BUG_ON(ON_MHASH(crq));

	list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
}

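/*
 * look up a request whose last sector ends exactly at @offset, so an
 * incoming bio starting there can be back merged. stale (no longer
 * mergeable) entries are pruned from the hash as we walk it.
 */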
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
	struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct cfq_rq *crq = list_entry_hash(entry);
		struct request *__rq = crq->request;

		next = entry->next;

		BUG_ON(!ON_MHASH(crq));

		if (!rq_mergeable(__rq)) {
			__cfq_del_crq_hash(crq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE			(2)
#define RB_EMPTY(node)		((node)->rb_node == NULL)
#define RB_CLEAR(node)		((node)->rb_color = RB_NONE)
#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)		(rq)->sector

static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	if (ON_RB(&crq->rb_node)) {
		cfqq->queued[rq_data_dir(crq->request)]--;
		rb_erase(&crq->rb_node, &cfqq->sort_list);
		crq->cfq_queue = NULL;
	}
}

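/*
 * insert crq into the queue's sort_list, ordered by rb_key (start
 * sector). if an entry with the same key already exists, it is
 * returned instead so the caller can get it out of the way first.
 */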
static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	struct rb_node **p = &cfqq->sort_list.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_rq *__crq;

	while (*p) {
		parent = *p;
		__crq = rb_entry_crq(parent);

		if (crq->rb_key < __crq->rb_key)
			p = &(*p)->rb_left;
		else if (crq->rb_key > __crq->rb_key)
			p = &(*p)->rb_right;
		else
			return __crq;
	}

	rb_link_node(&crq->rb_node, parent, p);
	return NULL;
}

static void
cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	struct request *rq = crq->request;
	struct cfq_rq *__alias;

	crq->rb_key = rq_rb_key(rq);
	cfqq->queued[rq_data_dir(rq)]++;
retry:
	__alias = __cfq_add_crq_rb(cfqq, crq);
	if (!__alias) {
		rb_insert_color(&crq->rb_node, &cfqq->sort_list);
		crq->cfq_queue = cfqq;
		return;
	}

	cfq_dispatch_sort(cfqd, cfqq, __alias);
	goto retry;
}

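/*
 * find a request in the current process' sort_list that starts at
 * @sector, for front merging.
 */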
static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
	struct rb_node *n;

	if (!cfqq)
		goto out;

	n = cfqq->sort_list.rb_node;
	while (n) {
		struct cfq_rq *crq = rb_entry_crq(n);

		if (sector < crq->rb_key)
			n = n->rb_left;
		else if (sector > crq->rb_key)
			n = n->rb_right;
		else
			return crq->request;
	}

out:
	return NULL;
}

static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		cfq_remove_merge_hints(q, crq);
		list_del_init(&rq->queuelist);

		if (cfqq) {
			cfq_del_crq_rb(cfqq, crq);

			if (RB_EMPTY(&cfqq->sort_list))
				cfq_put_queue(cfqd, cfqq);
		}
	}
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct request *__rq;
	int ret;

	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
	if (__rq) {
		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_FRONT_MERGE;
			goto out;
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	*req = __rq;
	return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(req);

	cfq_del_crq_hash(crq);
	cfq_add_crq_hash(cfqd, crq);

	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		cfq_del_crq_rb(cfqq, crq);
		cfq_add_crq_rb(cfqd, cfqq, crq);
	}

	q->last_merge = req;
}

static void
cfq_merged_requests(request_queue_t *q, struct request *req,
		    struct request *next)
{
	cfq_merged_request(q, req);
	cfq_remove_request(q, next);
}

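/*
 * move a request from its cfq_queue to the device dispatch list,
 * keeping the dispatch list roughly sorted by sector. the list is
 * walked backwards from the tail to find the insertion point.
 */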
static void
cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct cfq_rq *crq)
{
	struct list_head *head = cfqd->dispatch, *entry = head;
	struct request *__rq;

	cfq_del_crq_rb(cfqq, crq);
	cfq_remove_merge_hints(cfqd->queue, crq);

	if (!list_empty(head)) {
		__rq = list_entry_rq(head->next);

		if (crq->request->sector < __rq->sector) {
			list_add(&crq->request->queuelist, head);
			return;
		}
	}

	while ((entry = entry->prev) != head) {
		__rq = list_entry_rq(entry);

		if (crq->request->sector <= __rq->sector)
			break;
	}

	list_add_tail(&crq->request->queuelist, entry);
}

static inline void
__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
			struct cfq_queue *cfqq)
{
	struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));

	cfq_dispatch_sort(cfqd, cfqq, crq);
}

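/*
 * the fairness core: do one round robin pass over all busy queues,
 * dispatching the lowest sector request from each. passes are repeated
 * until cfq_quantum requests have been moved or no busy queues remain.
 */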
static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	struct list_head *entry, *tmp;
	int ret, queued, good_queues;

	if (list_empty(&cfqd->rr_list))
		return 0;

	queued = ret = 0;
restart:
	good_queues = 0;
	list_for_each_safe(entry, tmp, &cfqd->rr_list) {
		cfqq = list_entry_cfqq(cfqd->rr_list.next);

		BUG_ON(RB_EMPTY(&cfqq->sort_list));

		__cfq_dispatch_requests(q, cfqd, cfqq);

		if (RB_EMPTY(&cfqq->sort_list))
			cfq_put_queue(cfqd, cfqq);
		else
			good_queues++;

		queued++;
		ret = 1;
	}

	if ((queued < cfqd->cfq_quantum) && good_queues)
		goto restart;

	return ret;
}

static struct request *cfq_next_request(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct request *rq;

	if (!list_empty(cfqd->dispatch)) {
		struct cfq_rq *crq;
dispatch:
		rq = list_entry_rq(cfqd->dispatch->next);

		crq = RQ_DATA(rq);
		if (crq)
			cfq_remove_merge_hints(q, crq);

		return rq;
	}

	if (cfq_dispatch_requests(q, cfqd))
		goto dispatch;

	return NULL;
}

static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
{
	struct list_head *hash_list = &cfqd->cfq_hash[hashval];
	struct list_head *entry;

	list_for_each(entry, hash_list) {
		struct cfq_queue *__cfqq = list_entry_qhash(entry);

		if (__cfqq->pid == pid)
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
{
	const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);

	return __cfq_find_cfq_hash(cfqd, pid, hashval);
}

static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqd->busy_queues--;
	list_del(&cfqq->cfq_list);
	list_del(&cfqq->cfq_hash);
	mempool_free(cfqq, cfq_mpool);
}

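/*
 * find the cfq_queue for this process (keyed by tgid), allocating and
 * hashing a new one if it does not exist. for blocking allocations the
 * queue_lock is dropped around mempool_alloc() and the lookup retried.
 */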
static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
					 int gfp_mask)
{
	const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	request_queue_t *q = cfqd->queue;

retry:
	cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(q->queue_lock);
			new_cfqq = mempool_alloc(cfq_mpool, gfp_mask);
			spin_lock_irq(q->queue_lock);
			goto retry;
		} else
			return NULL;

		INIT_LIST_HEAD(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);

		cfqq->pid = pid;
		cfqq->queued[0] = cfqq->queued[1] = 0;
		list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
	}

	if (new_cfqq)
		mempool_free(new_cfqq, cfq_mpool);

	return cfqq;
}

static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
				       int gfp_mask)
{
	request_queue_t *q = cfqd->queue;
	struct cfq_queue *cfqq;

	spin_lock_irq(q->queue_lock);
	cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
	spin_unlock_irq(q->queue_lock);

	return cfqq;
}

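/*
 * add a request to its process' queue. if the queue was idle it joins
 * the round robin list and the busy queue count goes up; if no queue
 * can be found the request falls straight through to the dispatch list.
 */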
static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	struct cfq_queue *cfqq;

	cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
	if (cfqq) {
		cfq_add_crq_rb(cfqd, cfqq, crq);

		if (list_empty(&cfqq->cfq_list)) {
			list_add(&cfqq->cfq_list, &cfqd->rr_list);
			cfqd->busy_queues++;
		}
	} else {
		/*
		 * this can only happen if the request wasn't allocated
		 * through blk_alloc_request(), eg stack requests from ide-cd
		 * (those should be removed) _and_ we are in OOM.
		 */
		list_add_tail(&crq->request->queuelist, cfqd->dispatch);
	}
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (cfq_dispatch_requests(q, cfqd))
				;
			list_add_tail(&rq->queuelist, cfqd->dispatch);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, cfqd->dispatch);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			cfq_enqueue(cfqd, crq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__, where);
			return;
	}

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);

		if (!q->last_merge)
			q->last_merge = rq;
	}
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;

	if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
		return 1;

	return 0;
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

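/*
 * may this process allocate another request? each busy process gets an
 * even share of the queue's request slots (clamped to max_queued), and
 * is refused once its queued count for this data direction exceeds it.
 */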
static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_queue *cfqq;
	int ret = 1;

	if (!cfqd->busy_queues)
		goto out;

	cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
	if (cfqq) {
		int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;

		if (limit < 3)
			limit = 3;
		else if (limit > cfqd->max_queued)
			limit = cfqd->max_queued;

		if (cfqq->queued[rw] > limit)
			ret = 0;
	}
out:
	return ret;
}

static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);
	struct request_list *rl;
	int other_rw;

	if (crq) {
		BUG_ON(q->last_merge == rq);
		BUG_ON(ON_MHASH(crq));

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;
	}

	/*
	 * work-around for may_queue "bug": if a read gets issued and refused
	 * to queue because writes ate all the allowed slots and no other
	 * reads are pending for this queue, it could get stuck infinitely
	 * since freed_request() only checks the waitqueue for writes when
	 * freeing them. or vice versa for a single write vs many reads.
	 * so check here whether "the other" data direction might be able
	 * to queue and wake them
	 */
	rl = &q->rq;
	other_rw = rq_data_dir(rq) ^ 1;
	if (rl->count[other_rw] <= q->nr_requests) {
		smp_mb();
		if (waitqueue_active(&rl->wait[other_rw]))
			wake_up(&rl->wait[other_rw]);
	}
}

static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_queue *cfqq;
	struct cfq_rq *crq;

	/*
	 * prepare a queue up front, so cfq_enqueue() doesn't have to
	 */
	cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
	if (!cfqq)
		return 1;

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		memset(crq, 0, sizeof(*crq));
		RB_CLEAR(&crq->rb_node);
		crq->request = rq;
		crq->cfq_queue = NULL;
		INIT_LIST_HEAD(&crq->hash);
		rq->elevator_private = crq;
		return 0;
	}

	return 1;
}

static void cfq_exit(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;

	e->elevator_data = NULL;
	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}

static int cfq_init(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return -ENOMEM;

	memset(cfqd, 0, sizeof(*cfqd));
	INIT_LIST_HEAD(&cfqd->rr_list);

	cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_LIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_LIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->dispatch = &q->queue_head;
	e->elevator_data = cfqd;
	cfqd->queue = q;

	/*
	 * just set it to some high value, we want anyone to be able to queue
	 * some requests. fairness is handled differently
	 */
	cfqd->max_queued = q->nr_requests;
	q->nr_requests = 8192;

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;

	return 0;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return -ENOMEM;
}

static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		panic("cfq_iosched: can't init crq pool\n");

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		panic("cfq_iosched: can't init cfq pool\n");

	cfq_mpool = mempool_create(64, mempool_alloc_slab, mempool_free_slab, cfq_pool);
	if (!cfq_mpool)
		panic("cfq_iosched: can't init cfq mpool\n");

	return 0;
}

subsys_initcall(cfq_slab_setup);

/*
 * sysfs parts below -->
 */
struct cfq_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct cfq_data *, char *);
	ssize_t (*store)(struct cfq_data *, const char *, size_t);
};

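/*
 * the quantum and queued tunables are exposed as attributes of the
 * elevator kobject, which the block layer registers under the device's
 * queue directory in sysfs (the "iosched" subdirectory).
 */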
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR)					\
static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
{									\
	return cfq_var_show(__VAR, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
{									\
	int ret = cfq_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
#undef STORE_FUNCTION

static struct cfq_fs_entry cfq_quantum_entry = {
	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_quantum_show,
	.store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_queued_show,
	.store = cfq_queued_store,
};

static struct attribute *default_attrs[] = {
	&cfq_quantum_entry.attr,
	&cfq_queued_entry.attr,
	NULL,
};

#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)

static ssize_t
cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->show)
		return 0;

	return entry->show(e->elevator_data, page);
}

static ssize_t
cfq_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->store)
		return -EINVAL;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops cfq_sysfs_ops = {
	.show	= cfq_attr_show,
	.store	= cfq_attr_store,
};

struct kobj_type cfq_ktype = {
	.sysfs_ops	= &cfq_sysfs_ops,
	.default_attrs	= default_attrs,
};

elevator_t iosched_cfq = {
	.elevator_name =		"cfq",
	.elevator_ktype =		&cfq_ktype,
	.elevator_merge_fn =		cfq_merge,
	.elevator_merged_fn =		cfq_merged_request,
	.elevator_merge_req_fn =	cfq_merged_requests,
	.elevator_next_req_fn =		cfq_next_request,
	.elevator_add_req_fn =		cfq_insert_request,
	.elevator_remove_req_fn =	cfq_remove_request,
	.elevator_queue_empty_fn =	cfq_queue_empty,
	.elevator_former_req_fn =	cfq_former_request,
	.elevator_latter_req_fn =	cfq_latter_request,
	.elevator_set_req_fn =		cfq_set_request,
	.elevator_put_req_fn =		cfq_put_request,
	.elevator_may_queue_fn =	cfq_may_queue,
	.elevator_init_fn =		cfq_init,
	.elevator_exit_fn =		cfq_exit,
};

EXPORT_SYMBOL(iosched_cfq);