/*
 *  linux/drivers/block/cfq-iosched.c
 *
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>

/*
 * tunables
 */
static int cfq_quantum = 4;
static int cfq_queued = 8;

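/*
 * cfq_quantum controls how many requests cfq_dispatch_requests() moves
 * to the dispatch list before it stops restarting its round-robin pass,
 * and cfq_queued is subtracted from q->nr_requests before cfq_may_queue()
 * computes the per-process limit. Both are runtime tunables, exported
 * via sysfs below.
 */
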
#define CFQ_QHASH_SHIFT         6
#define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry) list_entry((entry), struct cfq_queue, cfq_hash)

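/*
 * The merge hash below indexes pending requests by their end sector
 * (rq_hash_key), so a bio whose start sector equals some request's end
 * sector can be found for a back merge with a single hash lookup.
 */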
#define CFQ_MHASH_SHIFT         8
#define CFQ_MHASH_BLOCK(sec)    ((sec) >> 3)
#define CFQ_MHASH_ENTRIES       (1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)       (hash_long(CFQ_MHASH_BLOCK((sec)), CFQ_MHASH_SHIFT))
#define ON_MHASH(crq)           !list_empty(&(crq)->hash)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)    list_entry((ptr), struct cfq_rq, hash)

#define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)

#define RQ_DATA(rq)             ((struct cfq_rq *) (rq)->elevator_private)

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static mempool_t *cfq_mpool;

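/*
 * Per-device scheduler state. Each process doing io against the device
 * gets a cfq_queue (hashed by tgid via cfq_hash); queues with pending
 * requests sit on rr_list and are serviced round-robin. crq_hash is the
 * back-merge hash described above.
 */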
struct cfq_data {
        struct list_head rr_list;
        struct list_head *dispatch;
        struct list_head *cfq_hash;

        struct list_head *crq_hash;

        unsigned int busy_queues;
        unsigned int max_queued;

        mempool_t *crq_pool;

        request_queue_t *queue;

        /*
         * tunables
         */
        unsigned int cfq_quantum;
        unsigned int cfq_queued;
};

struct cfq_queue {
        struct list_head cfq_hash;
        struct list_head cfq_list;
        struct rb_root sort_list;
        int pid;
        int queued[2];
#if 0
        /*
         * with a simple addition like this, we can do io priorities. almost.
         * does need a split request free list, too.
         */
        int io_prio;
#endif
};

struct cfq_rq {
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        struct cfq_queue *cfq_queue;

        struct list_head hash;
};

static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                              struct cfq_rq *crq);

/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
{
        list_del_init(&crq->hash);
}

static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
        if (ON_MHASH(crq))
                __cfq_del_crq_hash(crq);
}

static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
        cfq_del_crq_hash(crq);

        if (q->last_merge == crq->request)
                q->last_merge = NULL;
}

static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
        struct request *rq = crq->request;

        BUG_ON(ON_MHASH(crq));

        list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
}

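/*
 * Look up a request whose end sector matches @offset, for back merging.
 * Entries that are no longer mergeable are lazily pruned from the hash
 * as the chain is walked.
 */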
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
        struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
        struct list_head *entry, *next = hash_list->next;

        while ((entry = next) != hash_list) {
                struct cfq_rq *crq = list_entry_hash(entry);
                struct request *__rq = crq->request;

                next = entry->next;

                BUG_ON(!ON_MHASH(crq));

                if (!rq_mergeable(__rq)) {
                        __cfq_del_crq_hash(crq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE         (2)
#define RB_EMPTY(node)  ((node)->rb_node == NULL)
#define RB_CLEAR(node)  ((node)->rb_color = RB_NONE)
#define RB_CLEAR_ROOT(root)     ((root)->rb_node = NULL)
#define ON_RB(node)     ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node)      rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)           (rq)->sector
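/*
 * A node that is not on a tree is flagged by stashing the out-of-band
 * value RB_NONE in its rb_color field: RB_CLEAR() marks a node off-tree
 * and ON_RB() tests for it.
 */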

static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        if (ON_RB(&crq->rb_node)) {
                cfqq->queued[rq_data_dir(crq->request)]--;
                rb_erase(&crq->rb_node, &cfqq->sort_list);
                crq->cfq_queue = NULL;
        }
}

static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        struct rb_node **p = &cfqq->sort_list.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_rq *__crq;

        while (*p) {
                parent = *p;
                __crq = rb_entry_crq(parent);

                if (crq->rb_key < __crq->rb_key)
                        p = &(*p)->rb_left;
                else if (crq->rb_key > __crq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __crq;
        }

        rb_link_node(&crq->rb_node, parent, p);
        return NULL;
}

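/*
 * Insert crq into the process's sort tree, keyed by start sector. If a
 * request with the same key already exists (an alias), kick the old one
 * out to the dispatch list and retry until the insert succeeds.
 */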
static void
cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        struct request *rq = crq->request;
        struct cfq_rq *__alias;

        crq->rb_key = rq_rb_key(rq);
        cfqq->queued[rq_data_dir(rq)]++;
retry:
        __alias = __cfq_add_crq_rb(cfqq, crq);
        if (!__alias) {
                rb_insert_color(&crq->rb_node, &cfqq->sort_list);
                crq->cfq_queue = cfqq;
                return;
        }

        cfq_dispatch_sort(cfqd, cfqq, __alias);
        goto retry;
}

static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
        struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
        struct rb_node *n;

        if (!cfqq)
                goto out;

        n = cfqq->sort_list.rb_node;
        while (n) {
                struct cfq_rq *crq = rb_entry_crq(n);

                if (sector < crq->rb_key)
                        n = n->rb_left;
                else if (sector > crq->rb_key)
                        n = n->rb_right;
                else
                        return crq->request;
        }

out:
        return NULL;
}

static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);

        if (crq) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                cfq_remove_merge_hints(q, crq);
                list_del_init(&rq->queuelist);

                if (cfqq) {
                        cfq_del_crq_rb(cfqq, crq);

                        if (RB_EMPTY(&cfqq->sort_list))
                                cfq_put_queue(cfqd, cfqq);
                }
        }
}

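/*
 * Look for a request the bio could be merged into: first the cached
 * last_merge hint, then the merge hash for a back merge (bio begins
 * where a request ends), then the current process's sort tree for a
 * front merge (bio ends where a request begins).
 */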
static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct request *__rq;
        int ret;

        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                __rq = q->last_merge;
                goto out_insert;
        }

        __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
        if (__rq) {
                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_FRONT_MERGE;
                        goto out;
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        q->last_merge = __rq;
out_insert:
        *req = __rq;
        return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_rq *crq = RQ_DATA(req);

        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);

        if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                cfq_del_crq_rb(cfqq, crq);
                cfq_add_crq_rb(cfqd, cfqq, crq);
        }

        q->last_merge = req;
}

static void
cfq_merged_requests(request_queue_t *q, struct request *req,
                    struct request *next)
{
        cfq_merged_request(q, req);
        cfq_remove_request(q, next);
}

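/*
 * Move crq from its process queue to the device dispatch list, keeping
 * the dispatch list approximately sorted by ascending sector; a request
 * below the current head of the list is deferred toward the tail rather
 * than forcing an immediate backward seek.
 */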
static void
cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct cfq_rq *crq)
{
        struct list_head *head = cfqd->dispatch, *entry = head;
        struct request *__rq;

        cfq_del_crq_rb(cfqq, crq);
        cfq_remove_merge_hints(cfqd->queue, crq);

        if (!list_empty(head)) {
                __rq = list_entry_rq(head->next);

                if (crq->request->sector < __rq->sector) {
                        entry = head->prev;
                        goto link;
                }
        }

        while ((entry = entry->prev) != head) {
                __rq = list_entry_rq(entry);

                if (crq->request->sector <= __rq->sector)
                        break;
        }

link:
        list_add_tail(&crq->request->queuelist, entry);
}

static inline void
__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
                        struct cfq_queue *cfqq)
{
        struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));

        cfq_dispatch_sort(cfqd, cfqq, crq);
}

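/*
 * One round of dispatching: walk rr_list, moving one request from each
 * busy queue to the dispatch list; queues drained empty are dropped.
 * Rounds repeat until at least cfq_quantum requests have been moved or
 * no busy queue remains, which is what spreads service across processes.
 */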
static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;
        struct list_head *entry, *tmp;
        int ret, queued, good_queues;

        if (list_empty(&cfqd->rr_list))
                return 0;

        queued = ret = 0;
restart:
        good_queues = 0;
        list_for_each_safe(entry, tmp, &cfqd->rr_list) {
                cfqq = list_entry_cfqq(cfqd->rr_list.next);

                BUG_ON(RB_EMPTY(&cfqq->sort_list));

                __cfq_dispatch_requests(q, cfqd, cfqq);

                if (RB_EMPTY(&cfqq->sort_list))
                        cfq_put_queue(cfqd, cfqq);
                else
                        good_queues++;

                queued++;
                ret = 1;
        }

        if ((queued < cfqd->cfq_quantum) && good_queues)
                goto restart;

        return ret;
}

static struct request *cfq_next_request(request_queue_t *q)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct request *rq;

        if (!list_empty(cfqd->dispatch)) {
                struct cfq_rq *crq;
dispatch:
                rq = list_entry_rq(cfqd->dispatch->next);

                crq = RQ_DATA(rq);
                if (crq)
                        cfq_remove_merge_hints(q, crq);

                return rq;
        }

        if (cfq_dispatch_requests(q, cfqd))
                goto dispatch;

        return NULL;
}

static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
{
        struct list_head *hash_list = &cfqd->cfq_hash[hashval];
        struct list_head *entry;

        list_for_each(entry, hash_list) {
                struct cfq_queue *__cfqq = list_entry_qhash(entry);

                if (__cfqq->pid == pid)
                        return __cfqq;
        }

        return NULL;
}

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
{
        const int hashval = hash_long(pid, CFQ_QHASH_SHIFT);

        return __cfq_find_cfq_hash(cfqd, pid, hashval);
}

static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfqd->busy_queues--;
        list_del(&cfqq->cfq_list);
        list_del(&cfqq->cfq_hash);
        mempool_free(cfqq, cfq_mpool);
}

static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid)
{
        const int hashval = hash_long(pid, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);

        if (!cfqq) {
                cfqq = mempool_alloc(cfq_mpool, GFP_NOIO);

                INIT_LIST_HEAD(&cfqq->cfq_hash);
                INIT_LIST_HEAD(&cfqq->cfq_list);
                RB_CLEAR_ROOT(&cfqq->sort_list);

                cfqq->pid = pid;
                cfqq->queued[0] = cfqq->queued[1] = 0;
                list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
        }

        return cfqq;
}

static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
        struct cfq_queue *cfqq;

        cfqq = cfq_get_queue(cfqd, current->tgid);

        cfq_add_crq_rb(cfqd, cfqq, crq);

        if (list_empty(&cfqq->cfq_list)) {
                list_add(&cfqq->cfq_list, &cfqd->rr_list);
                cfqd->busy_queues++;
        }
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);

        switch (where) {
                case ELEVATOR_INSERT_BACK:
                        while (cfq_dispatch_requests(q, cfqd))
                                ;
                        list_add_tail(&rq->queuelist, cfqd->dispatch);
                        break;
                case ELEVATOR_INSERT_FRONT:
                        list_add(&rq->queuelist, cfqd->dispatch);
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
                        cfq_enqueue(cfqd, crq);
                        break;
                default:
                        printk("%s: bad insert point %d\n", __FUNCTION__, where);
                        return;
        }

        if (rq_mergeable(rq)) {
                cfq_add_crq_hash(cfqd, crq);

                if (!q->last_merge)
                        q->last_merge = rq;
        }
}

static int cfq_queue_empty(request_queue_t *q)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;

        if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
                return 1;

        return 0;
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
        struct cfq_rq *crq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&crq->rb_node);

        if (rbprev)
                return rb_entry_crq(rbprev)->request;

        return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
        struct cfq_rq *crq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&crq->rb_node);

        if (rbnext)
                return rb_entry_crq(rbnext)->request;

        return NULL;
}

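/*
 * Admission control: the available request slots (q->nr_requests minus
 * the cfq_queued reserve) are divided evenly among the busy queues, and
 * a process that already has more than its share queued in the given
 * direction is told to wait. The per-queue limit is clamped to at least
 * 3 and at most max_queued.
 */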
static int cfq_may_queue(request_queue_t *q, int rw)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_queue *cfqq;
        int ret = 1;

        if (!cfqd->busy_queues)
                goto out;

        cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
        if (cfqq) {
                int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;

                if (limit < 3)
                        limit = 3;
                else if (limit > cfqd->max_queued)
                        limit = cfqd->max_queued;

                if (cfqq->queued[rw] > limit)
                        ret = 0;
        }
out:
        return ret;
}

static void cfq_put_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);
        struct request_list *rl;
        int other_rw;

        if (crq) {
                BUG_ON(q->last_merge == rq);
                BUG_ON(ON_MHASH(crq));

                mempool_free(crq, cfqd->crq_pool);
                rq->elevator_private = NULL;
        }

        /*
         * work-around for a may_queue "bug": if a read gets issued and
         * refused a slot because writes ate all the allowed slots, and no
         * other reads are pending for this queue, it could get stuck
         * indefinitely, since freed_request() only checks the waitqueue
         * for the direction being freed (or vice versa for a single write
         * vs many reads). so check here whether "the other" data direction
         * might be able to queue, and wake any waiters
         */
        rl = &q->rq;
        other_rw = rq_data_dir(rq) ^ 1;
        if (rl->count[other_rw] <= q->nr_requests) {
                smp_mb();
                if (waitqueue_active(&rl->wait[other_rw]))
                        wake_up(&rl->wait[other_rw]);
        }
}

static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_rq *crq = mempool_alloc(cfqd->crq_pool, gfp_mask);

        if (crq) {
                RB_CLEAR(&crq->rb_node);
                crq->request = rq;
                crq->cfq_queue = NULL;
                INIT_LIST_HEAD(&crq->hash);
                rq->elevator_private = crq;
                return 0;
        }

        return 1;
}

static void cfq_exit(request_queue_t *q, elevator_t *e)
{
        struct cfq_data *cfqd = e->elevator_data;

        e->elevator_data = NULL;
        mempool_destroy(cfqd->crq_pool);
        kfree(cfqd->crq_hash);
        kfree(cfqd->cfq_hash);
        kfree(cfqd);
}

static int cfq_init(request_queue_t *q, elevator_t *e)
{
        struct cfq_data *cfqd;
        int i;

        cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
        if (!cfqd)
                return -ENOMEM;

        memset(cfqd, 0, sizeof(*cfqd));
        INIT_LIST_HEAD(&cfqd->rr_list);

        cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
        if (!cfqd->crq_hash)
                goto out_crqhash;

        cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
        if (!cfqd->cfq_hash)
                goto out_cfqhash;

        cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
        if (!cfqd->crq_pool)
                goto out_crqpool;

        for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
                INIT_LIST_HEAD(&cfqd->crq_hash[i]);
        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                INIT_LIST_HEAD(&cfqd->cfq_hash[i]);

        cfqd->dispatch = &q->queue_head;
        e->elevator_data = cfqd;
        cfqd->queue = q;

        /*
         * just set it to some high value, we want anyone to be able to queue
         * some requests. fairness is handled differently
         */
        cfqd->max_queued = q->nr_requests;
        q->nr_requests = 8192;

        cfqd->cfq_queued = cfq_queued;
        cfqd->cfq_quantum = cfq_quantum;

        return 0;
out_crqpool:
        kfree(cfqd->cfq_hash);
out_cfqhash:
        kfree(cfqd->crq_hash);
out_crqhash:
        kfree(cfqd);
        return -ENOMEM;
}

static int __init cfq_slab_setup(void)
{
        crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
                                        NULL, NULL);

        if (!crq_pool)
                panic("cfq_iosched: can't init crq pool\n");

        cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
                                        NULL, NULL);

        if (!cfq_pool)
                panic("cfq_iosched: can't init cfq pool\n");

        cfq_mpool = mempool_create(64, mempool_alloc_slab, mempool_free_slab, cfq_pool);

        if (!cfq_mpool)
                panic("cfq_iosched: can't init cfq mpool\n");

        return 0;
}

subsys_initcall(cfq_slab_setup);

/*
 * sysfs parts below -->
 */
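/*
 * Once the elevator is active, the attributes defined here appear under
 * the queue's iosched directory (the exact path depends on the device
 * name), e.g.:
 *
 *      cat /sys/block/hda/queue/iosched/quantum
 *      echo 8 > /sys/block/hda/queue/iosched/quantum
 */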
struct cfq_fs_entry {
        struct attribute attr;
        ssize_t (*show)(struct cfq_data *, char *);
        ssize_t (*store)(struct cfq_data *, const char *, size_t);
};

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR)                                    \
static ssize_t __FUNC(struct cfq_data *cfqd, char *page)                \
{                                                                       \
        return cfq_var_show(__VAR, (page));                             \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                         \
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)    \
{                                                                       \
        int ret = cfq_var_store(__PTR, (page), count);                  \
        if (*(__PTR) < (MIN))                                           \
                *(__PTR) = (MIN);                                       \
        else if (*(__PTR) > (MAX))                                      \
                *(__PTR) = (MAX);                                       \
        return ret;                                                     \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
#undef STORE_FUNCTION

static struct cfq_fs_entry cfq_quantum_entry = {
        .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_quantum_show,
        .store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
        .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_queued_show,
        .store = cfq_queued_store,
};

static struct attribute *default_attrs[] = {
        &cfq_quantum_entry.attr,
        &cfq_queued_entry.attr,
        NULL,
};

#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)

static ssize_t
cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct cfq_fs_entry *entry = to_cfq(attr);

        if (!entry->show)
                return 0;

        return entry->show(e->elevator_data, page);
}

static ssize_t
cfq_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct cfq_fs_entry *entry = to_cfq(attr);

        if (!entry->store)
                return -EINVAL;

        return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops cfq_sysfs_ops = {
        .show   = cfq_attr_show,
        .store  = cfq_attr_store,
};

struct kobj_type cfq_ktype = {
        .sysfs_ops      = &cfq_sysfs_ops,
        .default_attrs  = default_attrs,
};

elevator_t iosched_cfq = {
        .elevator_name =                "cfq",
        .elevator_ktype =               &cfq_ktype,
        .elevator_merge_fn =            cfq_merge,
        .elevator_merged_fn =           cfq_merged_request,
        .elevator_merge_req_fn =        cfq_merged_requests,
        .elevator_next_req_fn =         cfq_next_request,
        .elevator_add_req_fn =          cfq_insert_request,
        .elevator_remove_req_fn =       cfq_remove_request,
        .elevator_queue_empty_fn =      cfq_queue_empty,
        .elevator_former_req_fn =       cfq_former_request,
        .elevator_latter_req_fn =       cfq_latter_request,
        .elevator_set_req_fn =          cfq_set_request,
        .elevator_put_req_fn =          cfq_put_request,
        .elevator_may_queue_fn =        cfq_may_queue,
        .elevator_init_fn =             cfq_init,
        .elevator_exit_fn =             cfq_exit,
};

EXPORT_SYMBOL(iosched_cfq);