CKRM E16rc1 io controller support.
author		Marc Fiuczynski <mef@cs.princeton.edu>
		Thu, 23 Sep 2004 19:26:45 +0000 (19:26 +0000)
committer	Marc Fiuczynski <mef@cs.princeton.edu>
		Thu, 23 Sep 2004 19:26:45 +0000 (19:26 +0000)
This version implements only 20 classes of I/O priority. The next release
candidate will support limits, which can then be used to express
proportional sharing between classes (vservers).

I modified cfq-iosched.c to work around an issue with Fedora Core 2 that
does not appear in the vanilla 2.6.8.1 kernel.  The CKRM folks are aware of
the problem and will fix it (hopefully also in the next release).
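
For orientation, the 0%-100% range in 5% steps corresponds to the p0..p20
priority levels exported through sysfs further down in this patch, which
suggests IOPRIO_NR == 21 with IOPRIO_IDLE == 0 and IOPRIO_RT == 20 (the
excerpt does not show these definitions). A minimal sketch of that mapping,
under those assumptions:

	/* illustrative only: percentage <-> io priority level */
	#define IOPRIO_NR	21	/* assumed: 0%..100% in 5% steps */

	static inline int pct_to_ioprio(int pct)  { return pct / 5; }  /* 95% -> level 19 */
	static inline int ioprio_to_pct(int prio) { return prio * 5; } /* level 20 -> 100% (RT) */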

drivers/block/cfq-iosched.c
drivers/block/ckrm-io.c
include/linux/ckrm-io.h

drivers/block/cfq-iosched.c
index 068f4ea..e9c9732 100644
@@ -6,6 +6,18 @@
  *  Based on ideas from a previously unfinished io
  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
  *
+ *  IO priorities are supported, from 0% to 100% in 5% increments. Both of
+ *  the extreme values have special meaning - the 0% class is only allowed
+ *  to do io if no one else wants to use the disk. 100% is considered
+ *  real-time io and always gets priority. The default process io rate is
+ *  95%. In the absence of other io, a class may consume 100% of the disk
+ *  bandwidth regardless. Within a class, bandwidth is distributed equally
+ *  among the citizens.
+ *
+ * TODO:
+ *     - cfq_select_requests() needs some work for 5-95% io
+ *     - barriers not supported
+ *     - export grace periods in ms, not jiffies
+ *
  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
  */
 #include <linux/kernel.h>
 #include <linux/rbtree.h>
 #include <linux/mempool.h>
 
+#if IOPRIO_NR > BITS_PER_LONG
+#error Cannot support this many io priority levels
+#endif
+
 /*
  * tunables
  */
-static int cfq_quantum = 4;
-static int cfq_queued = 8;
+static int cfq_quantum = 6;
+static int cfq_quantum_io = 256;
+static int cfq_idle_quantum = 1;
+static int cfq_idle_quantum_io = 64;
+static int cfq_queued = 4;
+static int cfq_grace_rt = HZ / 100 ?: 1;
+static int cfq_grace_idle = HZ / 10;
 
 #define CFQ_QHASH_SHIFT                6
 #define CFQ_QHASH_ENTRIES      (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry)        list_entry((entry), struct cfq_queue, cfq_hash)
+#define list_entry_qhash(entry)        hlist_entry((entry), struct cfq_queue, cfq_hash)
 
 #define CFQ_MHASH_SHIFT                8
 #define CFQ_MHASH_BLOCK(sec)   ((sec) >> 3)
 #define CFQ_MHASH_ENTRIES      (1 << CFQ_MHASH_SHIFT)
 #define CFQ_MHASH_FN(sec)      (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
-#define ON_MHASH(crq)          !list_empty(&(crq)->hash)
 #define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)   list_entry((ptr), struct cfq_rq, hash)
+#define list_entry_hash(ptr)   hlist_entry((ptr), struct cfq_rq, hash)
 
 #define list_entry_cfqq(ptr)   list_entry((ptr), struct cfq_queue, cfq_list)
+#define list_entry_prio(ptr)   list_entry((ptr), struct cfq_rq, prio_list)
+
+#define cfq_account_io(crq)    \
+       ((crq)->ioprio != IOPRIO_IDLE && (crq)->ioprio != IOPRIO_RT)
 
-#define RQ_DATA(rq)            ((struct cfq_rq *) (rq)->elevator_private)
+/*
+ * defines how we distribute bandwidth (can be tgid, uid, etc)
+ */
+
+/* FIXME: change hash_key to be sizeof(void *) rather than sizeof(int) 
+ * otherwise the cast of cki_tsk_icls will not work reliably on 64-bit arches.
+ * OR, change cki_tsk_icls to return ints (will need another id space to be 
+ * managed)
+ */
+
+#if defined(CONFIG_CKRM_RES_BLKIO) || defined(CONFIG_CKRM_RES_BLKIO_MODULE)
+extern inline void *cki_hash_key(struct task_struct *tsk);
+extern inline int cki_ioprio(struct task_struct *tsk);
+#define cfq_hash_key(current)   ((int)cki_hash_key((current)))
+#define cfq_ioprio(current)    (cki_ioprio((current)))
+
+#else
+#define cfq_hash_key(current)   ((current)->tgid)
+/*
+ * move to io_context
+ */
+#define cfq_ioprio(current)    ((current)->ioprio)
+#endif
+
+#define CFQ_WAIT_RT    0
+#define CFQ_WAIT_NORM  1
 
 static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static mempool_t *cfq_mpool;
 
-struct cfq_data {
+/*
+ * defines an io priority level
+ */
+struct io_prio_data {
        struct list_head rr_list;
+       int busy_queues;
+       int busy_rq;
+       unsigned long busy_sectors;
+       
+       /*
+        * Statistics on requests, sectors and queues
+        * added to (in) and dispatched from (out)
+        * this priority level. Reinsertion of previously
+        * dispatched crq's into cfq's results in double counting,
+        * which is ignored for now, as in - out should
+        * still be accurate.
+        */
+       atomic_t cum_rq_in, cum_rq_out;
+       atomic_t cum_sectors_in, cum_sectors_out;
+       atomic_t cum_queues_in, cum_queues_out;
+
+       struct list_head prio_list;
+       int last_rq;
+       int last_sectors;
+};
+
+/*
+ * per-request queue structure
+ */
+struct cfq_data {
        struct list_head *dispatch;
-       struct list_head *cfq_hash;
+       struct hlist_head *cfq_hash;
+       struct hlist_head *crq_hash;
+       mempool_t *crq_pool;
 
-       struct list_head *crq_hash;
+       struct io_prio_data cid[IOPRIO_NR];
 
-       unsigned int busy_queues;
-       unsigned int max_queued;
+       /*
+        * total number of busy queues and requests
+        */
+       int busy_rq;
+       int busy_queues;
+       unsigned long busy_sectors;
 
-       mempool_t *crq_pool;
+       unsigned long rq_starved_mask;
 
-       request_queue_t *queue;
+       /*
+        * grace period handling
+        */
+       struct timer_list timer;
+       unsigned long wait_end;
+       unsigned long flags;
+       struct work_struct work;
 
        /*
         * tunables
         */
        unsigned int cfq_quantum;
+       unsigned int cfq_quantum_io;
+       unsigned int cfq_idle_quantum;
+       unsigned int cfq_idle_quantum_io;
        unsigned int cfq_queued;
+       unsigned int cfq_grace_rt;
+       unsigned int cfq_grace_idle;
 };
 
+/*
+ * per-class structure
+ */
 struct cfq_queue {
-       struct list_head cfq_hash;
        struct list_head cfq_list;
+       struct hlist_node cfq_hash;
+       int hash_key;
        struct rb_root sort_list;
-       int pid;
        int queued[2];
-#if 0
-       /*
-        * with a simple addition like this, we can do io priorities. almost.
-        * does need a split request free list, too.
-        */
-       int io_prio
-#endif
+       int ioprio;
 };
 
+/*
+ * per-request structure
+ */
 struct cfq_rq {
+       struct cfq_queue *cfq_queue;
        struct rb_node rb_node;
+       struct hlist_node hash;
        sector_t rb_key;
 
        struct request *request;
 
-       struct cfq_queue *cfq_queue;
-
-       struct list_head hash;
+       struct list_head prio_list;
+       unsigned long nr_sectors;
+       int ioprio;
 };
 
 static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
-static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                             struct cfq_rq *crq);
+static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq);
 
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
-static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
-{
-       list_del_init(&crq->hash);
-}
-
 static inline void cfq_del_crq_hash(struct cfq_rq *crq)
 {
-       if (ON_MHASH(crq))
-               __cfq_del_crq_hash(crq);
+       hlist_del_init(&crq->hash);
 }
 
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
+static inline void
+cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
 {
        cfq_del_crq_hash(crq);
 
@@ -125,27 +214,26 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
        struct request *rq = crq->request;
+       const int hash_idx = CFQ_MHASH_FN(rq_hash_key(rq));
 
-       BUG_ON(ON_MHASH(crq));
+       BUG_ON(!hlist_unhashed(&crq->hash));
 
-       list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
+       hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
 }
 
 static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 {
-       struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
-       struct list_head *entry, *next = hash_list->next;
+       struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+       struct hlist_node *entry, *next;
 
-       while ((entry = next) != hash_list) {
+       hlist_for_each_safe(entry, next, hash_list) {
                struct cfq_rq *crq = list_entry_hash(entry);
                struct request *__rq = crq->request;
 
-               next = entry->next;
-
-               BUG_ON(!ON_MHASH(crq));
+               BUG_ON(hlist_unhashed(&crq->hash));
 
                if (!rq_mergeable(__rq)) {
-                       __cfq_del_crq_hash(crq);
+                       cfq_del_crq_hash(crq);
                        continue;
                }
 
@@ -159,20 +247,27 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_NONE                (2)
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
-#define ON_RB(node)    ((node)->rb_color != RB_NONE)
+#define RB_EMPTY(node)         ((node)->rb_node == NULL)
 #define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)          (rq)->sector
 
-static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+static void
+cfq_del_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
 {
-       if (ON_RB(&crq->rb_node)) {
+       if (crq->cfq_queue) {
+               crq->cfq_queue = NULL;
+
+               if (cfq_account_io(crq)) {
+                       cfqd->busy_rq--;
+                       cfqd->busy_sectors -= crq->nr_sectors;
+                       cfqd->cid[crq->ioprio].busy_rq--;
+                       atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_out));
+                       cfqd->cid[crq->ioprio].busy_sectors -= crq->nr_sectors;
+                       atomic_add(crq->nr_sectors,&(cfqd->cid[crq->ioprio].cum_sectors_out));
+               }
+
                cfqq->queued[rq_data_dir(crq->request)]--;
                rb_erase(&crq->rb_node, &cfqq->sort_list);
-               crq->cfq_queue = NULL;
        }
 }
 
@@ -196,7 +291,7 @@ __cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
        }
 
        rb_link_node(&crq->rb_node, parent, p);
-       return NULL;
+       return 0;
 }
 
 static void
@@ -205,24 +300,33 @@ cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
        struct request *rq = crq->request;
        struct cfq_rq *__alias;
 
-       crq->rb_key = rq_rb_key(rq);
        cfqq->queued[rq_data_dir(rq)]++;
+       if (cfq_account_io(crq)) {
+               cfqd->busy_rq++;
+               cfqd->busy_sectors += crq->nr_sectors;
+               cfqd->cid[crq->ioprio].busy_rq++;
+               atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_in));                
+               cfqd->cid[crq->ioprio].busy_sectors += crq->nr_sectors;
+               atomic_add(crq->nr_sectors,&(cfqd->cid[crq->ioprio].cum_sectors_in));
+       }
 retry:
        __alias = __cfq_add_crq_rb(cfqq, crq);
        if (!__alias) {
                rb_insert_color(&crq->rb_node, &cfqq->sort_list);
+               crq->rb_key = rq_rb_key(rq);
                crq->cfq_queue = cfqq;
                return;
        }
 
-       cfq_dispatch_sort(cfqd, cfqq, __alias);
+       cfq_del_crq_rb(cfqd, cfqq, __alias);
+       cfq_dispatch_sort(cfqd->dispatch, __alias);
        goto retry;
 }
 
 static struct request *
 cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
 {
-       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
        struct rb_node *n;
 
        if (!cfqq)
@@ -247,16 +351,30 @@ out:
 static void cfq_remove_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
 
        if (crq) {
-               struct cfq_queue *cfqq = crq->cfq_queue;
-
                cfq_remove_merge_hints(q, crq);
+               list_del_init(&crq->prio_list);
                list_del_init(&rq->queuelist);
 
-               if (cfqq) {
-                       cfq_del_crq_rb(cfqq, crq);
+               /*
+                * set a grace period timer to allow realtime io to make real
+                * progress, if we release an rt request. for a normal request,
+                * set the timer so that idle io doesn't interfere with other io
+                */
+               if (crq->ioprio == IOPRIO_RT) {
+                       set_bit(CFQ_WAIT_RT, &cfqd->flags);
+                       cfqd->wait_end = jiffies + cfqd->cfq_grace_rt;
+               } else if (crq->ioprio != IOPRIO_IDLE) {
+                       set_bit(CFQ_WAIT_NORM, &cfqd->flags);
+                       cfqd->wait_end = jiffies + cfqd->cfq_grace_idle;
+               }
+
+               if (crq->cfq_queue) {
+                       struct cfq_queue *cfqq = crq->cfq_queue;
+
+                       cfq_del_crq_rb(cfqd, cfqq, crq);
 
                        if (RB_EMPTY(&cfqq->sort_list))
                                cfq_put_queue(cfqd, cfqq);
@@ -306,18 +424,22 @@ out_insert:
 static void cfq_merged_request(request_queue_t *q, struct request *req)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(req);
+       struct cfq_rq *crq = RQ_ELV_DATA(req);
 
        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);
 
-       if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+       if (crq->cfq_queue && (rq_rb_key(req) != crq->rb_key)) {
                struct cfq_queue *cfqq = crq->cfq_queue;
 
-               cfq_del_crq_rb(cfqq, crq);
+               cfq_del_crq_rb(cfqd, cfqq, crq);
                cfq_add_crq_rb(cfqd, cfqq, crq);
        }
 
+       cfqd->busy_sectors += req->hard_nr_sectors - crq->nr_sectors;
+       cfqd->cid[crq->ioprio].busy_sectors += req->hard_nr_sectors - crq->nr_sectors;
+       crq->nr_sectors = req->hard_nr_sectors;
+
        q->last_merge = req;
 }
 
@@ -329,16 +451,14 @@ cfq_merged_requests(request_queue_t *q, struct request *req,
        cfq_remove_request(q, next);
 }
 
-static void
-cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                 struct cfq_rq *crq)
+/*
+ * sort into dispatch list, in optimal ascending order
+ */
+static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq)
 {
-       struct list_head *head = cfqd->dispatch, *entry = head;
+       struct list_head *entry = head;
        struct request *__rq;
 
-       cfq_del_crq_rb(cfqq, crq);
-       cfq_remove_merge_hints(cfqd->queue, crq);
-
        if (!list_empty(head)) {
                __rq = list_entry_rq(head->next);
 
@@ -359,47 +479,164 @@ link:
        list_add_tail(&crq->request->queuelist, entry);
 }
 
-static inline void
+/*
+ * remove from io scheduler core and put on dispatch list for service
+ */
+static inline int
 __cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
                        struct cfq_queue *cfqq)
 {
-       struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+       struct cfq_rq *crq;
 
-       cfq_dispatch_sort(cfqd, cfqq, crq);
+       crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+
+       cfq_del_crq_rb(cfqd, cfqq, crq);
+       cfq_remove_merge_hints(q, crq);
+       cfq_dispatch_sort(cfqd->dispatch, crq);
+
+       /*
+        * technically, for IOPRIO_RT we don't need to add it to the list.
+        */
+       list_add_tail(&crq->prio_list, &cfqd->cid[cfqq->ioprio].prio_list);
+       return crq->nr_sectors;
 }
 
-static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
+static int
+cfq_dispatch_requests(request_queue_t *q, int prio, int max_rq, int max_sectors)
 {
-       struct cfq_queue *cfqq;
-       struct list_head *entry, *tmp;
-       int ret, queued, good_queues;
-
-       if (list_empty(&cfqd->rr_list))
-               return 0;
+       struct cfq_data *cfqd = q->elevator.elevator_data;
+       struct list_head *plist = &cfqd->cid[prio].rr_list;
+       struct list_head *entry, *nxt;
+       int q_rq, q_io;
 
-       queued = ret = 0;
-restart:
-       good_queues = 0;
-       list_for_each_safe(entry, tmp, &cfqd->rr_list) {
-               cfqq = list_entry_cfqq(cfqd->rr_list.next);
+       /*
+        * for each queue at this prio level, dispatch a request
+        */
+       q_rq = q_io = 0;
+       list_for_each_safe(entry, nxt, plist) {
+               struct cfq_queue *cfqq = list_entry_cfqq(entry);
 
                BUG_ON(RB_EMPTY(&cfqq->sort_list));
 
-               __cfq_dispatch_requests(q, cfqd, cfqq);
+               q_io += __cfq_dispatch_requests(q, cfqd, cfqq);
+               q_rq++;
 
                if (RB_EMPTY(&cfqq->sort_list))
                        cfq_put_queue(cfqd, cfqq);
-               else
-                       good_queues++;
 
-               queued++;
-               ret = 1;
+               /*
+                * if we hit the queue limit, put the string of serviced
+                * queues at the back of the pending list
+                */
+               if (q_io >= max_sectors || q_rq >= max_rq) {
+                       struct list_head *prv = nxt->prev;
+
+                       if (prv != plist) {
+                               list_del(plist);
+                               list_add(plist, prv);
+                       }
+                       break;
+               }
        }
 
-       if ((queued < cfqd->cfq_quantum) && good_queues)
-               goto restart;
+       cfqd->cid[prio].last_rq = q_rq;
+       cfqd->cid[prio].last_sectors = q_io;
+       return q_rq;
+}
 
-       return ret;
+/*
+ * try to move some requests to the dispatch list. returns nonzero if
+ * any were moved
+ */
+static int cfq_select_requests(request_queue_t *q, struct cfq_data *cfqd)
+{
+       int queued, busy_rq, busy_sectors, i;
+
+       /*
+        * if there's any realtime io, only schedule that
+        */
+       if (cfq_dispatch_requests(q, IOPRIO_RT, cfqd->cfq_quantum, cfqd->cfq_quantum_io))
+               return 1;
+
+       /*
+        * if RT io was last serviced and grace time hasn't expired,
+        * arm the timer to restart queueing if no other RT io has been
+        * submitted in the mean time
+        */
+       if (test_bit(CFQ_WAIT_RT, &cfqd->flags)) {
+               if (time_before(jiffies, cfqd->wait_end)) {
+                       mod_timer(&cfqd->timer, cfqd->wait_end);
+                       return 0;
+               }
+               clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+       }
+
+       /*
+        * for each priority level, calculate number of requests we
+        * are allowed to put into service.
+        */
+       queued = 0;
+       busy_rq = cfqd->busy_rq;
+       busy_sectors = cfqd->busy_sectors;
+       for (i = IOPRIO_RT - 1; i > IOPRIO_IDLE; i--) {
+               const int o_rq = busy_rq - cfqd->cid[i].busy_rq;
+               const int o_sectors = busy_sectors - cfqd->cid[i].busy_sectors;
+               int q_rq = cfqd->cfq_quantum * (i + 1) / IOPRIO_NR;
+               int q_io = cfqd->cfq_quantum_io * (i + 1) / IOPRIO_NR;
+
+               /*
+                * no need to keep iterating the list, if there are no
+                * requests pending anymore
+                */
+               if (!cfqd->busy_rq)
+                       break;
+
+               /*
+                * find out how many requests and sectors we are allowed to
+                * service
+                */
+               if (o_rq)
+                       q_rq = o_rq * (i + 1) / IOPRIO_NR;
+               if (q_rq > cfqd->cfq_quantum)
+                       q_rq = cfqd->cfq_quantum;
+
+               if (o_sectors)
+                       q_io = o_sectors * (i + 1) / IOPRIO_NR;
+               if (q_io > cfqd->cfq_quantum_io)
+                       q_io = cfqd->cfq_quantum_io;
+
+               /*
+                * average with last dispatched for fairness
+                */
+               if (cfqd->cid[i].last_rq != -1)
+                       q_rq = (cfqd->cid[i].last_rq + q_rq) / 2;
+               if (cfqd->cid[i].last_sectors != -1)
+                       q_io = (cfqd->cid[i].last_sectors + q_io) / 2;
+
+               queued += cfq_dispatch_requests(q, i, q_rq, q_io);
+       }
+
+       if (queued)
+               return 1;
+
+       /*
+        * only allow dispatch of idle io, if the queue has been idle from
+        * servicing RT or normal io for the grace period
+        */
+       if (test_bit(CFQ_WAIT_NORM, &cfqd->flags)) {
+               if (time_before(jiffies, cfqd->wait_end)) {
+                       mod_timer(&cfqd->timer, cfqd->wait_end);
+                       return 0;
+               }
+               clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+       }
+
+       /*
+        * if we found nothing to do, allow idle io to be serviced
+        */
+       if (cfq_dispatch_requests(q, IOPRIO_IDLE, cfqd->cfq_idle_quantum, cfqd->cfq_idle_quantum_io))
+               return 1;
+
+       return 0;
 }
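
The per-level allowance above scales the global quanta by
(prio + 1) / IOPRIO_NR and caps them at the tunables. A standalone sketch
of that arithmetic, assuming IOPRIO_NR == 21 and the default cfq_quantum
of 6 from the top of this file:

	/* hedged sketch of the request-quantum scaling in
	 * cfq_select_requests(); not part of the patch itself */
	#define IOPRIO_NR 21	/* assumed */

	static int prio_quantum(int prio, int quantum)
	{
		int q = quantum * (prio + 1) / IOPRIO_NR;

		return q > quantum ? quantum : q;
	}

With quantum = 6, level 19 gets 5 requests per round, level 10 gets 3, and
levels 1 and 2 round down to 0 - one reason the TODO at the top flags
cfq_select_requests() as needing work for low-percentage io.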
 
 static struct request *cfq_next_request(request_queue_t *q)
@@ -410,121 +647,188 @@ static struct request *cfq_next_request(request_queue_t *q)
        if (!list_empty(cfqd->dispatch)) {
                struct cfq_rq *crq;
 dispatch:
+               /*
+                * end grace period, we are servicing a request
+                */
+               del_timer(&cfqd->timer);
+               clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+               clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+
+               BUG_ON(list_empty(cfqd->dispatch));
                rq = list_entry_rq(cfqd->dispatch->next);
 
-               crq = RQ_DATA(rq);
-               if (crq)
-                       cfq_remove_merge_hints(q, crq);
+               BUG_ON(q->last_merge == rq);
+               crq = RQ_ELV_DATA(rq);
+               if (crq) {
+                       BUG_ON(!hlist_unhashed(&crq->hash));
+                       list_del_init(&crq->prio_list);
+               }
 
                return rq;
        }
 
-       if (cfq_dispatch_requests(q, cfqd))
+       /*
+        * we moved requests to the dispatch list, go back and serve one
+        */
+       if (cfq_select_requests(q, cfqd))
                goto dispatch;
 
        return NULL;
 }
 
 static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey, const int hashval)
 {
-       struct list_head *hash_list = &cfqd->cfq_hash[hashval];
-       struct list_head *entry;
+       struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
+       struct hlist_node *entry;
 
-       list_for_each(entry, hash_list) {
+       hlist_for_each(entry, hash_list) {
                struct cfq_queue *__cfqq = list_entry_qhash(entry);
 
-               if (__cfqq->pid == pid)
+               if (__cfqq->hash_key == hashkey)
                        return __cfqq;
        }
 
        return NULL;
 }
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey)
 {
-       const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+       const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
 
-       return __cfq_find_cfq_hash(cfqd, pid, hashval);
+       return __cfq_find_cfq_hash(cfqd, hashkey, hashval);
 }
 
 static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        cfqd->busy_queues--;
+       WARN_ON(cfqd->busy_queues < 0);
+
+       cfqd->cid[cfqq->ioprio].busy_queues--;
+       WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues < 0);
+       atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
+
        list_del(&cfqq->cfq_list);
-       list_del(&cfqq->cfq_hash);
+       hlist_del(&cfqq->cfq_hash);
        mempool_free(cfqq, cfq_mpool);
 }
 
-static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
-                                        int gfp_mask)
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int hashkey)
 {
-       const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+       const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq, *new_cfqq = NULL;
-       request_queue_t *q = cfqd->queue;
 
-retry:
-       cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
+retry:
+       cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
 
        if (!cfqq) {
-               if (new_cfqq) {
-                       cfqq = new_cfqq;
-                       new_cfqq = NULL;
-               } else if (gfp_mask & __GFP_WAIT) {
-                       spin_unlock_irq(q->queue_lock);
-                       new_cfqq = mempool_alloc(cfq_mpool, gfp_mask);
-                       spin_lock_irq(q->queue_lock);
-                       goto retry;
-               } else
-                       return NULL;
-
-               INIT_LIST_HEAD(&cfqq->cfq_hash);
+               if (new_cfqq) {
+                       cfqq = new_cfqq;
+                       new_cfqq = NULL;
+               } else {
+                       new_cfqq = mempool_alloc(cfq_mpool, GFP_ATOMIC);
+                       /* MEF: I think cfq-iosched.c needs further fixing
+                        * to avoid the BUG_ON. Shailabh will be sending
+                        * a new patch for this soon.
+                        */
+                       BUG_ON(new_cfqq == NULL);
+                       goto retry;
+               }
+
+               memset(cfqq, 0, sizeof(*cfqq));
+               INIT_HLIST_NODE(&cfqq->cfq_hash);
                INIT_LIST_HEAD(&cfqq->cfq_list);
-               RB_CLEAR_ROOT(&cfqq->sort_list);
 
-               cfqq->pid = pid;
-               cfqq->queued[0] = cfqq->queued[1] = 0;
-               list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+               cfqq->hash_key = cfq_hash_key(current);
+               cfqq->ioprio = cfq_ioprio(current);
+               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
        }
 
-       if (new_cfqq)
-               mempool_free(new_cfqq, cfq_mpool);
+       if (new_cfqq) {
+               mempool_free(new_cfqq, cfq_mpool);
+       }
 
        return cfqq;
 }
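
Because mempool_alloc() with GFP_ATOMIC can fail, the BUG_ON() above is
fragile, as the in-line MEF note concedes. One hedged alternative (not part
of this patch) would restore the pre-patch behaviour of bypassing fair
queueing when no cfq_queue can be set up, sending the request straight to
the dispatch list as the old cfq_enqueue() did:

	/* illustrative only; assumes cfq_get_queue() is changed to return
	 * NULL on allocation failure instead of hitting BUG_ON() */
	cfqq = cfq_get_queue(cfqd, cfq_hash_key(current));
	if (!cfqq) {
		list_add_tail(&crq->request->queuelist, cfqd->dispatch);
		return;
	}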
 
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
-                                      int gfp_mask)
+static void
+__cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
 {
-       request_queue_t *q = cfqd->queue;
+       const int prio = crq->ioprio;
        struct cfq_queue *cfqq;
 
-       spin_lock_irq(q->queue_lock);
-       cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
-       spin_unlock_irq(q->queue_lock);
+       cfqq = cfq_get_queue(cfqd, cfq_hash_key(current));
+
+       /*
+        * not too good...
+        */
+       if (prio > cfqq->ioprio) {
+               printk("prio hash collision %d %d\n", prio, cfqq->ioprio);
+               if (!list_empty(&cfqq->cfq_list)) {
+                       cfqd->cid[cfqq->ioprio].busy_queues--;
+                       WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues < 0);
+                       atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
+                       cfqd->cid[prio].busy_queues++;
+                       atomic_inc(&(cfqd->cid[prio].cum_queues_in));
+                       list_move_tail(&cfqq->cfq_list, &cfqd->cid[prio].rr_list);
+               }
+               cfqq->ioprio = prio;
+       }
+
+       cfq_add_crq_rb(cfqd, cfqq, crq);
+
+       if (list_empty(&cfqq->cfq_list)) {
+               list_add_tail(&cfqq->cfq_list, &cfqd->cid[prio].rr_list);
+               cfqd->cid[prio].busy_queues++;
+               atomic_inc(&(cfqd->cid[prio].cum_queues_in));
+               cfqd->busy_queues++;
+       }
+
+       if (rq_mergeable(crq->request)) {
+               cfq_add_crq_hash(cfqd, crq);
+
+               if (!q->last_merge)
+                       q->last_merge = crq->request;
+       }
 
-       return cfqq;
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
+static void cfq_reenqueue(request_queue_t *q, struct cfq_data *cfqd, int prio)
 {
-       struct cfq_queue *cfqq;
+       struct list_head *prio_list = &cfqd->cid[prio].prio_list;
+       struct list_head *entry, *tmp;
 
-       cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
-       if (cfqq) {
-               cfq_add_crq_rb(cfqd, cfqq, crq);
+       list_for_each_safe(entry, tmp, prio_list) {
+               struct cfq_rq *crq = list_entry_prio(entry);
 
-               if (list_empty(&cfqq->cfq_list)) {
-                       list_add(&cfqq->cfq_list, &cfqd->rr_list);
-                       cfqd->busy_queues++;
-               }
-       } else {
+               list_del_init(entry);
+               list_del_init(&crq->request->queuelist);
+               __cfq_enqueue(q, cfqd, crq);
+       }
+}
+
+static void
+cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
+{
+       const int prio = cfq_ioprio(current);
+
+       crq->ioprio = prio;
+       crq->nr_sectors = crq->request->hard_nr_sectors;
+       __cfq_enqueue(q, cfqd, crq);
+
+       if (prio == IOPRIO_RT) {
+               int i;
+
+               /*
+                * realtime io gets priority, move all other io back
+                */
+               for (i = IOPRIO_IDLE; i < IOPRIO_RT; i++)
+                       cfq_reenqueue(q, cfqd, i);
+       } else if (prio != IOPRIO_IDLE) {
                /*
-                * should can only happen if the request wasn't allocated
-                * through blk_alloc_request(), eg stack requests from ide-cd
-                * (those should be removed) _and_ we are in OOM.
+                * check if we need to move idle io back into queue
                 */
-               list_add_tail(&crq->request->queuelist, cfqd->dispatch);
+               cfq_reenqueue(q, cfqd, IOPRIO_IDLE);
        }
 }
 
@@ -532,12 +836,14 @@ static void
 cfq_insert_request(request_queue_t *q, struct request *rq, int where)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
 
        switch (where) {
                case ELEVATOR_INSERT_BACK:
+#if 0
                        while (cfq_dispatch_requests(q, cfqd))
                                ;
+#endif
                        list_add_tail(&rq->queuelist, cfqd->dispatch);
                        break;
                case ELEVATOR_INSERT_FRONT:
@@ -545,26 +851,19 @@ cfq_insert_request(request_queue_t *q, struct request *rq, int where)
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
-                       cfq_enqueue(cfqd, crq);
+                       cfq_enqueue(q, cfqd, crq);
                        break;
                default:
                        printk("%s: bad insert point %d\n", __FUNCTION__,where);
                        return;
        }
-
-       if (rq_mergeable(rq)) {
-               cfq_add_crq_hash(cfqd, crq);
-
-               if (!q->last_merge)
-                       q->last_merge = rq;
-       }
 }
 
 static int cfq_queue_empty(request_queue_t *q)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
 
-       if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
+       if (list_empty(cfqd->dispatch) && !cfqd->busy_queues)
                return 1;
 
        return 0;
@@ -573,7 +872,7 @@ static int cfq_queue_empty(request_queue_t *q)
 static struct request *
 cfq_former_request(request_queue_t *q, struct request *rq)
 {
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
        struct rb_node *rbprev = rb_prev(&crq->rb_node);
 
        if (rbprev)
@@ -585,7 +884,7 @@ cfq_former_request(request_queue_t *q, struct request *rq)
 static struct request *
 cfq_latter_request(request_queue_t *q, struct request *rq)
 {
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
        struct rb_node *rbnext = rb_next(&crq->rb_node);
 
        if (rbnext)
@@ -594,27 +893,47 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
        return NULL;
 }
 
+static void cfq_queue_congested(request_queue_t *q)
+{
+       struct cfq_data *cfqd = q->elevator.elevator_data;
+
+       set_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
+}
+
 static int cfq_may_queue(request_queue_t *q, int rw)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_queue *cfqq;
-       int ret = 1;
+       const int prio = cfq_ioprio(current);
+       int limit, ret = 1;
 
        if (!cfqd->busy_queues)
                goto out;
 
-       cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
-       if (cfqq) {
-               int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
+       cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
+       if (!cfqq)
+               goto out;
 
-               if (limit < 3)
-                       limit = 3;
-               else if (limit > cfqd->max_queued)
-                       limit = cfqd->max_queued;
+       /*
+        * if higher or equal prio io is sleeping waiting for a request, don't
+        * allow this one to allocate one. as long as ll_rw_blk does fifo
+        * waitqueue wakeups this should work...
+        */
+       if (cfqd->rq_starved_mask & ~((1 << prio) - 1))
+               goto out;
+
+       if (cfqq->queued[rw] < cfqd->cfq_queued || !cfqd->cid[prio].busy_queues)
+               goto out;
+
+       limit = q->nr_requests * (prio + 1) / IOPRIO_NR;
+       limit /= cfqd->cid[prio].busy_queues;
+       if (cfqq->queued[rw] > limit)
+               ret = 0;
 
-               if (cfqq->queued[rw] > limit)
-                       ret = 0;
-       }
 out:
        return ret;
 }
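
The limit above gives each priority level a slice of the request pool
proportional to (prio + 1) / IOPRIO_NR, split evenly across that level's
busy queues. A worked example with assumed numbers (nr_requests of 512
after the q->nr_requests <<= 2 in cfq_init(), prio 19, IOPRIO_NR 21, and
four busy queues at that level):

	/* hedged arithmetic mirroring cfq_may_queue(); values illustrative */
	int nr_requests = 512, prio = 19, busy_queues = 4;
	int limit;

	limit = nr_requests * (prio + 1) / 21;	/* 512 * 20 / 21 = 487 */
	limit /= busy_queues;			/* 487 / 4 = 121 requests */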
@@ -622,56 +941,32 @@ out:
 static void cfq_put_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct request_list *rl;
-       int other_rw;
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
 
        if (crq) {
                BUG_ON(q->last_merge == rq);
-               BUG_ON(ON_MHASH(crq));
+               BUG_ON(!hlist_unhashed(&crq->hash));
 
                mempool_free(crq, cfqd->crq_pool);
                rq->elevator_private = NULL;
        }
-
-       /*
-        * work-around for may_queue "bug": if a read gets issued and refused
-        * to queue because writes ate all the allowed slots and no other
-        * reads are pending for this queue, it could get stuck infinitely
-        * since freed_request() only checks the waitqueue for writes when
-        * freeing them. or vice versa for a single write vs many reads.
-        * so check here whether "the other" data direction might be able
-        * to queue and wake them
-        */
-       rl = &q->rq;
-       other_rw = rq_data_dir(rq) ^ 1;
-       if (rl->count[other_rw] <= q->nr_requests) {
-               smp_mb();
-               if (waitqueue_active(&rl->wait[other_rw]))
-                       wake_up(&rl->wait[other_rw]);
-       }
 }
 
 static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_queue *cfqq;
-       struct cfq_rq *crq;
+       struct cfq_rq *crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 
-       /*
-        * prepare a queue up front, so cfq_enqueue() doesn't have to
-        */
-       cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
-       if (!cfqq)
-               return 1;
-
-       crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
        if (crq) {
+               /*
+                * process now has one request
+                */
+               clear_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
+
                memset(crq, 0, sizeof(*crq));
-               RB_CLEAR(&crq->rb_node);
                crq->request = rq;
-               crq->cfq_queue = NULL;
-               INIT_LIST_HEAD(&crq->hash);
+               INIT_HLIST_NODE(&crq->hash);
+               INIT_LIST_HEAD(&crq->prio_list);
                rq->elevator_private = crq;
                return 0;
        }
@@ -690,6 +985,26 @@ static void cfq_exit(request_queue_t *q, elevator_t *e)
        kfree(cfqd);
 }
 
+static void cfq_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+
+       clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+       clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+       kblockd_schedule_work(&cfqd->work);
+}
+
+static void cfq_work(void *data)
+{
+       request_queue_t *q = data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (cfq_next_request(q))
+               q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 static int cfq_init(request_queue_t *q, elevator_t *e)
 {
        struct cfq_data *cfqd;
@@ -700,13 +1015,34 @@ static int cfq_init(request_queue_t *q, elevator_t *e)
                return -ENOMEM;
 
        memset(cfqd, 0, sizeof(*cfqd));
-       INIT_LIST_HEAD(&cfqd->rr_list);
 
-       cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
+       init_timer(&cfqd->timer);
+       cfqd->timer.function = cfq_timer;
+       cfqd->timer.data = (unsigned long) cfqd;
+
+       INIT_WORK(&cfqd->work, cfq_work, q);
+
+       for (i = 0; i < IOPRIO_NR; i++) {
+               struct io_prio_data *cid = &cfqd->cid[i];
+
+               INIT_LIST_HEAD(&cid->rr_list);
+               INIT_LIST_HEAD(&cid->prio_list);
+               cid->last_rq = -1;
+               cid->last_sectors = -1;
+
+               atomic_set(&cid->cum_rq_in,0);          
+               atomic_set(&cid->cum_rq_out,0);
+               atomic_set(&cid->cum_sectors_in,0);
+               atomic_set(&cid->cum_sectors_out,0);            
+               atomic_set(&cid->cum_queues_in,0);
+               atomic_set(&cid->cum_queues_out,0);
+       }
+
+       cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
        if (!cfqd->crq_hash)
                goto out_crqhash;
 
-       cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+       cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
        if (!cfqd->cfq_hash)
                goto out_cfqhash;
 
@@ -715,23 +1051,22 @@ static int cfq_init(request_queue_t *q, elevator_t *e)
                goto out_crqpool;
 
        for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&cfqd->crq_hash[i]);
+               INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
-
-       cfqd->dispatch = &q->queue_head;
-       e->elevator_data = cfqd;
-       cfqd->queue = q;
-
-       /*
-        * just set it to some high value, we want anyone to be able to queue
-        * some requests. fairness is handled differently
-        */
-       cfqd->max_queued = q->nr_requests;
-       q->nr_requests = 8192;
+               INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
        cfqd->cfq_queued = cfq_queued;
        cfqd->cfq_quantum = cfq_quantum;
+       cfqd->cfq_quantum_io = cfq_quantum_io;
+       cfqd->cfq_idle_quantum = cfq_idle_quantum;
+       cfqd->cfq_idle_quantum_io = cfq_idle_quantum_io;
+       cfqd->cfq_grace_rt = cfq_grace_rt;
+       cfqd->cfq_grace_idle = cfq_grace_idle;
+
+       q->nr_requests <<= 2;
+
+       cfqd->dispatch = &q->queue_head;
+       e->elevator_data = cfqd;
 
        return 0;
 out_crqpool:
@@ -797,7 +1132,12 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page)          \
        return cfq_var_show(__VAR, (page));                             \
 }
 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
+SHOW_FUNCTION(cfq_quantum_io_show, cfqd->cfq_quantum_io);
+SHOW_FUNCTION(cfq_idle_quantum_show, cfqd->cfq_idle_quantum);
+SHOW_FUNCTION(cfq_idle_quantum_io_show, cfqd->cfq_idle_quantum_io);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
+SHOW_FUNCTION(cfq_grace_rt_show, cfqd->cfq_grace_rt);
+SHOW_FUNCTION(cfq_grace_idle_show, cfqd->cfq_grace_idle);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                                \
@@ -811,23 +1151,236 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)    \
        return ret;                                                     \
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
+STORE_FUNCTION(cfq_quantum_io_store, &cfqd->cfq_quantum_io, 4, INT_MAX);
+STORE_FUNCTION(cfq_idle_quantum_store, &cfqd->cfq_idle_quantum, 1, INT_MAX);
+STORE_FUNCTION(cfq_idle_quantum_io_store, &cfqd->cfq_idle_quantum_io, 4, INT_MAX);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
+STORE_FUNCTION(cfq_grace_rt_store, &cfqd->cfq_grace_rt, 0, INT_MAX);
+STORE_FUNCTION(cfq_grace_idle_store, &cfqd->cfq_grace_idle, 0, INT_MAX);
 #undef STORE_FUNCTION
 
+
+/* Additional entries to get priority level data */
+static ssize_t
+cfq_prio_show(struct cfq_data *cfqd, char *page, unsigned int priolvl)
+{
+       int r1, r2, s1, s2, q1, q2;
+
+       if (!(priolvl >= IOPRIO_IDLE && priolvl <= IOPRIO_RT))
+               return 0;
+
+       r1 = (int) atomic_read(&(cfqd->cid[priolvl].cum_rq_in));
+       r2 = (int) atomic_read(&(cfqd->cid[priolvl].cum_rq_out));
+       s1 = (int) atomic_read(&(cfqd->cid[priolvl].cum_sectors_in));
+       s2 = (int) atomic_read(&(cfqd->cid[priolvl].cum_sectors_out));
+       q1 = (int) atomic_read(&(cfqd->cid[priolvl].cum_queues_in));
+       q2 = (int) atomic_read(&(cfqd->cid[priolvl].cum_queues_out));
+
+       /*
+        * alternative format, also showing the in - out differences:
+        * return sprintf(page, "rq %d (%d,%d) sec %d (%d,%d) q %d (%d,%d)\n",
+        *                r1-r2, r1, r2, s1-s2, s1, s2, q1-q2, q1, q2);
+        */
+       return sprintf(page, "rq (%d,%d) sec (%d,%d) q (%d,%d)\n",
+                      r1, r2, s1, s2, q1, q2);
+
+}
+
+#define SHOW_PRIO_DATA(__PRIOLVL)                                               \
+static ssize_t cfq_prio_##__PRIOLVL##_show(struct cfq_data *cfqd, char *page)  \
+{                                                                              \
+       return cfq_prio_show(cfqd,page,__PRIOLVL);                              \
+}
+SHOW_PRIO_DATA(0);
+SHOW_PRIO_DATA(1);
+SHOW_PRIO_DATA(2);
+SHOW_PRIO_DATA(3);
+SHOW_PRIO_DATA(4);
+SHOW_PRIO_DATA(5);
+SHOW_PRIO_DATA(6);
+SHOW_PRIO_DATA(7);
+SHOW_PRIO_DATA(8);
+SHOW_PRIO_DATA(9);
+SHOW_PRIO_DATA(10);
+SHOW_PRIO_DATA(11);
+SHOW_PRIO_DATA(12);
+SHOW_PRIO_DATA(13);
+SHOW_PRIO_DATA(14);
+SHOW_PRIO_DATA(15);
+SHOW_PRIO_DATA(16);
+SHOW_PRIO_DATA(17);
+SHOW_PRIO_DATA(18);
+SHOW_PRIO_DATA(19);
+SHOW_PRIO_DATA(20);
+#undef SHOW_PRIO_DATA
+
+
+static ssize_t cfq_prio_store(struct cfq_data *cfqd, const char *page, size_t count, int priolvl)
+{      
+       atomic_set(&(cfqd->cid[priolvl].cum_rq_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_rq_out),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_sectors_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_sectors_out),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_queues_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_queues_out),0);
+
+       return count;
+}
+
+
+#define STORE_PRIO_DATA(__PRIOLVL)                                                                \
+static ssize_t cfq_prio_##__PRIOLVL##_store(struct cfq_data *cfqd, const char *page, size_t count) \
+{                                                                                                 \
+        return cfq_prio_store(cfqd,page,count,__PRIOLVL);                                          \
+}                  
+STORE_PRIO_DATA(0);     
+STORE_PRIO_DATA(1);
+STORE_PRIO_DATA(2);
+STORE_PRIO_DATA(3);
+STORE_PRIO_DATA(4);
+STORE_PRIO_DATA(5);
+STORE_PRIO_DATA(6);
+STORE_PRIO_DATA(7);
+STORE_PRIO_DATA(8);
+STORE_PRIO_DATA(9);
+STORE_PRIO_DATA(10);
+STORE_PRIO_DATA(11);
+STORE_PRIO_DATA(12);
+STORE_PRIO_DATA(13);
+STORE_PRIO_DATA(14);
+STORE_PRIO_DATA(15);
+STORE_PRIO_DATA(16);
+STORE_PRIO_DATA(17);
+STORE_PRIO_DATA(18);
+STORE_PRIO_DATA(19);
+STORE_PRIO_DATA(20);
+#undef STORE_PRIO_DATA
+
+
+
 static struct cfq_fs_entry cfq_quantum_entry = {
        .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_quantum_show,
        .store = cfq_quantum_store,
 };
+static struct cfq_fs_entry cfq_quantum_io_entry = {
+       .attr = {.name = "quantum_io", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_quantum_io_show,
+       .store = cfq_quantum_io_store,
+};
+static struct cfq_fs_entry cfq_idle_quantum_entry = {
+       .attr = {.name = "idle_quantum", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_idle_quantum_show,
+       .store = cfq_idle_quantum_store,
+};
+static struct cfq_fs_entry cfq_idle_quantum_io_entry = {
+       .attr = {.name = "idle_quantum_io", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_idle_quantum_io_show,
+       .store = cfq_idle_quantum_io_store,
+};
 static struct cfq_fs_entry cfq_queued_entry = {
        .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_queued_show,
        .store = cfq_queued_store,
 };
+static struct cfq_fs_entry cfq_grace_rt_entry = {
+       .attr = {.name = "grace_rt", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_grace_rt_show,
+       .store = cfq_grace_rt_store,
+};
+static struct cfq_fs_entry cfq_grace_idle_entry = {
+       .attr = {.name = "grace_idle", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_grace_idle_show,
+       .store = cfq_grace_idle_store,
+};
+
+#define P_0_STR   "p0"
+#define P_1_STR   "p1"
+#define P_2_STR   "p2"
+#define P_3_STR   "p3"
+#define P_4_STR   "p4"
+#define P_5_STR   "p5"
+#define P_6_STR   "p6"
+#define P_7_STR   "p7"
+#define P_8_STR   "p8"
+#define P_9_STR   "p9"
+#define P_10_STR  "p10"
+#define P_11_STR  "p11"
+#define P_12_STR  "p12"
+#define P_13_STR  "p13"
+#define P_14_STR  "p14"
+#define P_15_STR  "p15"
+#define P_16_STR  "p16"
+#define P_17_STR  "p17"
+#define P_18_STR  "p18"
+#define P_19_STR  "p19"
+#define P_20_STR  "p20"
+
+
+#define CFQ_PRIO_SYSFS_ENTRY(__PRIOLVL)                                           \
+static struct cfq_fs_entry cfq_prio_##__PRIOLVL##_entry = {                \
+       .attr = {.name = P_##__PRIOLVL##_STR, .mode = S_IRUGO | S_IWUSR }, \
+       .show = cfq_prio_##__PRIOLVL##_show,                               \
+       .store = cfq_prio_##__PRIOLVL##_store,                             \
+};
+CFQ_PRIO_SYSFS_ENTRY(0);
+CFQ_PRIO_SYSFS_ENTRY(1);
+CFQ_PRIO_SYSFS_ENTRY(2);
+CFQ_PRIO_SYSFS_ENTRY(3);
+CFQ_PRIO_SYSFS_ENTRY(4);
+CFQ_PRIO_SYSFS_ENTRY(5);
+CFQ_PRIO_SYSFS_ENTRY(6);
+CFQ_PRIO_SYSFS_ENTRY(7);
+CFQ_PRIO_SYSFS_ENTRY(8);
+CFQ_PRIO_SYSFS_ENTRY(9);
+CFQ_PRIO_SYSFS_ENTRY(10);
+CFQ_PRIO_SYSFS_ENTRY(11);
+CFQ_PRIO_SYSFS_ENTRY(12);
+CFQ_PRIO_SYSFS_ENTRY(13);
+CFQ_PRIO_SYSFS_ENTRY(14);
+CFQ_PRIO_SYSFS_ENTRY(15);
+CFQ_PRIO_SYSFS_ENTRY(16);
+CFQ_PRIO_SYSFS_ENTRY(17);
+CFQ_PRIO_SYSFS_ENTRY(18);
+CFQ_PRIO_SYSFS_ENTRY(19);
+CFQ_PRIO_SYSFS_ENTRY(20);
+#undef CFQ_PRIO_SYSFS_ENTRY
+
 
 static struct attribute *default_attrs[] = {
        &cfq_quantum_entry.attr,
+       &cfq_quantum_io_entry.attr,
+       &cfq_idle_quantum_entry.attr,
+       &cfq_idle_quantum_io_entry.attr,
        &cfq_queued_entry.attr,
+       &cfq_grace_rt_entry.attr,
+       &cfq_grace_idle_entry.attr,
+       &cfq_prio_0_entry.attr,
+       &cfq_prio_1_entry.attr,
+       &cfq_prio_2_entry.attr,
+       &cfq_prio_3_entry.attr,
+       &cfq_prio_4_entry.attr,
+       &cfq_prio_5_entry.attr,
+       &cfq_prio_6_entry.attr,
+       &cfq_prio_7_entry.attr,
+       &cfq_prio_8_entry.attr,
+       &cfq_prio_9_entry.attr,
+       &cfq_prio_10_entry.attr,
+       &cfq_prio_11_entry.attr,
+       &cfq_prio_12_entry.attr,
+       &cfq_prio_13_entry.attr,
+       &cfq_prio_14_entry.attr,
+       &cfq_prio_15_entry.attr,
+       &cfq_prio_16_entry.attr,
+       &cfq_prio_17_entry.attr,
+       &cfq_prio_18_entry.attr,
+       &cfq_prio_19_entry.attr,
+       &cfq_prio_20_entry.attr,
        NULL,
 };
 
@@ -883,6 +1436,7 @@ elevator_t iosched_cfq = {
        .elevator_set_req_fn =          cfq_set_request,
        .elevator_put_req_fn =          cfq_put_request,
        .elevator_may_queue_fn =        cfq_may_queue,
+       .elevator_set_congested_fn =    cfq_queue_congested,
        .elevator_init_fn =             cfq_init,
        .elevator_exit_fn =             cfq_exit,
 };
drivers/block/ckrm-io.c
index ce166e8..960407e 100644
@@ -74,10 +74,10 @@ typedef struct ckrm_io_class {
        /* Absolute shares of this class
         * in local units. 
         */
-       
-       int ioprio;
-       int unused;
-       
+
+       int cnt_guarantee; /* Allocation as parent */
+       int cnt_unused;    /* Allocation to default subclass */
+
        /* Statistics, for class and default subclass */
        cki_stats_t stats; 
        cki_stats_t mystats;
@@ -90,13 +90,8 @@ typedef struct ckrm_io_class {
 static inline void cki_reset_stats(cki_stats_t *usg);
 static inline void init_icls_one(cki_icls_t *icls);
 static inline int cki_div(int *a, int b, int c);
-static inline int cki_recalc(cki_icls_t *icls, int rel2abs);
-
-#ifdef DOES_NOT_WORK_AND_NOT_NEEDED
-/* External functions e.g. interface to ioscheduler */
-inline void *cki_tsk_icls(struct task_struct *tsk);
-inline int cki_tsk_ioprio(struct task_struct *tsk);
-#endif
+//static inline int cki_recalc(cki_icls_t *icls, int rel2abs);
+static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres);
 
 extern void cki_cfq_set(icls_tsk_t tskicls, icls_ioprio_t tskioprio);
 
@@ -140,9 +135,13 @@ static inline void init_icls_stats(cki_icls_t *icls)
 
 static inline void init_icls_one(cki_icls_t *icls)
 {
-       icls->shares.my_guarantee = 
-               (CKI_IOPRIO_MIN * CKRM_SHARE_DFLT_TOTAL_GUARANTEE) / 
-               CKI_IOPRIO_DIV ;
+       // Assign zero as initial guarantee otherwise creations
+       // could fail due to inadequate share
+
+       //icls->shares.my_guarantee = 
+       //      (CKI_IOPRIO_MIN * CKRM_SHARE_DFLT_TOTAL_GUARANTEE) / 
+       //      CKI_IOPRIO_DIV ;
+       icls->shares.my_guarantee = 0;
        icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
        icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
        icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
@@ -152,8 +151,11 @@ static inline void init_icls_one(cki_icls_t *icls)
        icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
 
 
-       icls->ioprio = CKI_IOPRIO_MIN;
-       icls->unused = 0 ;
+       icls->cnt_guarantee = icls->cnt_unused = IOPRIO_IDLE;
+
+       // Same rationale as for icls->ioprio = CKI_IOPRIO_MIN above:
+       // the equivalence of IOPRIO_IDLE to a zero my_guarantee (set
+       // above) relies on the former being zero.
        
        init_icls_stats(icls);
 }
@@ -174,6 +176,55 @@ static inline int cki_div(int *a, int b, int c)
  * Caller should have a lock on icls
  */
 
+static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres)
+{
+
+       ckrm_core_class_t *child = NULL;
+       cki_icls_t *childres;
+       int resid = cki_rcbs.resid;
+
+       if (parres) {
+               struct ckrm_shares *par = &parres->shares;
+               struct ckrm_shares *self = &res->shares;
+
+               if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
+                       res->cnt_guarantee = CKRM_SHARE_DONTCARE;
+               } else if (par->total_guarantee) {
+                       u64 temp = (u64) self->my_guarantee * 
+                               parres->cnt_guarantee;
+                       do_div(temp, par->total_guarantee);
+                       res->cnt_guarantee = (int) temp;
+               } else {
+                       res->cnt_guarantee = 0;
+               }
+
+               if (res->cnt_guarantee == CKRM_SHARE_DONTCARE) {
+                       res->cnt_unused = CKRM_SHARE_DONTCARE;
+               } else if (self->total_guarantee) {
+                       u64 temp = (u64) self->unused_guarantee * 
+                               res->cnt_guarantee;
+                       do_div(temp, self->total_guarantee);
+                       res->cnt_unused = (int) temp;
+               } else {
+                       res->cnt_unused = 0;
+               }
+       }
+       // propagate to children
+       ckrm_lock_hier(res->core);
+       while ((child = ckrm_get_next_child(res->core,child)) != NULL){
+               childres = ckrm_get_res_class(child, resid, 
+                                             cki_icls_t);
+               
+               spin_lock(&childres->shares_lock);
+               cki_recalc_propagate(childres, res);
+               spin_unlock(&childres->shares_lock);
+       }
+       ckrm_unlock_hier(res->core);
+}
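
The recursion above computes each child's absolute guarantee as
my_guarantee / total_guarantee of the parent's cnt_guarantee, then carves
the default subclass's portion out of that via unused_guarantee. A worked
sketch with assumed numbers (parent cnt_guarantee of 20, i.e. the root's
IOPRIO_NR - 1, and CKRM_SHARE_DFLT_TOTAL_GUARANTEE taken to be 100):

	/* hedged sketch of the do_div() math in cki_recalc_propagate() */
	int parent_cnt = 20, my_guarantee = 25, total_guarantee = 100;
	int unused_guarantee = 100;	/* all left to the default subclass */
	int cnt_guarantee, cnt_unused;
	u64 temp;

	temp = (u64) my_guarantee * parent_cnt;
	do_div(temp, total_guarantee);
	cnt_guarantee = (int) temp;	/* 25 * 20 / 100 = 5 */

	temp = (u64) unused_guarantee * cnt_guarantee;
	do_div(temp, total_guarantee);
	cnt_unused = (int) temp;	/* 100 * 5 / 100 = 5 */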
+
+#if 0
 static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
 {
        u64 temp;
@@ -184,8 +235,10 @@ static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
                temp = icls->shares.my_guarantee * (IOPRIO_NR-1);
                do_div(temp, icls->shares.total_guarantee);
 
+               icls->total = IOPRIO_NR-1;
                icls->ioprio = temp ;
-               icls->unused = (IOPRIO_NR-1)-icls->ioprio;
+               icls->unused = icls->total - icls->ioprio;
+//             icls->unused = (IOPRIO_NR-1)-icls->ioprio;
 
        } else {
                cki_icls_t *parres;
@@ -200,9 +253,9 @@ static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
                        return -EINVAL;
                }
 
-               partot = parres->ioprio + parres->unused;
 
-               temp = (icls->shares.my_guarantee * (parres->ioprio + parres->unused));
+               temp = (icls->shares.my_guarantee * 
+                       parres->total);
                do_div(temp, parres->shares.total_guarantee);
 
                icls->ioprio = temp;
@@ -213,19 +266,19 @@ static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
        return 0;
 
 }
+#endif
 
-
-inline void *cki_icls_tsk(struct task_struct *tsk)
+void *cki_tsk_icls(struct task_struct *tsk)
 {
        return (void *) ckrm_get_res_class(class_core(tsk->taskclass),
                                           cki_rcbs.resid, cki_icls_t);
 }
 
-inline int cki_icls_ioprio(struct task_struct *tsk)
+int cki_tsk_ioprio(struct task_struct *tsk)
 {
        cki_icls_t *icls = ckrm_get_res_class(class_core(tsk->taskclass),
                                           cki_rcbs.resid, cki_icls_t);
-       return icls->ioprio;
+       return icls->cnt_unused;
 }
 
 static void *cki_alloc(struct ckrm_core_class *core,
@@ -245,15 +298,13 @@ static void *cki_alloc(struct ckrm_core_class *core,
        icls->shares_lock = SPIN_LOCK_UNLOCKED;
 
        if (parent == NULL) {
-               u64 temp;
 
                /* Root class gets same as "normal" CFQ priorities to
                 * retain compatibility of behaviour in the absence of 
                 * other classes
                 */
 
-               icls->ioprio = IOPRIO_NORM;
-               icls->unused = (IOPRIO_NR-1)-IOPRIO_NORM;
+               icls->cnt_guarantee = icls->cnt_unused = IOPRIO_NR-1; 
 
                /* Default gets normal, not minimum */
                //icls->unused = IOPRIO_NORM;
@@ -262,24 +313,27 @@ static void *cki_alloc(struct ckrm_core_class *core,
 
                /* Compute shares in abstract units */
                icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-               temp = (u64) icls->ioprio * icls->shares.total_guarantee;
-               do_div(temp, CKI_IOPRIO_DIV); 
-               icls->shares.my_guarantee = (int) temp;
-
-               //icls->shares.my_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
-               //icls->shares.max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
-               icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-               icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
 
+               // my_guarantee for root is meaningless. Set to default
+               icls->shares.my_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
 
                icls->shares.unused_guarantee = 
-                       icls->shares.total_guarantee - 
-                       icls->shares.my_guarantee;
-               //icls->shares.cur_max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
+                       CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+
+               //temp = (u64) icls->cnt_unused * icls->shares.total_guarantee;
+               //do_div(temp, CKI_IOPRIO_DIV); 
+               // temp now has root's default's share
+               //icls->shares.unused_guarantee = 
+               // icls->shares.total_guarantee - temp; 
+
+               icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+               icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
                icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
 
        } else {
                init_icls_one(icls);
+               /* No propagation to parent needed if icls'
+                  initial share is zero */
        }
        try_module_get(THIS_MODULE);
        return icls;
@@ -315,7 +369,7 @@ static void cki_free(void *res)
        /* Update parent's shares */
        spin_lock(&parres->shares_lock);
        child_guarantee_changed(&parres->shares, icls->shares.my_guarantee, 0);
-       parres->unused += icls->ioprio;
+       parres->cnt_unused += icls->cnt_guarantee;
        spin_unlock(&parres->shares_lock);
 
        kfree(res);
@@ -340,9 +394,7 @@ static int cki_setshare(void *res, struct ckrm_shares *new)
        /* limits not supported */
        if ((new->max_limit != CKRM_SHARE_UNCHANGED)
            || (new->my_limit != CKRM_SHARE_UNCHANGED)) {
-               printk(KERN_ERR "limits changed max_limit %d my_limit %d\n",
-                      new->max_limit, new->my_limit);
-
+               printk(KERN_ERR "limits not supported\n");
                return -EINVAL;
        }
 
@@ -364,17 +416,32 @@ static int cki_setshare(void *res, struct ckrm_shares *new)
        }
 
        rc = set_shares(new, cur, par);
-
        printk(KERN_ERR "rc from set_shares %d\n", rc);
 
-       if (!rc) {
+       if ((!rc) && parres) {
+               
+               if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
+                       parres->cnt_unused = CKRM_SHARE_DONTCARE;
+               } else if (par->total_guarantee) {
+                       u64 temp = (u64) par->unused_guarantee * 
+                               parres->cnt_guarantee;
+                       do_div(temp, par->total_guarantee);
+                       parres->cnt_unused = (int) temp;
+               } else {
+                       parres->cnt_unused = 0;
+               }
+               cki_recalc_propagate(res, parres);
+       
+#if 0
                int old = icls->ioprio;
+               
                rc = cki_recalc(icls,0);
 
                if (!rc && parres) {
                        int raise_tot = icls->ioprio - old ;
-                       parres->unused += raise_tot ;
+                       parres->unused -= raise_tot ;
                }
+#endif
        }
        spin_unlock(&icls->shares_lock);
        if (icls->parent) {
@@ -407,8 +474,8 @@ static int cki_getstats(void *res, struct seq_file *sfile)
        seq_printf(sfile, "%d total_write\n",atomic_read(&icls->stats.blkwr));
 */
        
-       seq_printf(sfile, "%d ioprio\n",icls->ioprio);
-       seq_printf(sfile, "%d unused\n",icls->unused);
+       seq_printf(sfile, "%d total ioprio\n",icls->cnt_guarantee);
+       seq_printf(sfile, "%d unused/default ioprio\n",icls->cnt_unused);
 
        return 0;
 }
@@ -483,7 +550,7 @@ int __init cki_init(void)
                resid = ckrm_register_res_ctlr(clstype, &cki_rcbs);
                if (resid != -1) {
                        cki_rcbs.classtype = clstype;
-                       cki_cfq_set(cki_icls_tsk,cki_icls_ioprio);
+                       cki_cfq_set(cki_tsk_icls,cki_tsk_ioprio);
                }
        }
        
include/linux/ckrm-io.h
index 6d6e127..4b419d1 100644
 typedef void *(*icls_tsk_t) (struct task_struct *tsk);
 typedef int (*icls_ioprio_t) (struct task_struct *tsk);
 
-
 #ifdef CONFIG_CKRM_RES_BLKIO
 
-#ifdef DOES_NOT_WORK_AND_NOT_NEEDED
-extern inline icls_tsk_t cki_tsk_icls;
-extern inline icls_ioprio_t cki_tsk_ioprio;
-#endif
+void *cki_tsk_icls(struct task_struct *tsk);
+int cki_tsk_ioprio(struct task_struct *tsk);
 
 #endif /* CONFIG_CKRM_RES_BLKIO */
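
These two exports are the whole interface the io scheduler consumes:
cki_init() hands them to cki_cfq_set(), and cfq-iosched.c reaches them
through its cfq_hash_key() / cfq_ioprio() macros (presumably via wrappers
installed by cki_cfq_set(); the excerpt does not show that plumbing, and
the cfq side still declares them as cki_hash_key()/cki_ioprio()). A minimal
sketch of a scheduler-side caller, assuming CONFIG_CKRM_RES_BLKIO is
enabled:

	/* illustrative only: classify the submitting task */
	void *cls = cki_tsk_icls(current);	/* class pointer, used as hash key */
	int prio  = cki_tsk_ioprio(current);	/* effective io priority level */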