ckrm_E16 rc1 io controller version 4
author    Marc Fiuczynski <mef@cs.princeton.edu>
          Tue, 28 Sep 2004 12:16:50 +0000 (12:16 +0000)
committer Marc Fiuczynski <mef@cs.princeton.edu>
          Tue, 28 Sep 2004 12:16:50 +0000 (12:16 +0000)
25 files changed:
arch/i386/kernel/entry.S
arch/ppc/kernel/misc.S
drivers/block/Makefile
drivers/block/cfq-iosched.c
drivers/block/ckrm-io.c
drivers/block/elevator.c
drivers/block/ll_rw_blk.c
fs/exec.c
include/asm-i386/unistd.h
include/asm-ppc/unistd.h
include/asm-x86_64/unistd.h
include/linux/ckrm-io.h
include/linux/elevator.h
include/linux/fs.h
include/linux/init_task.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/page-flags.h
include/linux/sched.h
init/Kconfig
kernel/ckrm/Makefile
kernel/exit.c
kernel/fork.c
mm/page_alloc.c
mm/vmscan.c

index 7b68563..d660581 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -886,5 +886,7 @@ ENTRY(sys_call_table)
        .long sys_mq_notify
        .long sys_mq_getsetattr
        .long sys_ni_syscall            /* reserved for kexec */
+       .long sys_ioprio_set
+       .long sys_ioprio_get            /* 285 */
 
 syscall_table_size=(.-sys_call_table)
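
These two table slots land at positions 284 and 285 on i386 (the /* 285 */ comment above and the unistd.h hunk further down agree). A minimal user-space sketch exercising the new calls, assuming the single-int prototypes that the ll_rw_blk.c hunk below adds:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_ioprio_set 284         /* i386 numbers, per the table above */
    #define __NR_ioprio_get 285

    int main(void)
    {
            /* level 10 would be the 50% class under the 0..20 scheme */
            if (syscall(__NR_ioprio_set, 10) < 0)
                    perror("ioprio_set");
            printf("ioprio now %ld\n", (long) syscall(__NR_ioprio_get));
            return 0;
    }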
index 873199e..32e1e40 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1450,3 +1450,5 @@ _GLOBAL(sys_call_table)
        .long sys_mq_notify
        .long sys_mq_getsetattr
        .long sys_ni_syscall            /* 268 reserved for sys_kexec_load */
+       .long sys_ioprio_set
+       .long sys_ioprio_get            
index 2654b5b..c66498b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
 # kblockd threads
 #
 
-obj-y  := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+obj-y  := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o ckrm-iostub.o
 
 obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
 obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
+obj-$(CONFIG_CKRM_RES_BLKIO)   += ckrm-io.o
 obj-$(CONFIG_MAC_FLOPPY)       += swim3.o
 obj-$(CONFIG_BLK_DEV_FD)       += floppy.o
 obj-$(CONFIG_BLK_DEV_FD98)     += floppy98.o
index 068f4ea..7b45a80 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -6,6 +6,18 @@
  *  Based on ideas from a previously unfinished io
  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
  *
+ *  IO priorities are supported, from 0% to 100% in 5% increments. Both
+ *  endpoint values have special meaning - the 0% class is only allowed to
+ *  do io when no one else wants to use the disk. 100% is considered
+ *  real-time io, and always gets priority. The default process io rate is
+ *  95%. In the absence of other io, a class may consume 100% of the disk
+ *  bandwidth regardless. Within a class, bandwidth is distributed equally
+ *  among the member queues.
+ *
+ * TODO:
+ *     - cfq_select_requests() needs some work for 5-95% io
+ *     - barriers not supported
+ *     - export grace periods in ms, not jiffies
+ *
  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
  */
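
The percentage classes described above map onto small integers internally. A hypothetical standalone helper showing the correspondence, assuming IOPRIO_NR == 21 with level 0 = idle and level 20 = realtime, which the loop bounds and the p0..p20 sysfs entries later in this patch suggest:

    #include <stdio.h>

    #define IOPRIO_NR   21              /* assumed: 0%..100% in 5% steps */
    #define IOPRIO_IDLE 0
    #define IOPRIO_RT   (IOPRIO_NR - 1)

    static int pct_to_level(int pct)    /* pct in {0, 5, ..., 100} */
    {
            return pct / 5;             /* 0 -> idle, 100 -> rt */
    }

    int main(void)
    {
            printf("idle=%d default=%d rt=%d\n",
                   pct_to_level(0), pct_to_level(95), pct_to_level(100));
            return 0;
    }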
 #include <linux/kernel.h>
 #include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/mempool.h>
+#include <asm/div64.h>
+
+#if IOPRIO_NR > BITS_PER_LONG
+#error Cannot support this many io priority levels
+#endif
+
+#define LIMIT_DEBUG   1
 
 /*
  * tunables
  */
-static int cfq_quantum = 4;
-static int cfq_queued = 8;
+static int cfq_quantum = 6;
+static int cfq_quantum_io = 256;
+static int cfq_idle_quantum = 1;
+static int cfq_idle_quantum_io = 64;
+static int cfq_queued = 4;
+static int cfq_grace_rt = HZ / 100 ?: 1;
+static int cfq_grace_idle = HZ / 10;
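
For reference, the two grace defaults are expressed in jiffies, so their real-time value depends on HZ; the ?: fallback keeps cfq_grace_rt at one jiffy on low-HZ builds. A standalone sketch of the arithmetic, with the HZ values assumed:

    #include <stdio.h>

    static void grace(int hz)
    {
            int grace_rt = (hz / 100) ? (hz / 100) : 1; /* HZ / 100 ?: 1 */
            int grace_idle = hz / 10;

            printf("HZ=%4d: grace_rt=%d, grace_idle=%d jiffies\n",
                   hz, grace_rt, grace_idle);
    }

    int main(void)
    {
            grace(100);                 /* -> 1 and 10 */
            grace(1000);                /* -> 10 and 100 */
            return 0;
    }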
 
 #define CFQ_QHASH_SHIFT                6
 #define CFQ_QHASH_ENTRIES      (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry)        list_entry((entry), struct cfq_queue, cfq_hash)
+#define list_entry_qhash(entry)        hlist_entry((entry), struct cfq_queue, cfq_hash)
 
 #define CFQ_MHASH_SHIFT                8
 #define CFQ_MHASH_BLOCK(sec)   ((sec) >> 3)
 #define CFQ_MHASH_ENTRIES      (1 << CFQ_MHASH_SHIFT)
 #define CFQ_MHASH_FN(sec)      (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
-#define ON_MHASH(crq)          !list_empty(&(crq)->hash)
 #define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)   list_entry((ptr), struct cfq_rq, hash)
+#define list_entry_hash(ptr)   hlist_entry((ptr), struct cfq_rq, hash)
 
 #define list_entry_cfqq(ptr)   list_entry((ptr), struct cfq_queue, cfq_list)
+#define list_entry_prio(ptr)   list_entry((ptr), struct cfq_rq, prio_list)
+
+#define cfq_account_io(crq)    \
+       ((crq)->ioprio != IOPRIO_IDLE && (crq)->ioprio != IOPRIO_RT)
+
+/* define to be 50 ms for now; make tunable later */
+#define CFQ_EPOCH              50000
+/* Needs to be made tunable right away, in MB/s */
+#define CFQ_DISKBW             10
+/* Temporary global limit, as percent of available b/w, for each "class" */
+#define CFQ_TEMPLIM            10
+
+/*
+ * defines how we distribute bandwidth (can be tgid, uid, etc)
+ */
+
+/* FIXME: change hash_key to be sizeof(void *) rather than sizeof(int) 
+ * otherwise the cast of cki_tsk_icls will not work reliably on 64-bit arches.
+ * OR, change cki_tsk_icls to return ints (will need another id space to be 
+ * managed)
+ */
+
+#if defined(CONFIG_CKRM_RES_BLKIO) || defined(CONFIG_CKRM_RES_BLKIO_MODULE)
+extern inline void *cki_hash_key(struct task_struct *tsk);
+extern inline int cki_ioprio(struct task_struct *tsk);
+#define cfq_hash_key(current)   ((int)cki_hash_key((current)))
+#define cfq_ioprio(current)    (cki_ioprio((current)))
+
+#else
+#define cfq_hash_key(current)  ((current)->tgid)
+
+/*
+ * move to io_context
+ */
+#define cfq_ioprio(current)    ((current)->ioprio)
+#endif
 
-#define RQ_DATA(rq)            ((struct cfq_rq *) (rq)->elevator_private)
+#define CFQ_WAIT_RT    0
+#define CFQ_WAIT_NORM  1
 
 static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static mempool_t *cfq_mpool;
 
+/*
+ * defines an io priority level
+ */
+struct io_prio_data {
+       struct list_head rr_list;
+       int busy_queues;
+       int busy_rq;
+       unsigned long busy_sectors;
+       
+       /* requests, sectors and queues
+        * added (in), dispatched/deleted (out)
+        * at this priority level.
+        */
+       atomic_t cum_rq_in, cum_rq_out;
+       atomic_t cum_sectors_in, cum_sectors_out;
+       atomic_t cum_queues_in, cum_queues_out;
+
+#ifdef LIMIT_DEBUG
+       int nskip;
+       unsigned long navsec;
+       unsigned long csectorate;
+       unsigned long lsectorate;
+#endif
+
+       struct list_head prio_list;
+       int last_rq;
+       int last_sectors;
+};
+
+/*
+ * per-request queue structure
+ */
 struct cfq_data {
        struct list_head rr_list;
        struct list_head *dispatch;
-       struct list_head *cfq_hash;
+       struct hlist_head *cfq_hash;
+       struct hlist_head *crq_hash;
+       mempool_t *crq_pool;
 
-       struct list_head *crq_hash;
+       struct io_prio_data cid[IOPRIO_NR];
 
-       unsigned int busy_queues;
-       unsigned int max_queued;
+       /*
+        * total number of busy queues and requests
+        */
+       int busy_rq;
+       int busy_queues;
+       unsigned long busy_sectors;
 
-       mempool_t *crq_pool;
 
        request_queue_t *queue;
+       unsigned long rq_starved_mask;
+
+       /*
+        * grace period handling
+        */
+       struct timer_list timer;
+       unsigned long wait_end;
+       unsigned long flags;
+       struct work_struct work;
 
        /*
         * tunables
         */
        unsigned int cfq_quantum;
+       unsigned int cfq_quantum_io;
+       unsigned int cfq_idle_quantum;
+       unsigned int cfq_idle_quantum_io;
        unsigned int cfq_queued;
+       unsigned int cfq_grace_rt;
+       unsigned int cfq_grace_idle;
+
+       unsigned long cfq_epoch;        /* duration for limit enforcement */
+       unsigned long cfq_epochsectors; /* max sectors dispatchable/epoch */
 };
 
+/*
+ * per-class structure
+ */
 struct cfq_queue {
-       struct list_head cfq_hash;
        struct list_head cfq_list;
+       struct hlist_node cfq_hash;
+       int hash_key;
        struct rb_root sort_list;
-       int pid;
        int queued[2];
-#if 0
-       /*
-        * with a simple addition like this, we can do io priorities. almost.
-        * does need a split request free list, too.
-        */
-       int io_prio
-#endif
+       int ioprio;
+
+       unsigned long avsec;            /* avg sectors dispatched/epoch */
+       unsigned long long lastime;     /* timestamp of last request served */
+       unsigned long sectorate;        /* limit for sectors served/epoch */
+       int skipped;                    /* queue skipped at last dispatch ? */
 };
 
+/*
+ * per-request structure
+ */
 struct cfq_rq {
+       struct cfq_queue *cfq_queue;
        struct rb_node rb_node;
+       struct hlist_node hash;
        sector_t rb_key;
 
        struct request *request;
-
-       struct cfq_queue *cfq_queue;
-
-       struct list_head hash;
+       struct list_head prio_list;
+       unsigned long nr_sectors;
+       int ioprio;
 };
 
 static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
@@ -103,18 +223,13 @@ static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
-static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
-{
-       list_del_init(&crq->hash);
-}
-
 static inline void cfq_del_crq_hash(struct cfq_rq *crq)
 {
-       if (ON_MHASH(crq))
-               __cfq_del_crq_hash(crq);
+       hlist_del_init(&crq->hash);
 }
 
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
+static inline void
+cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
 {
        cfq_del_crq_hash(crq);
 
@@ -125,27 +240,26 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
        struct request *rq = crq->request;
+       const int hash_idx = CFQ_MHASH_FN(rq_hash_key(rq));
 
-       BUG_ON(ON_MHASH(crq));
-
-       list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
+       BUG_ON(!hlist_unhashed(&crq->hash));
+       hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
 }
 
 static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 {
-       struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
-       struct list_head *entry, *next = hash_list->next;
+       struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+       struct hlist_node *entry, *next;
 
-       while ((entry = next) != hash_list) {
+       hlist_for_each_safe(entry, next, hash_list) {
                struct cfq_rq *crq = list_entry_hash(entry);
                struct request *__rq = crq->request;
 
-               next = entry->next;
-
-               BUG_ON(!ON_MHASH(crq));
+               BUG_ON(hlist_unhashed(&crq->hash));
 
                if (!rq_mergeable(__rq)) {
-                       __cfq_del_crq_hash(crq);
+                       cfq_del_crq_hash(crq);
                        continue;
                }
 
@@ -159,20 +273,27 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_NONE                (2)
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
-#define ON_RB(node)    ((node)->rb_color != RB_NONE)
+#define RB_EMPTY(node)         ((node)->rb_node == NULL)
 #define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)          (rq)->sector
 
-static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+static void
+cfq_del_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
 {
-       if (ON_RB(&crq->rb_node)) {
+       if (crq->cfq_queue) {
+               crq->cfq_queue = NULL;
+
+               if (cfq_account_io(crq)) {
+                       cfqd->busy_rq--;
+                       cfqd->busy_sectors -= crq->nr_sectors;
+                       cfqd->cid[crq->ioprio].busy_rq--;
+                       cfqd->cid[crq->ioprio].busy_sectors -= crq->nr_sectors;
+               }
+               atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_out));
+               atomic_add(crq->nr_sectors,
+                          &(cfqd->cid[crq->ioprio].cum_sectors_out));
                cfqq->queued[rq_data_dir(crq->request)]--;
                rb_erase(&crq->rb_node, &cfqq->sort_list);
-               crq->cfq_queue = NULL;
        }
 }
 
@@ -205,12 +326,22 @@ cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
        struct request *rq = crq->request;
        struct cfq_rq *__alias;
 
-       crq->rb_key = rq_rb_key(rq);
+
        cfqq->queued[rq_data_dir(rq)]++;
+       if (cfq_account_io(crq)) {
+               cfqd->busy_rq++;
+               cfqd->busy_sectors += crq->nr_sectors;
+               cfqd->cid[crq->ioprio].busy_rq++;
+               cfqd->cid[crq->ioprio].busy_sectors += crq->nr_sectors;
+       }
+       atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_in));
+       atomic_add(crq->nr_sectors,
+                  &(cfqd->cid[crq->ioprio].cum_sectors_in));
 retry:
        __alias = __cfq_add_crq_rb(cfqq, crq);
        if (!__alias) {
                rb_insert_color(&crq->rb_node, &cfqq->sort_list);
+               crq->rb_key = rq_rb_key(rq);
                crq->cfq_queue = cfqq;
                return;
        }
@@ -222,7 +353,7 @@ retry:
 static struct request *
 cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
 {
-       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
        struct rb_node *n;
 
        if (!cfqq)
@@ -247,16 +378,31 @@ out:
 static void cfq_remove_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
 
        if (crq) {
-               struct cfq_queue *cfqq = crq->cfq_queue;
 
                cfq_remove_merge_hints(q, crq);
+               list_del_init(&crq->prio_list);
                list_del_init(&rq->queuelist);
 
-               if (cfqq) {
-                       cfq_del_crq_rb(cfqq, crq);
+               /*
+                * set a grace period timer to allow realtime io to make real
+                * progress, if we release an rt request. for normal request,
+                * set timer so idle io doesn't interfere with other io
+                */
+               if (crq->ioprio == IOPRIO_RT) {
+                       set_bit(CFQ_WAIT_RT, &cfqd->flags);
+                       cfqd->wait_end = jiffies + cfqd->cfq_grace_rt;
+               } else if (crq->ioprio != IOPRIO_IDLE) {
+                       set_bit(CFQ_WAIT_NORM, &cfqd->flags);
+                       cfqd->wait_end = jiffies + cfqd->cfq_grace_idle;
+               }
+
+               if (crq->cfq_queue) {
+                       struct cfq_queue *cfqq = crq->cfq_queue;
+
+                       cfq_del_crq_rb(cfqd, cfqq, crq);
 
                        if (RB_EMPTY(&cfqq->sort_list))
                                cfq_put_queue(cfqd, cfqq);
@@ -306,18 +452,26 @@ out_insert:
 static void cfq_merged_request(request_queue_t *q, struct request *req)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(req);
+       struct cfq_rq *crq = RQ_ELV_DATA(req);
+       int tmp;
 
        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);
 
-       if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+       if (crq->cfq_queue && (rq_rb_key(req) != crq->rb_key)) {
                struct cfq_queue *cfqq = crq->cfq_queue;
 
-               cfq_del_crq_rb(cfqq, crq);
+               cfq_del_crq_rb(cfqd, cfqq, crq);
                cfq_add_crq_rb(cfqd, cfqq, crq);
        }
 
+       tmp = req->hard_nr_sectors - crq->nr_sectors;
+       cfqd->busy_sectors += tmp;
+       cfqd->cid[crq->ioprio].busy_sectors += tmp;
+       atomic_add(tmp,&(cfqd->cid[crq->ioprio].cum_sectors_in));
+
+       crq->nr_sectors = req->hard_nr_sectors;
+
        q->last_merge = req;
 }
 
@@ -329,6 +483,9 @@ cfq_merged_requests(request_queue_t *q, struct request *req,
        cfq_remove_request(q, next);
 }
 
+/*
+ * sort into dispatch list, in optimal ascending order
+ */
 static void
 cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct cfq_rq *crq)
@@ -336,7 +493,7 @@ cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct list_head *head = cfqd->dispatch, *entry = head;
        struct request *__rq;
 
-       cfq_del_crq_rb(cfqq, crq);
+       cfq_del_crq_rb(cfqd, cfqq, crq);
        cfq_remove_merge_hints(cfqd->queue, crq);
 
        if (!list_empty(head)) {
@@ -359,47 +516,219 @@ link:
        list_add_tail(&crq->request->queuelist, entry);
 }
 
-static inline void
+/*
+ * remove from io scheduler core and put on dispatch list for service
+ */
+static inline int
 __cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
                        struct cfq_queue *cfqq)
 {
-       struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+       struct cfq_rq *crq;
+       unsigned long long ts, gap;
+       unsigned long newavsec;
+
+       crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+
+#if 1
+       /* Determine if the queue should be skipped for being over its share */
+       ts = sched_clock();
+       gap = ts - cfqq->lastime;
+#ifdef LIMIT_DEBUG
+       cfqq->sectorate = (cfqd->cfq_epochsectors 
+                          * CFQ_TEMPLIM)/100;
+       
+#endif
+       if ((gap >= cfqd->cfq_epoch) || (gap < 0)) {
+               cfqq->avsec = crq->nr_sectors ; 
+               cfqq->lastime = ts;
+       } else {
+               u64 tmp;
+               /* Age the old average and accumulate the request to be served */
+
+//             tmp = (u64) (cfqq->avsec * gap) ;
+//             do_div(tmp, cfqd->cfq_epoch);
+               newavsec = (unsigned long)(cfqq->avsec >> 1) + crq->nr_sectors;
+//             if (crq->ioprio >= 0 && crq->ioprio <= 20)
+//                     cfqd->cid[crq->ioprio].lsectorate = newavsec; 
+//             atomic_set(&(cfqd->cid[crq->ioprio].lsectorate),
+//                        newavsec);
+
+               if ((newavsec < cfqq->sectorate) || cfqq->skipped) {
+                       cfqq->avsec = newavsec;
+                       cfqq->lastime = ts;
+                       cfqq->skipped = 0;
+               } else {
+                       /* queue over its share; skip once */
+                       cfqq->skipped = 1;
+#ifdef LIMIT_DEBUG     
+//                     atomic_inc(&(cfqd->cid[crq->ioprio].nskip));
+//                     if (crq->ioprio >= 0 && crq->ioprio <= 20)
+//                             cfqd->cid[crq->ioprio].nskip++;
+#endif
+                       return 0;
+               }
+       }
+#endif
+
+#ifdef LIMIT_DEBUG
+//     if (crq->ioprio >= 0 && crq->ioprio <= 20) {
+//             cfqd->cid[crq->ioprio].navsec = cfqq->avsec;
+//             cfqd->cid[crq->ioprio].csectorate = cfqq->sectorate;
+//     }
 
+//     atomic_set(&(cfqd->cid[crq->ioprio].navsec),cfqq->avsec);
+//     atomic_set(&(cfqd->cid[crq->ioprio].csectorate),cfqq->sectorate);
+#endif
        cfq_dispatch_sort(cfqd, cfqq, crq);
+
+       /*
+        * technically, for IOPRIO_RT we don't need to add it to the list.
+        */
+       list_add_tail(&crq->prio_list, &cfqd->cid[cfqq->ioprio].prio_list);
+       return crq->nr_sectors;
 }
 
-static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
+static int
+cfq_dispatch_requests(request_queue_t *q, int prio, int max_rq, int max_sectors)
 {
-       struct cfq_queue *cfqq;
-       struct list_head *entry, *tmp;
-       int ret, queued, good_queues;
-
-       if (list_empty(&cfqd->rr_list))
-               return 0;
+       struct cfq_data *cfqd = q->elevator.elevator_data;
+       struct list_head *plist = &cfqd->cid[prio].rr_list;
+       struct list_head *entry, *nxt;
+       int q_rq, q_io;
+       int ret ;
 
-       queued = ret = 0;
-restart:
-       good_queues = 0;
-       list_for_each_safe(entry, tmp, &cfqd->rr_list) {
-               cfqq = list_entry_cfqq(cfqd->rr_list.next);
+       /*
+        * for each queue at this prio level, dispatch a request
+        */
+       q_rq = q_io = 0;
+       list_for_each_safe(entry, nxt, plist) {
+               struct cfq_queue *cfqq = list_entry_cfqq(entry);
 
                BUG_ON(RB_EMPTY(&cfqq->sort_list));
 
-               __cfq_dispatch_requests(q, cfqd, cfqq);
+               ret = __cfq_dispatch_requests(q, cfqd, cfqq);
+               if (ret <= 0) {
+                       /* skip queue; could optimize more by moving
+                        * it to the end of plist */
+                       continue;
+               }
+               q_io += ret ;
+               q_rq++ ;
 
                if (RB_EMPTY(&cfqq->sort_list))
                        cfq_put_queue(cfqd, cfqq);
-               else
-                       good_queues++;
+               /*
+                * if we hit the queue limit, put the string of serviced
+                * queues at the back of the pending list
+                */
+               if (q_io >= max_sectors || q_rq >= max_rq) {
+                       struct list_head *prv = nxt->prev;
 
-               queued++;
-               ret = 1;
+                       if (prv != plist) {
+                               list_del(plist);
+                               list_add(plist, prv);
+                       }
+                       break;
+               }
        }
 
-       if ((queued < cfqd->cfq_quantum) && good_queues)
-               goto restart;
+       cfqd->cid[prio].last_rq = q_rq;
+       cfqd->cid[prio].last_sectors = q_io;
+       return q_rq;
+}
 
-       return ret;
+/*
+ * try to move some requests to the dispatch list. return 0 on success
+ */
+static int cfq_select_requests(request_queue_t *q, struct cfq_data *cfqd)
+{
+       int queued, busy_rq, busy_sectors, i;
+
+       /*
+        * if there's any realtime io, only schedule that
+        */
+       if (cfq_dispatch_requests(q, IOPRIO_RT, cfqd->cfq_quantum, cfqd->cfq_quantum_io))
+               return 1;
+
+       /*
+        * if RT io was last serviced and grace time hasn't expired,
+        * arm the timer to restart queueing if no other RT io has been
+        * submitted in the mean time
+        */
+       if (test_bit(CFQ_WAIT_RT, &cfqd->flags)) {
+               if (time_before(jiffies, cfqd->wait_end)) {
+                       mod_timer(&cfqd->timer, cfqd->wait_end);
+                       return 0;
+               }
+               clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+       }
+
+       /*
+        * for each priority level, calculate number of requests we
+        * are allowed to put into service.
+        */
+       queued = 0;
+       busy_rq = cfqd->busy_rq;
+       busy_sectors = cfqd->busy_sectors;
+       for (i = IOPRIO_RT - 1; i > IOPRIO_IDLE; i--) {
+               const int o_rq = busy_rq - cfqd->cid[i].busy_rq;
+               const int o_sectors = busy_sectors - cfqd->cid[i].busy_sectors;
+               int q_rq = cfqd->cfq_quantum * (i + 1) / IOPRIO_NR;
+               int q_io = cfqd->cfq_quantum_io * (i + 1) / IOPRIO_NR;
+
+               /*
+                * no need to keep iterating the list, if there are no
+                * requests pending anymore
+                */
+               if (!cfqd->busy_rq)
+                       break;
+
+               /*
+                * find out how many requests and sectors we are allowed to
+                * service
+                */
+               if (o_rq)
+                       q_rq = o_rq * (i + 1) / IOPRIO_NR;
+               if (q_rq > cfqd->cfq_quantum)
+                       q_rq = cfqd->cfq_quantum;
+
+               if (o_sectors)
+                       q_io = o_sectors * (i + 1) / IOPRIO_NR;
+               if (q_io > cfqd->cfq_quantum_io)
+                       q_io = cfqd->cfq_quantum_io;
+
+               /*
+                * average with last dispatched for fairness
+                */
+               if (cfqd->cid[i].last_rq != -1)
+                       q_rq = (cfqd->cid[i].last_rq + q_rq) / 2;
+               if (cfqd->cid[i].last_sectors != -1)
+                       q_io = (cfqd->cid[i].last_sectors + q_io) / 2;
+
+               queued += cfq_dispatch_requests(q, i, q_rq, q_io);
+       }
+
+       if (queued)
+               return 1;
+
+       /*
+        * only allow dispatch of idle io, if the queue has been idle from
+        * servicing RT or normal io for the grace period
+        */
+       if (test_bit(CFQ_WAIT_NORM, &cfqd->flags)) {
+               if (time_before(jiffies, cfqd->wait_end)) {
+                       mod_timer(&cfqd->timer, cfqd->wait_end);
+                       return 0;
+               }
+               clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+       }
+
+       /*
+        * if we found nothing to do, allow idle io to be serviced
+        */
+       if (cfq_dispatch_requests(q, IOPRIO_IDLE, cfqd->cfq_idle_quantum, cfqd->cfq_idle_quantum_io))
+               return 1;
+
+       return 0;
 }
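
The (i + 1) / IOPRIO_NR scaling in cfq_select_requests() gives each level a quantum share proportional to its priority before the averaging step. A standalone sketch of the nominal allowances under the default tunables, assuming IOPRIO_NR == 21:

    #include <stdio.h>

    #define IOPRIO_NR 21                /* assumed */

    int main(void)
    {
            int cfq_quantum = 6, cfq_quantum_io = 256;  /* defaults above */
            int i;

            /* rt and idle are special-cased, so skip both ends */
            for (i = IOPRIO_NR - 2; i > 0; i--) {
                    int q_rq = cfq_quantum * (i + 1) / IOPRIO_NR;
                    int q_io = cfq_quantum_io * (i + 1) / IOPRIO_NR;

                    printf("prio %2d: %d requests, %d sectors\n",
                           i, q_rq, q_io);
            }
            return 0;
    }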
 
 static struct request *cfq_next_request(request_queue_t *q)
@@ -410,61 +739,82 @@ static struct request *cfq_next_request(request_queue_t *q)
        if (!list_empty(cfqd->dispatch)) {
                struct cfq_rq *crq;
 dispatch:
+               /*
+                * end grace period, we are servicing a request
+                */
+               del_timer(&cfqd->timer);
+               clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+               clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+
+               BUG_ON(list_empty(cfqd->dispatch));
                rq = list_entry_rq(cfqd->dispatch->next);
 
-               crq = RQ_DATA(rq);
-               if (crq)
-                       cfq_remove_merge_hints(q, crq);
+               BUG_ON(q->last_merge == rq);
+               crq = RQ_ELV_DATA(rq);
+               if (crq) {
+                       BUG_ON(!hlist_unhashed(&crq->hash));
+                       list_del_init(&crq->prio_list);
+               }
 
                return rq;
        }
 
-       if (cfq_dispatch_requests(q, cfqd))
+       /*
+        * we moved requests to the dispatch list, go back and serve one
+        */
+       if (cfq_select_requests(q, cfqd))
                goto dispatch;
 
        return NULL;
 }
 
 static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey, const int hashval)
 {
-       struct list_head *hash_list = &cfqd->cfq_hash[hashval];
-       struct list_head *entry;
+       struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
+       struct hlist_node *entry;
 
-       list_for_each(entry, hash_list) {
+       hlist_for_each(entry, hash_list) {
                struct cfq_queue *__cfqq = list_entry_qhash(entry);
 
-               if (__cfqq->pid == pid)
+               if (__cfqq->hash_key == hashkey)
                        return __cfqq;
        }
 
        return NULL;
 }
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
+
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey)
 {
-       const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+       const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
 
-       return __cfq_find_cfq_hash(cfqd, pid, hashval);
+       return __cfq_find_cfq_hash(cfqd, hashkey, hashval);
 }
 
 static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        cfqd->busy_queues--;
+       WARN_ON(cfqd->busy_queues < 0);
+
+       cfqd->cid[cfqq->ioprio].busy_queues--;
+       WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues < 0);
+       atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
+
        list_del(&cfqq->cfq_list);
-       list_del(&cfqq->cfq_hash);
+       hlist_del(&cfqq->cfq_hash);
        mempool_free(cfqq, cfq_mpool);
 }
 
-static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
+static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int hashkey,
                                         int gfp_mask)
 {
-       const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+       const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq, *new_cfqq = NULL;
        request_queue_t *q = cfqd->queue;
 
 retry:
-       cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
+       cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
 
        if (!cfqq) {
                if (new_cfqq) {
@@ -478,13 +828,15 @@ retry:
                } else
                        return NULL;
 
-               INIT_LIST_HEAD(&cfqq->cfq_hash);
+               memset(cfqq, 0, sizeof(*cfqq));
+               INIT_HLIST_NODE(&cfqq->cfq_hash);
                INIT_LIST_HEAD(&cfqq->cfq_list);
-               RB_CLEAR_ROOT(&cfqq->sort_list);
-
-               cfqq->pid = pid;
-               cfqq->queued[0] = cfqq->queued[1] = 0;
-               list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+               cfqq->hash_key = cfq_hash_key(current);
+               cfqq->ioprio = cfq_ioprio(current);
+               cfqq->avsec = 0 ;
+               cfqq->lastime = sched_clock();
+               cfqq->sectorate = (cfqd->cfq_epochsectors * CFQ_TEMPLIM)/100;
+               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
        }
 
        if (new_cfqq)
@@ -493,31 +845,63 @@ retry:
        return cfqq;
 }
 
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int hashkey,
                                       int gfp_mask)
 {
        request_queue_t *q = cfqd->queue;
        struct cfq_queue *cfqq;
 
        spin_lock_irq(q->queue_lock);
-       cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
+       cfqq = __cfq_get_queue(cfqd, hashkey, gfp_mask);
        spin_unlock_irq(q->queue_lock);
 
        return cfqq;
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
+static void
+__cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
 {
+       const int prio = crq->ioprio;
        struct cfq_queue *cfqq;
 
-       cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
+       cfqq = __cfq_get_queue(cfqd, cfq_hash_key(current), GFP_ATOMIC);
        if (cfqq) {
+
+               /*
+                * not too good...
+                */
+               if (prio > cfqq->ioprio) {
+                       printk("prio hash collision %d %d\n", 
+                              prio, cfqq->ioprio);
+                       if (!list_empty(&cfqq->cfq_list)) {
+                               cfqd->cid[cfqq->ioprio].busy_queues--;
+                               WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues<0);
+                               atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
+                               cfqd->cid[prio].busy_queues++;
+                               atomic_inc(&(cfqd->cid[prio].cum_queues_in));
+                               list_move_tail(&cfqq->cfq_list, 
+                                              &cfqd->cid[prio].rr_list);
+                       }
+                       cfqq->ioprio = prio;
+               }
+
                cfq_add_crq_rb(cfqd, cfqq, crq);
 
                if (list_empty(&cfqq->cfq_list)) {
-                       list_add(&cfqq->cfq_list, &cfqd->rr_list);
+                       list_add_tail(&cfqq->cfq_list, 
+                                     &cfqd->cid[prio].rr_list);
+                       cfqd->cid[prio].busy_queues++;
+                       atomic_inc(&(cfqd->cid[prio].cum_queues_in));
                        cfqd->busy_queues++;
                }
+
+               if (rq_mergeable(crq->request)) {
+                       cfq_add_crq_hash(cfqd, crq);
+                       
+                       if (!q->last_merge)
+                               q->last_merge = crq->request;
+               }
+
        } else {
                /*
                 * this should only happen if the request wasn't allocated
@@ -528,16 +912,57 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
        }
 }
 
+static void cfq_reenqueue(request_queue_t *q, struct cfq_data *cfqd, int prio)
+{
+       struct list_head *prio_list = &cfqd->cid[prio].prio_list;
+       struct list_head *entry, *tmp;
+
+       list_for_each_safe(entry, tmp, prio_list) {
+               struct cfq_rq *crq = list_entry_prio(entry);
+
+               list_del_init(entry);
+               list_del_init(&crq->request->queuelist);
+               __cfq_enqueue(q, cfqd, crq);
+       }
+}
+
+static void
+cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
+{
+       const int prio = cfq_ioprio(current);
+
+       crq->ioprio = prio;
+       crq->nr_sectors = crq->request->hard_nr_sectors;
+       __cfq_enqueue(q, cfqd, crq);
+
+       if (prio == IOPRIO_RT) {
+               int i;
+
+               /*
+                * realtime io gets priority, move all other io back
+                */
+               for (i = IOPRIO_IDLE; i < IOPRIO_RT; i++)
+                       cfq_reenqueue(q, cfqd, i);
+       } else if (prio != IOPRIO_IDLE) {
+               /*
+                * check if we need to move idle io back into queue
+                */
+               cfq_reenqueue(q, cfqd, IOPRIO_IDLE);
+       }
+}
+
 static void
 cfq_insert_request(request_queue_t *q, struct request *rq, int where)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
 
        switch (where) {
                case ELEVATOR_INSERT_BACK:
+#if 0
                        while (cfq_dispatch_requests(q, cfqd))
                                ;
+#endif
                        list_add_tail(&rq->queuelist, cfqd->dispatch);
                        break;
                case ELEVATOR_INSERT_FRONT:
@@ -545,26 +970,20 @@ cfq_insert_request(request_queue_t *q, struct request *rq, int where)
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
-                       cfq_enqueue(cfqd, crq);
+                       cfq_enqueue(q, cfqd, crq);
                        break;
                default:
-                       printk("%s: bad insert point %d\n", __FUNCTION__,where);
+                       printk("%s: bad insert point %d\n", 
+                              __FUNCTION__,where);
                        return;
        }
-
-       if (rq_mergeable(rq)) {
-               cfq_add_crq_hash(cfqd, crq);
-
-               if (!q->last_merge)
-                       q->last_merge = rq;
-       }
 }
 
 static int cfq_queue_empty(request_queue_t *q)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
 
-       if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
+       if (list_empty(cfqd->dispatch) && !cfqd->busy_queues)
                return 1;
 
        return 0;
@@ -573,7 +992,7 @@ static int cfq_queue_empty(request_queue_t *q)
 static struct request *
 cfq_former_request(request_queue_t *q, struct request *rq)
 {
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
        struct rb_node *rbprev = rb_prev(&crq->rb_node);
 
        if (rbprev)
@@ -585,7 +1004,7 @@ cfq_former_request(request_queue_t *q, struct request *rq)
 static struct request *
 cfq_latter_request(request_queue_t *q, struct request *rq)
 {
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
        struct rb_node *rbnext = rb_next(&crq->rb_node);
 
        if (rbnext)
@@ -594,27 +1013,46 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
        return NULL;
 }
 
+static void cfq_queue_congested(request_queue_t *q)
+{
+       struct cfq_data *cfqd = q->elevator.elevator_data;
+
+       set_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
+}
+
 static int cfq_may_queue(request_queue_t *q, int rw)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
        struct cfq_queue *cfqq;
-       int ret = 1;
+       const int prio = cfq_ioprio(current);
+       int limit, ret = 1;
 
        if (!cfqd->busy_queues)
                goto out;
 
-       cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
-       if (cfqq) {
-               int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
+       cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
+       if (!cfqq)
+               goto out;
 
-               if (limit < 3)
-                       limit = 3;
-               else if (limit > cfqd->max_queued)
-                       limit = cfqd->max_queued;
 
-               if (cfqq->queued[rw] > limit)
-                       ret = 0;
-       }
+       /*
+        * if higher or equal prio io is sleeping waiting for a request, don't
+        * allow this one to allocate one. as long as ll_rw_blk does fifo
+        * waitqueue wakeups this should work...
+        */
+       if (cfqd->rq_starved_mask & ~((1 << prio) - 1))
+               goto out;
+
+       if (cfqq->queued[rw] < cfqd->cfq_queued || !cfqd->cid[prio].busy_queues)
+               goto out;
+
+       limit = q->nr_requests * (prio + 1) / IOPRIO_NR;
+       limit /= cfqd->cid[prio].busy_queues;
+       if (cfqq->queued[rw] > limit)
+               ret = 0;
 out:
        return ret;
 }
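
The allocation limit in cfq_may_queue() scales with both the caller's priority and the number of busy queues at that level. A rough worked example, assuming IOPRIO_NR == 21 and a stock nr_requests of 128 before the <<= 2 in cfq_init() below:

    #include <stdio.h>

    #define IOPRIO_NR 21                /* assumed */

    int main(void)
    {
            int nr_requests = 128 << 2; /* assumed default, quadrupled by cfq_init */
            int prio = 10, busy_queues = 4;
            int limit = nr_requests * (prio + 1) / IOPRIO_NR / busy_queues;

            printf("limit = %d requests per queue\n", limit);  /* -> 67 */
            return 0;
    }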
@@ -622,13 +1060,13 @@ out:
 static void cfq_put_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator.elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_rq *crq = RQ_ELV_DATA(rq);
        struct request_list *rl;
        int other_rw;
 
        if (crq) {
                BUG_ON(q->last_merge == rq);
-               BUG_ON(ON_MHASH(crq));
+               BUG_ON(!hlist_unhashed(&crq->hash));
 
                mempool_free(crq, cfqd->crq_pool);
                rq->elevator_private = NULL;
@@ -661,17 +1099,21 @@ static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
        /*
         * prepare a queue up front, so cfq_enqueue() doesn't have to
         */
-       cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
+       cfqq = cfq_get_queue(cfqd, cfq_hash_key(current), gfp_mask);
        if (!cfqq)
                return 1;
 
        crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
        if (crq) {
+               /*
+                * process now has one request
+                */
+               clear_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
+
                memset(crq, 0, sizeof(*crq));
-               RB_CLEAR(&crq->rb_node);
                crq->request = rq;
-               crq->cfq_queue = NULL;
-               INIT_LIST_HEAD(&crq->hash);
+               INIT_HLIST_NODE(&crq->hash);
+               INIT_LIST_HEAD(&crq->prio_list);
                rq->elevator_private = crq;
                return 0;
        }
@@ -690,6 +1132,26 @@ static void cfq_exit(request_queue_t *q, elevator_t *e)
        kfree(cfqd);
 }
 
+static void cfq_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+
+       clear_bit(CFQ_WAIT_RT, &cfqd->flags);
+       clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+       kblockd_schedule_work(&cfqd->work);
+}
+
+static void cfq_work(void *data)
+{
+       request_queue_t *q = data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (cfq_next_request(q))
+               q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 static int cfq_init(request_queue_t *q, elevator_t *e)
 {
        struct cfq_data *cfqd;
@@ -700,38 +1162,75 @@ static int cfq_init(request_queue_t *q, elevator_t *e)
                return -ENOMEM;
 
        memset(cfqd, 0, sizeof(*cfqd));
-       INIT_LIST_HEAD(&cfqd->rr_list);
+       init_timer(&cfqd->timer);
+       cfqd->timer.function = cfq_timer;
+       cfqd->timer.data = (unsigned long) cfqd;
+
+       INIT_WORK(&cfqd->work, cfq_work, q);
+
+       for (i = 0; i < IOPRIO_NR; i++) {
+               struct io_prio_data *cid = &cfqd->cid[i];
+
+               INIT_LIST_HEAD(&cid->rr_list);
+               INIT_LIST_HEAD(&cid->prio_list);
+               cid->last_rq = -1;
+               cid->last_sectors = -1;
+
+               atomic_set(&cid->cum_rq_in,0);          
+               atomic_set(&cid->cum_rq_out,0);
+               atomic_set(&cid->cum_sectors_in,0);
+               atomic_set(&cid->cum_sectors_out,0);            
+               atomic_set(&cid->cum_queues_in,0);
+               atomic_set(&cid->cum_queues_out,0);
+#if 0
+               atomic_set(&cid->nskip,0);
+               atomic_set(&cid->navsec,0);
+               atomic_set(&cid->csectorate,0);
+               atomic_set(&cid->lsectorate,0);
+#endif
+       }
 
-       cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
+       cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES,
+                                GFP_KERNEL);
        if (!cfqd->crq_hash)
                goto out_crqhash;
 
-       cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+       cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES,
+                                GFP_KERNEL);
        if (!cfqd->cfq_hash)
                goto out_cfqhash;
 
-       cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+       cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, 
+                                       mempool_free_slab, crq_pool);
        if (!cfqd->crq_pool)
                goto out_crqpool;
 
        for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&cfqd->crq_hash[i]);
+               INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
+               INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
+
+       cfqd->cfq_queued = cfq_queued;
+       cfqd->cfq_quantum = cfq_quantum;
+       cfqd->cfq_quantum_io = cfq_quantum_io;
+       cfqd->cfq_idle_quantum = cfq_idle_quantum;
+       cfqd->cfq_idle_quantum_io = cfq_idle_quantum_io;
+       cfqd->cfq_grace_rt = cfq_grace_rt;
+       cfqd->cfq_grace_idle = cfq_grace_idle;
+
+       q->nr_requests <<= 2;
 
        cfqd->dispatch = &q->queue_head;
        e->elevator_data = cfqd;
        cfqd->queue = q;
 
-       /*
-        * just set it to some high value, we want anyone to be able to queue
-        * some requests. fairness is handled differently
-        */
-       cfqd->max_queued = q->nr_requests;
-       q->nr_requests = 8192;
-
-       cfqd->cfq_queued = cfq_queued;
-       cfqd->cfq_quantum = cfq_quantum;
+       cfqd->cfq_epoch = CFQ_EPOCH;
+       if (q->hardsect_size)
+               cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/
+                                     q->hardsect_size)* (1000000 / CFQ_EPOCH);
+       else
+               cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/512)
+                       * (1000000 / CFQ_EPOCH) ;
 
        return 0;
 out_crqpool:
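
Taking the units exactly as the code computes them, the defaults yield the following budget when hardsect_size falls back to 512 bytes (a worked example; the TODO above about exporting sane units clearly applies):

    #include <stdio.h>

    #define CFQ_EPOCH  50000
    #define CFQ_DISKBW 10

    int main(void)
    {
            unsigned long epochsectors =
                    ((CFQ_DISKBW * 1000000UL) / 512) * (1000000 / CFQ_EPOCH);

            /* 19531 * 20 = 390620 sectors, nominally ~200 MB per epoch */
            printf("cfq_epochsectors = %lu\n", epochsectors);
            return 0;
    }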
@@ -797,7 +1296,12 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page)          \
        return cfq_var_show(__VAR, (page));                             \
 }
 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
+SHOW_FUNCTION(cfq_quantum_io_show, cfqd->cfq_quantum_io);
+SHOW_FUNCTION(cfq_idle_quantum_show, cfqd->cfq_idle_quantum);
+SHOW_FUNCTION(cfq_idle_quantum_io_show, cfqd->cfq_idle_quantum_io);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
+SHOW_FUNCTION(cfq_grace_rt_show, cfqd->cfq_grace_rt);
+SHOW_FUNCTION(cfq_grace_idle_show, cfqd->cfq_grace_idle);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                                \
@@ -811,23 +1315,271 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)    \
        return ret;                                                     \
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
+STORE_FUNCTION(cfq_quantum_io_store, &cfqd->cfq_quantum_io, 4, INT_MAX);
+STORE_FUNCTION(cfq_idle_quantum_store, &cfqd->cfq_idle_quantum, 1, INT_MAX);
+STORE_FUNCTION(cfq_idle_quantum_io_store, &cfqd->cfq_idle_quantum_io, 4, INT_MAX);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
+STORE_FUNCTION(cfq_grace_rt_store, &cfqd->cfq_grace_rt, 0, INT_MAX);
+STORE_FUNCTION(cfq_grace_idle_store, &cfqd->cfq_grace_idle, 0, INT_MAX);
 #undef STORE_FUNCTION
 
+
+static ssize_t cfq_epoch_show(struct cfq_data *cfqd, char *page)
+{
+       return sprintf(page, "%lu\n", cfqd->cfq_epoch);
+}
+
+static ssize_t cfq_epoch_store(struct cfq_data *cfqd, const char *page, size_t count)
+{
+       char *p = (char *) page;
+       cfqd->cfq_epoch = simple_strtoul(p, &p, 10);
+       return count;
+}
+
+static ssize_t cfq_epochsectors_show(struct cfq_data *cfqd, char *page)
+{
+       return sprintf(page, "%lu\n", cfqd->cfq_epochsectors);
+}
+
+static ssize_t 
+cfq_epochsectors_store(struct cfq_data *cfqd, const char *page, size_t count)
+{
+       char *p = (char *) page;
+       cfqd->cfq_epochsectors = simple_strtoul(p, &p, 10);
+       return count;
+}
+
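
Once cfq is the active elevator, these attributes surface under /sys/block/<dev>/queue/iosched/. A hypothetical user-space tuner; the device path is an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* assumed device; substitute the disk being tuned */
            const char *attr = "/sys/block/hda/queue/iosched/epoch";
            int fd = open(attr, O_WRONLY);

            if (fd < 0) {
                    perror(attr);
                    return 1;
            }
            if (write(fd, "50000", 5) != 5)
                    perror("write");
            close(fd);
            return 0;
    }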
+/* Additional entries to get priority level data */
+static ssize_t
+cfq_prio_show(struct cfq_data *cfqd, char *page, unsigned int priolvl)
+{
+       int r1,r2,s1,s2,q1,q2;
+
+       if (!(priolvl >= IOPRIO_IDLE && priolvl <= IOPRIO_RT)) 
+               return 0;
+       
+       r1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_in));
+       r2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_out));
+       s1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_in));
+       s2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_out));
+       q1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_in)); 
+       q2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_out));
+       
+       return sprintf(page, "skip %d avsec %lu rate %lu new %lu "
+                      "rq (%d,%d) sec (%d,%d) q (%d,%d)\n",
+                      cfqd->cid[priolvl].nskip,
+                      cfqd->cid[priolvl].navsec,
+                      cfqd->cid[priolvl].csectorate,
+                      cfqd->cid[priolvl].lsectorate,
+//                    atomic_read(&cfqd->cid[priolvl].nskip),
+//                    atomic_read(&cfqd->cid[priolvl].navsec),
+//                    atomic_read(&cfqd->cid[priolvl].csectorate),
+//                    atomic_read(&cfqd->cid[priolvl].lsectorate),
+                      r1,r2,
+                      s1,s2,
+                      q1,q2);
+}
+
+#define SHOW_PRIO_DATA(__PRIOLVL)                                               \
+static ssize_t cfq_prio_##__PRIOLVL##_show(struct cfq_data *cfqd, char *page)  \
+{                                                                              \
+       return cfq_prio_show(cfqd,page,__PRIOLVL);                              \
+}
+SHOW_PRIO_DATA(0);
+SHOW_PRIO_DATA(1);
+SHOW_PRIO_DATA(2);
+SHOW_PRIO_DATA(3);
+SHOW_PRIO_DATA(4);
+SHOW_PRIO_DATA(5);
+SHOW_PRIO_DATA(6);
+SHOW_PRIO_DATA(7);
+SHOW_PRIO_DATA(8);
+SHOW_PRIO_DATA(9);
+SHOW_PRIO_DATA(10);
+SHOW_PRIO_DATA(11);
+SHOW_PRIO_DATA(12);
+SHOW_PRIO_DATA(13);
+SHOW_PRIO_DATA(14);
+SHOW_PRIO_DATA(15);
+SHOW_PRIO_DATA(16);
+SHOW_PRIO_DATA(17);
+SHOW_PRIO_DATA(18);
+SHOW_PRIO_DATA(19);
+SHOW_PRIO_DATA(20);
+#undef SHOW_PRIO_DATA
+
+
+static ssize_t cfq_prio_store(struct cfq_data *cfqd, const char *page, size_t count, int priolvl)
+{      
+       atomic_set(&(cfqd->cid[priolvl].cum_rq_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_rq_out),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_sectors_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_sectors_out),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_queues_in),0);
+       atomic_set(&(cfqd->cid[priolvl].cum_queues_out),0);
+
+       return count;
+}
+
+
+#define STORE_PRIO_DATA(__PRIOLVL)                                                                \
+static ssize_t cfq_prio_##__PRIOLVL##_store(struct cfq_data *cfqd, const char *page, size_t count) \
+{                                                                                                 \
+        return cfq_prio_store(cfqd,page,count,__PRIOLVL);                                          \
+}                  
+STORE_PRIO_DATA(0);     
+STORE_PRIO_DATA(1);
+STORE_PRIO_DATA(2);
+STORE_PRIO_DATA(3);
+STORE_PRIO_DATA(4);
+STORE_PRIO_DATA(5);
+STORE_PRIO_DATA(6);
+STORE_PRIO_DATA(7);
+STORE_PRIO_DATA(8);
+STORE_PRIO_DATA(9);
+STORE_PRIO_DATA(10);
+STORE_PRIO_DATA(11);
+STORE_PRIO_DATA(12);
+STORE_PRIO_DATA(13);
+STORE_PRIO_DATA(14);
+STORE_PRIO_DATA(15);
+STORE_PRIO_DATA(16);
+STORE_PRIO_DATA(17);
+STORE_PRIO_DATA(18);
+STORE_PRIO_DATA(19);
+STORE_PRIO_DATA(20);
+#undef STORE_PRIO_DATA
+
+
 static struct cfq_fs_entry cfq_quantum_entry = {
        .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_quantum_show,
        .store = cfq_quantum_store,
 };
+static struct cfq_fs_entry cfq_quantum_io_entry = {
+       .attr = {.name = "quantum_io", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_quantum_io_show,
+       .store = cfq_quantum_io_store,
+};
+static struct cfq_fs_entry cfq_idle_quantum_entry = {
+       .attr = {.name = "idle_quantum", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_idle_quantum_show,
+       .store = cfq_idle_quantum_store,
+};
+static struct cfq_fs_entry cfq_idle_quantum_io_entry = {
+       .attr = {.name = "idle_quantum_io", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_idle_quantum_io_show,
+       .store = cfq_idle_quantum_io_store,
+};
 static struct cfq_fs_entry cfq_queued_entry = {
        .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
        .show = cfq_queued_show,
        .store = cfq_queued_store,
 };
+static struct cfq_fs_entry cfq_grace_rt_entry = {
+       .attr = {.name = "grace_rt", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_grace_rt_show,
+       .store = cfq_grace_rt_store,
+};
+static struct cfq_fs_entry cfq_grace_idle_entry = {
+       .attr = {.name = "grace_idle", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_grace_idle_show,
+       .store = cfq_grace_idle_store,
+};
+static struct cfq_fs_entry cfq_epoch_entry = {
+       .attr = {.name = "epoch", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_epoch_show,
+       .store = cfq_epoch_store,
+};
+static struct cfq_fs_entry cfq_epochsectors_entry = {
+       .attr = {.name = "epochsectors", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_epochsectors_show,
+       .store = cfq_epochsectors_store,
+};
+
+#define P_0_STR   "p0"
+#define P_1_STR   "p1"
+#define P_2_STR   "p2"
+#define P_3_STR   "p3"
+#define P_4_STR   "p4"
+#define P_5_STR   "p5"
+#define P_6_STR   "p6"
+#define P_7_STR   "p7"
+#define P_8_STR   "p8"
+#define P_9_STR   "p9"
+#define P_10_STR  "p10"
+#define P_11_STR  "p11"
+#define P_12_STR  "p12"
+#define P_13_STR  "p13"
+#define P_14_STR  "p14"
+#define P_15_STR  "p15"
+#define P_16_STR  "p16"
+#define P_17_STR  "p17"
+#define P_18_STR  "p18"
+#define P_19_STR  "p19"
+#define P_20_STR  "p20"
+
+
+#define CFQ_PRIO_SYSFS_ENTRY(__PRIOLVL)                                           \
+static struct cfq_fs_entry cfq_prio_##__PRIOLVL##_entry = {                \
+       .attr = {.name = P_##__PRIOLVL##_STR, .mode = S_IRUGO | S_IWUSR }, \
+       .show = cfq_prio_##__PRIOLVL##_show,                               \
+       .store = cfq_prio_##__PRIOLVL##_store,                             \
+};
+CFQ_PRIO_SYSFS_ENTRY(0);
+CFQ_PRIO_SYSFS_ENTRY(1);
+CFQ_PRIO_SYSFS_ENTRY(2);
+CFQ_PRIO_SYSFS_ENTRY(3);
+CFQ_PRIO_SYSFS_ENTRY(4);
+CFQ_PRIO_SYSFS_ENTRY(5);
+CFQ_PRIO_SYSFS_ENTRY(6);
+CFQ_PRIO_SYSFS_ENTRY(7);
+CFQ_PRIO_SYSFS_ENTRY(8);
+CFQ_PRIO_SYSFS_ENTRY(9);
+CFQ_PRIO_SYSFS_ENTRY(10);
+CFQ_PRIO_SYSFS_ENTRY(11);
+CFQ_PRIO_SYSFS_ENTRY(12);
+CFQ_PRIO_SYSFS_ENTRY(13);
+CFQ_PRIO_SYSFS_ENTRY(14);
+CFQ_PRIO_SYSFS_ENTRY(15);
+CFQ_PRIO_SYSFS_ENTRY(16);
+CFQ_PRIO_SYSFS_ENTRY(17);
+CFQ_PRIO_SYSFS_ENTRY(18);
+CFQ_PRIO_SYSFS_ENTRY(19);
+CFQ_PRIO_SYSFS_ENTRY(20);
+#undef CFQ_PRIO_SYSFS_ENTRY
 
 static struct attribute *default_attrs[] = {
        &cfq_quantum_entry.attr,
+       &cfq_quantum_io_entry.attr,
+       &cfq_idle_quantum_entry.attr,
+       &cfq_idle_quantum_io_entry.attr,
        &cfq_queued_entry.attr,
+       &cfq_grace_rt_entry.attr,
+       &cfq_grace_idle_entry.attr,
+       &cfq_epoch_entry.attr,
+       &cfq_epochsectors_entry.attr,
+       &cfq_prio_0_entry.attr,
+       &cfq_prio_1_entry.attr,
+       &cfq_prio_2_entry.attr,
+       &cfq_prio_3_entry.attr,
+       &cfq_prio_4_entry.attr,
+       &cfq_prio_5_entry.attr,
+       &cfq_prio_6_entry.attr,
+       &cfq_prio_7_entry.attr,
+       &cfq_prio_8_entry.attr,
+       &cfq_prio_9_entry.attr,
+       &cfq_prio_10_entry.attr,
+       &cfq_prio_11_entry.attr,
+       &cfq_prio_12_entry.attr,
+       &cfq_prio_13_entry.attr,
+       &cfq_prio_14_entry.attr,
+       &cfq_prio_15_entry.attr,
+       &cfq_prio_16_entry.attr,
+       &cfq_prio_17_entry.attr,
+       &cfq_prio_18_entry.attr,
+       &cfq_prio_19_entry.attr,
+       &cfq_prio_20_entry.attr,
        NULL,
 };
 
@@ -883,6 +1635,7 @@ elevator_t iosched_cfq = {
        .elevator_set_req_fn =          cfq_set_request,
        .elevator_put_req_fn =          cfq_put_request,
        .elevator_may_queue_fn =        cfq_may_queue,
+       .elevator_set_congested_fn =    cfq_queue_congested,
        .elevator_init_fn =             cfq_init,
        .elevator_exit_fn =             cfq_exit,
 };
index a514559..7edfce7 100644
--- a/drivers/block/ckrm-io.c
+++ b/drivers/block/ckrm-io.c
@@ -94,8 +94,8 @@ static inline int cki_div(int *a, int b, int c);
 static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres);
 
 /* External functions e.g. interface to ioscheduler */
-void *cki_tsk_icls(struct task_struct *tsk);
-int cki_tsk_ioprio(struct task_struct *tsk);
+void *cki_tsk_icls (struct task_struct *tsk);
+int cki_tsk_ioprio (struct task_struct *tsk);
 
 extern void cki_cfq_set(icls_tsk_t tskicls, icls_ioprio_t tskioprio);
 
@@ -523,7 +523,7 @@ static void cki_chgcls(void *tsk, void *oldres, void *newres)
 
 
 struct ckrm_res_ctlr cki_rcbs = {
-       .res_name = "cki",
+       .res_name = "io",
        .res_hdepth = 1,
        .resid = -1,
        .res_alloc = cki_alloc,
index 35c9385..950eb99 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -339,6 +339,14 @@ void elv_put_request(request_queue_t *q, struct request *rq)
                e->elevator_put_req_fn(q, rq);
 }
 
+void elv_set_congested(request_queue_t *q)
+{
+       elevator_t *e = &q->elevator;
+
+       if (e->elevator_set_congested_fn)
+               e->elevator_set_congested_fn(q);
+}
+
 int elv_may_queue(request_queue_t *q, int rw)
 {
        elevator_t *e = &q->elevator;
@@ -346,7 +354,7 @@ int elv_may_queue(request_queue_t *q, int rw)
        if (e->elevator_may_queue_fn)
                return e->elevator_may_queue_fn(q, rw);
 
-       return 0;
+       return 1;
 }
 
 void elv_completed_request(request_queue_t *q, struct request *rq)
index 17c403e..b6ff344 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1594,6 +1594,10 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
        struct io_context *ioc = get_io_context(gfp_mask);
 
        spin_lock_irq(q->queue_lock);
+
+       if (!elv_may_queue(q, rw))
+               goto out_lock;
+
        if (rl->count[rw]+1 >= q->nr_requests) {
                /*
                 * The queue will fill after this allocation, so set it as
@@ -1607,15 +1611,12 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
                }
        }
 
-       if (blk_queue_full(q, rw)
-                       && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
-               /*
-                * The queue is full and the allocating process is not a
-                * "batcher", and not exempted by the IO scheduler
-                */
-               spin_unlock_irq(q->queue_lock);
-               goto out;
-       }
+       /*
+        * The queue is full and the allocating process is not a
+        * "batcher", and not exempted by the IO scheduler
+        */
+       if (blk_queue_full(q, rw) && !ioc_batching(ioc))
+               goto out_lock;
 
        rl->count[rw]++;
        if (rl->count[rw] >= queue_congestion_on_threshold(q))
@@ -1633,8 +1634,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
                 */
                spin_lock_irq(q->queue_lock);
                freed_request(q, rw);
-               spin_unlock_irq(q->queue_lock);
-               goto out;
+               goto out_lock;
        }
 
        if (ioc_batching(ioc))
@@ -1664,6 +1664,11 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
 out:
        put_io_context(ioc);
        return rq;
+out_lock:
+       if (!rq)
+               elv_set_congested(q);
+       spin_unlock_irq(q->queue_lock);
+       goto out;
 }
 
 /*
@@ -3167,3 +3172,21 @@ void blk_unregister_queue(struct gendisk *disk)
                kobject_put(&disk->kobj);
        }
 }
+
+asmlinkage int sys_ioprio_set(int ioprio)
+{
+       if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
+               return -EINVAL;
+       if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       printk(KERN_DEBUG "%s: set ioprio %d\n", current->comm, ioprio);
+       current->ioprio = ioprio;
+       return 0;
+}
+
+asmlinkage int sys_ioprio_get(void)
+{
+       return current->ioprio;
+}
+
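These syscalls take and return the priority value directly. A hypothetical userspace test using the i386 numbers wired up below (284/285); the wrapper macros are illustrative and not part of this patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_ioprio_set	284	/* i386; ppc and x86_64 use different numbers */
#define __NR_ioprio_get	285

int main(void)
{
	/* 0 = IOPRIO_IDLE, 10 = IOPRIO_NORM, 20 = IOPRIO_RT (CAP_SYS_ADMIN) */
	if (syscall(__NR_ioprio_set, 10) < 0)
		perror("ioprio_set");
	printf("ioprio is now %ld\n", syscall(__NR_ioprio_get));
	return 0;
}
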
index b0a98b4..b0acd42 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -47,7 +47,6 @@
 #include <linux/syscalls.h>
 #include <linux/rmap.h>
 #include <linux/ckrm.h>
-#include <linux/ckrm_mem.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -548,18 +547,6 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
-#ifdef CONFIG_CKRM_RES_MEM
-       if (old_mm) {
-               spin_lock(&old_mm->peertask_lock);
-               list_del(&tsk->mm_peers);
-               ckrm_mem_evaluate_mm(old_mm);
-               spin_unlock(&old_mm->peertask_lock);
-       }
-       spin_lock(&mm->peertask_lock);
-       list_add_tail(&tsk->mm_peers, &mm->tasklist);
-       ckrm_mem_evaluate_mm(mm);
-       spin_unlock(&mm->peertask_lock);
-#endif
        if (old_mm) {
                if (active_mm != old_mm) BUG();
                mmput(old_mm);
index ef936b8..30bbe7f 100644 (file)
 #define __NR_mq_notify         (__NR_mq_open+4)
 #define __NR_mq_getsetattr     (__NR_mq_open+5)
 #define __NR_sys_kexec_load    283
+#define __NR_ioprio_set                284
+#define __NR_ioprio_get                285
 
-#define NR_syscalls 284
+#define NR_syscalls 286
 
 /* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
 
index 57fb02c..bdf4ebe 100644 (file)
 #define __NR_mq_notify         266
 #define __NR_mq_getsetattr     267
 #define __NR_kexec_load                268
+#define __NR_ioprio_set                269
+#define __NR_ioprio_get                270
 
-#define __NR_syscalls          269
+#define __NR_syscalls          271
 
 #define __NR(n)        #n
 
index 26e0aa3..0b0a6a1 100644 (file)
@@ -554,8 +554,12 @@ __SYSCALL(__NR_mq_notify, sys_mq_notify)
 __SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
 #define __NR_kexec_load        246
 __SYSCALL(__NR_kexec_load, sys_ni_syscall)
+#define __NR_ioprio_set                247
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get                248
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
 
-#define __NR_syscall_max __NR_kexec_load
+#define __NR_syscall_max __NR_ioprio_get
 #ifndef __NO_STUBS
 
 /* user-visible error numbers are in the range -1 - -4095 */
index a6a46a7..46e2083 100644 (file)
@@ -33,8 +33,8 @@ typedef int (*icls_ioprio_t) (struct task_struct *tsk);
 
 #ifdef CONFIG_CKRM_RES_BLKIO
 
-extern icls_tsk_t cki_tsk_icls;
-extern icls_ioprio_t cki_tsk_ioprio;
+extern void *cki_tsk_icls (struct task_struct *tsk);
+extern int cki_tsk_ioprio (struct task_struct *tsk);
 
 #endif /* CONFIG_CKRM_RES_BLKIO */
 
index 27e8183..b42a9c4 100644 (file)
@@ -17,6 +17,7 @@ typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_set_congested_fn) (request_queue_t *);
 
 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
@@ -45,6 +46,7 @@ struct elevator_s
        elevator_put_req_fn *elevator_put_req_fn;
 
        elevator_may_queue_fn *elevator_may_queue_fn;
+       elevator_set_congested_fn *elevator_set_congested_fn;
 
        elevator_init_fn *elevator_init_fn;
        elevator_exit_fn *elevator_exit_fn;
@@ -74,6 +76,7 @@ extern struct request *elv_latter_request(request_queue_t *, struct request *);
 extern int elv_register_queue(request_queue_t *q);
 extern void elv_unregister_queue(request_queue_t *q);
 extern int elv_may_queue(request_queue_t *, int);
+extern void elv_set_congested(request_queue_t *);
 extern void elv_completed_request(request_queue_t *, struct request *);
 extern int elv_set_request(request_queue_t *, struct request *, int);
 extern void elv_put_request(request_queue_t *, struct request *);
@@ -119,4 +122,6 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
 #define ELEVATOR_INSERT_BACK   2
 #define ELEVATOR_INSERT_SORT   3
 
+#define RQ_ELV_DATA(rq)                (rq)->elevator_private
+
 #endif
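RQ_ELV_DATA() is just shorthand for rq->elevator_private. A hypothetical use inside a scheduler's completion path (example_rq and its field are made up for illustration):

struct example_rq {
	int in_flight;			/* hypothetical per-request state */
};

static void example_completed_request(request_queue_t *q, struct request *rq)
{
	struct example_rq *erq = RQ_ELV_DATA(rq);	/* == rq->elevator_private */

	if (erq)
		erq->in_flight = 0;
}
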
index 7e10a25..0b4e211 100644 (file)
@@ -1570,5 +1570,17 @@ static inline void free_secdata(void *secdata)
 { }
 #endif /* CONFIG_SECURITY */
 
+/* io priorities */
+
+#define IOPRIO_NR      21
+
+#define IOPRIO_IDLE    0
+#define IOPRIO_NORM    10
+#define IOPRIO_RT      20
+
+asmlinkage int sys_ioprio_set(int ioprio);
+asmlinkage int sys_ioprio_get(void);
+
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_FS_H */
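Valid values run from IOPRIO_IDLE through IOPRIO_RT, with IOPRIO_NORM as the inherited default. A hypothetical range-check helper, mirroring the open-coded test in sys_ioprio_set() above:

/* Hypothetical; not part of this patch. */
static inline int ioprio_valid(int ioprio)
{
	return ioprio >= IOPRIO_IDLE && ioprio <= IOPRIO_RT;
}
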
index 9937c8d..5d62063 100644 (file)
@@ -112,6 +112,7 @@ extern struct group_info init_groups;
        .proc_lock      = SPIN_LOCK_UNLOCKED,                           \
        .switch_lock    = SPIN_LOCK_UNLOCKED,                           \
        .journal_info   = NULL,                                         \
+       .ioprio         = IOPRIO_NORM,                                  \
 }
 
 
index 0e79890..5c584cc 100644 (file)
@@ -229,9 +229,6 @@ struct page {
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CKRM_RES_MEM
-       void *memclass;
-#endif // CONFIG_CKRM_RES_MEM
 };
 
 /*
index 5edb739..47762ca 100644 (file)
@@ -1,11 +1,9 @@
-#include <linux/ckrm_mem_inline.h>
 
 static inline void
 add_page_to_active_list(struct zone *zone, struct page *page)
 {
        list_add(&page->lru, &zone->active_list);
        zone->nr_active++;
-       ckrm_mem_inc_active(page);
 }
 
 static inline void
@@ -13,7 +11,6 @@ add_page_to_inactive_list(struct zone *zone, struct page *page)
 {
        list_add(&page->lru, &zone->inactive_list);
        zone->nr_inactive++;
-       ckrm_mem_inc_inactive(page);
 }
 
 static inline void
@@ -21,7 +18,6 @@ del_page_from_active_list(struct zone *zone, struct page *page)
 {
        list_del(&page->lru);
        zone->nr_active--;
-       ckrm_mem_dec_active(page);
 }
 
 static inline void
@@ -29,7 +25,6 @@ del_page_from_inactive_list(struct zone *zone, struct page *page)
 {
        list_del(&page->lru);
        zone->nr_inactive--;
-       ckrm_mem_dec_inactive(page);
 }
 
 static inline void
@@ -39,9 +34,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
        if (PageActive(page)) {
                ClearPageActive(page);
                zone->nr_active--;
-               ckrm_mem_dec_active(page);
        } else {
                zone->nr_inactive--;
-               ckrm_mem_dec_inactive(page);
        }
 }
index c70f46a..c6f5063 100644 (file)
@@ -77,7 +77,6 @@
 #define PG_compound            19      /* Part of a compound page */
 
 #define PG_anon                        20      /* Anonymous: anon_vma in mapping */
-#define PG_ckrm_account        21      /* This page is accounted by CKRM */
 
 
 /*
index f975c76..4dd9fbd 100644 (file)
@@ -230,11 +230,6 @@ struct mm_struct {
        struct kioctx           *ioctx_list;
 
        struct kioctx           default_kioctx;
-#ifdef CONFIG_CKRM_RES_MEM
-       struct ckrm_mem_res *memclass;
-       struct list_head        tasklist; /* list of all tasks sharing this address space */
-       spinlock_t              peertask_lock; /* protect above tasklist */
-#endif
 };
 
 extern int mmlist_nr;
@@ -526,6 +521,8 @@ struct task_struct {
 
        struct io_context *io_context;
 
+       int ioprio;
+
        unsigned long ptrace_message;
        siginfo_t *last_siginfo; /* For ptrace use.  */
 
@@ -542,10 +539,8 @@ struct task_struct {
        struct ckrm_task_class *taskclass;
        struct list_head        taskclass_link;
 #endif // CONFIG_CKRM_TYPE_TASKCLASS
-#ifdef CONFIG_CKRM_RES_MEM
-       struct list_head        mm_peers; // list of tasks using same mm_struct
-#endif // CONFIG_CKRM_RES_MEM
 #endif // CONFIG_CKRM
+
        struct task_delay_info  delays;
 };
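With the priority stored directly in the task_struct, the CKRM callback declared earlier can be as simple as the following sketch (the real cki_tsk_ioprio() in ckrm-io.c may consult the class settings instead):

static int example_tsk_ioprio(struct task_struct *tsk)
{
	return tsk->ioprio;	/* per-task value; illustrative only */
}
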
 
index 4fdce31..45a39b1 100644 (file)
@@ -172,25 +172,18 @@ config CKRM_RES_NUMTASKS
        
          Say N if unsure, Y to use the feature.
 
-config CKRM_RES_MEM
-       bool "Class based physical memory controller"
-       default y
-       depends on CKRM
-       help
-         Provide the basic support for collecting physical memory usage information
-         among classes. Say Y if you want to know the memory usage of each class.
-
-config CKRM_MEM_LRUORDER_CHANGE
-       bool "Change the LRU ordering of scanned pages"
-       default n
-       depends on CKRM_RES_MEM
+config CKRM_RES_BLKIO
+       tristate "Disk I/O Resource Controller"
+       depends on CKRM_TYPE_TASKCLASS && IOSCHED_CFQ
+       default m
        help
-         While trying to free pages, by default(n), scanned pages are left were they
-         are found if they belong to relatively under-used class. In this case the
-         LRU ordering of the memory subsystemis left intact. If this option is chosen,
-         then the scanned pages are moved to the tail of the list(active or inactive).
-         Changing this to yes reduces the checking overhead but violates the approximate
-         LRU order that is maintained by the paging subsystem.
+         Provides a resource controller for best-effort block I/O
+         bandwidth control. The controller attempts this through
+         proportional servicing of requests in the I/O scheduler; seek
+         optimizations and reordering by device drivers or disk
+         controllers may alter the actual bandwidth delivered to a class.
+
+         Say N if unsure, M or Y to use the feature.
 
 config CKRM_TYPE_SOCKETCLASS
        bool "Class Manager for socket groups"
index da00554..008b6c6 100644 (file)
@@ -9,4 +9,3 @@ endif
     obj-$(CONFIG_CKRM_RES_NUMTASKS)    += ckrm_numtasks.o
     obj-$(CONFIG_CKRM_TYPE_SOCKETCLASS) += ckrm_sockc.o
     obj-$(CONFIG_CKRM_RES_LISTENAQ)    += ckrm_laq.o
-    obj-$(CONFIG_CKRM_RES_MEM)                         += ckrm_mem.o
index 70c92e5..ca75e5e 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/mempolicy.h>
 #include <linux/ckrm.h>
 #include <linux/ckrm_tsk.h>
-#include <linux/ckrm_mem.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -514,12 +513,6 @@ static inline void __exit_mm(struct task_struct * tsk)
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
-#ifdef CONFIG_CKRM_RES_MEM
-       spin_lock(&mm->peertask_lock);
-       list_del_init(&tsk->mm_peers);
-       ckrm_mem_evaluate_mm(mm);
-       spin_unlock(&mm->peertask_lock);
-#endif
        enter_lazy_tlb(mm, current);
        task_unlock(tsk);
        mmput(mm);
index e639ce1..d665090 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/rmap.h>
 #include <linux/ckrm.h>
 #include <linux/ckrm_tsk.h>
-#include <linux/ckrm_mem_inline.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -266,9 +265,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        ckrm_cb_newtask(tsk);
        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage,2);
-#ifdef CONFIG_CKRM_RES_MEM     
-       INIT_LIST_HEAD(&tsk->mm_peers);
-#endif
        return tsk;
 }
 
@@ -421,10 +417,6 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
        mm->ioctx_list = NULL;
        mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
-#ifdef CONFIG_CKRM_RES_MEM
-       INIT_LIST_HEAD(&mm->tasklist);
-       mm->peertask_lock = SPIN_LOCK_UNLOCKED;
-#endif
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
@@ -445,10 +437,6 @@ struct mm_struct * mm_alloc(void)
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
-#ifdef CONFIG_CKRM_RES_MEM
-               mm->memclass = GET_MEM_CLASS(current);
-               mem_class_get(mm->memclass);
-#endif
        }
        return mm;
 }
@@ -463,13 +451,6 @@ void fastcall __mmdrop(struct mm_struct *mm)
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
-#ifdef CONFIG_CKRM_RES_MEM
-       /* class can be null and mm's tasklist can be empty here */
-       if (mm->memclass) {
-               mem_class_put(mm->memclass);
-               mm->memclass = NULL;
-       }
-#endif
        free_mm(mm);
 }
 
@@ -597,7 +578,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 good_mm:
        tsk->mm = mm;
        tsk->active_mm = mm;
-       ckrm_init_mm_to_task(mm, tsk);
        return 0;
 
 free_pt:
@@ -1116,6 +1096,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
        } else
                link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
 
+       p->ioprio = current->ioprio;
        nr_threads++;
        write_unlock_irq(&tasklist_lock);
        retval = 0;
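
Since copy_process() copies current->ioprio, priorities propagate down process trees without any further bookkeeping. A hypothetical userspace demonstration, reusing the illustrative syscall numbers from earlier:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>

#define __NR_ioprio_set	284	/* i386, as above */
#define __NR_ioprio_get	285

int main(void)
{
	syscall(__NR_ioprio_set, 0);	/* IOPRIO_IDLE */
	if (fork() == 0) {
		/* the child sees the parent's value via copy_process() */
		printf("child ioprio: %ld\n", syscall(__NR_ioprio_get));
		_exit(0);
	}
	wait(NULL);
	return 0;
}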
index 0ccf1ee..6708f4f 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/topology.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
-#include <linux/ckrm_mem_inline.h>
 
 #include <asm/tlbflush.h>
 
@@ -269,7 +268,6 @@ free_pages_bulk(struct zone *zone, int count,
                /* have to delete it as __free_pages_bulk list manipulates */
                list_del(&page->lru);
                __free_pages_bulk(page, base, zone, area, order);
-               ckrm_clear_page_class(page);
                ret++;
        }
        spin_unlock_irqrestore(&zone->lock, flags);
@@ -612,10 +610,6 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 
        might_sleep_if(wait);
 
-       if (!ckrm_class_limit_ok((GET_MEM_CLASS(current)))) {
-               return NULL;
-       }
-
        zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
        if (zones[0] == NULL)     /* no zones in the zonelist */
                return NULL;
@@ -745,7 +739,6 @@ nopage:
        return NULL;
 got_pg:
        kernel_map_pages(page, 1 << order, 1);
-       ckrm_set_pages_class(page, 1 << order, GET_MEM_CLASS(current));
        return page;
 }
 
index 4911729..8e3b693 100644 (file)
@@ -37,7 +37,6 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
-#include <linux/ckrm_mem.h>
 
 /* possible outcome of pageout() */
 typedef enum {
@@ -72,9 +71,6 @@ struct scan_control {
        /* This context's GFP mask */
        unsigned int gfp_mask;
 
-       /* Flag used by CKRM */
-       unsigned int ckrm_flags;
-
        int may_writepage;
 };
 
@@ -546,23 +542,19 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 {
        LIST_HEAD(page_list);
        struct pagevec pvec;
-       int max_scan = sc->nr_to_scan, nr_pass;
-       unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
+       int max_scan = sc->nr_to_scan;
 
        pagevec_init(&pvec, 1);
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-redo:
-       ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
-       nr_pass = zone->nr_inactive;
        while (max_scan > 0) {
                struct page *page;
                int nr_taken = 0;
                int nr_scan = 0;
                int nr_freed;
 
-               while (nr_pass-- && nr_scan++ < SWAP_CLUSTER_MAX &&
+               while (nr_scan++ < SWAP_CLUSTER_MAX &&
                                !list_empty(&zone->inactive_list)) {
                        page = lru_to_page(&zone->inactive_list);
 
@@ -580,25 +572,15 @@ redo:
                                SetPageLRU(page);
                                list_add(&page->lru, &zone->inactive_list);
                                continue;
-                       } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
-                               __put_page(page);
-                               SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
-                               list_add_tail(&page->lru, &zone->inactive_list);
-#else
-                               list_add(&page->lru, &zone->inactive_list);
-#endif
-                               continue;
                        }
                        list_add(&page->lru, &page_list);
-                       ckrm_mem_dec_inactive(page);
                        nr_taken++;
                }
                zone->nr_inactive -= nr_taken;
                zone->pages_scanned += nr_taken;
                spin_unlock_irq(&zone->lru_lock);
 
-               if ((bit_flag == 0) && (nr_taken == 0))
+               if (nr_taken == 0)
                        goto done;
 
                max_scan -= nr_scan;
@@ -631,9 +613,6 @@ redo:
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
-               if (ckrm_flags && (nr_pass <= 0)) {
-                       goto redo;
-               }
        }
        spin_unlock_irq(&zone->lru_lock);
 done:
@@ -673,17 +652,11 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        long mapped_ratio;
        long distress;
        long swap_tendency;
-       unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
-       int nr_pass;
 
        lru_add_drain();
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
-redo:
-       ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
-       nr_pass = zone->nr_active;
-       while (pgscanned < nr_pages && !list_empty(&zone->active_list) &&
-                                               nr_pass) {
+       while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
                page = lru_to_page(&zone->active_list);
                prefetchw_prev_lru_page(page, &zone->active_list, flags);
                if (!TestClearPageLRU(page))
@@ -699,24 +672,11 @@ redo:
                        __put_page(page);
                        SetPageLRU(page);
                        list_add(&page->lru, &zone->active_list);
-                       pgscanned++;
-               } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
-                       __put_page(page);
-                       SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
-                       list_add_tail(&page->lru, &zone->active_list);
-#else
-                       list_add(&page->lru, &zone->active_list);
-#endif
                } else {
                        list_add(&page->lru, &l_hold);
-                       ckrm_mem_dec_active(page);
                        pgmoved++;
-               pgscanned++;
-       }
-               if (!--nr_pass && ckrm_flags) {
-                       goto redo;
                }
+               pgscanned++;
        }
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
@@ -790,7 +750,6 @@ redo:
                if (!TestClearPageActive(page))
                        BUG();
                list_move(&page->lru, &zone->inactive_list);
-               ckrm_mem_inc_inactive(page);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_inactive += pgmoved;
@@ -819,7 +778,6 @@ redo:
                        BUG();
                BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
-               ckrm_mem_inc_active(page);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_active += pgmoved;
@@ -867,7 +825,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
        sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
 
        while (nr_active || nr_inactive) {
-               sc->ckrm_flags = ckrm_setup_reclamation();
                if (nr_active) {
                        sc->nr_to_scan = min(nr_active,
                                        (unsigned long)SWAP_CLUSTER_MAX);
@@ -883,113 +840,9 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
                        if (sc->nr_to_reclaim <= 0)
                                break;
                }
-               ckrm_teardown_reclamation();
-       }
-}
-
-#ifdef CONFIG_CKRM_RES_MEM
-// This function needs to be given more thought.
-// Shrink the class to be at 90% of its limit
-static void
-ckrm_shrink_class(ckrm_mem_res_t *cls)
-{
-       struct scan_control sc;
-       struct zone *zone;
-       int zindex = 0, active_credit = 0, inactive_credit = 0;
-
-       if (ckrm_test_set_shrink(cls)) { // set the SHRINK bit atomically
-               // if it is already set somebody is working on it. so... leave
-               return;
-       }
-       sc.nr_mapped = read_page_state(nr_mapped);
-       sc.nr_scanned = 0;
-       sc.ckrm_flags = ckrm_get_reclaim_flags(cls);
-       sc.nr_reclaimed = 0;
-       sc.priority = 0; // always very high priority
-
-       for_each_zone(zone) {
-               int zone_total, zone_limit, active_limit, inactive_limit;
-               int active_over, inactive_over;
-               unsigned long nr_active, nr_inactive;
-               u64 temp;
-
-               zone->temp_priority = zone->prev_priority;
-               zone->prev_priority = sc.priority;
-
-               zone_total = zone->nr_active + zone->nr_inactive + zone->free_pages;
-
-               temp = (u64) cls->pg_limit * zone_total;
-               do_div(temp, ckrm_tot_lru_pages);
-               zone_limit = (int) temp;
-               active_limit = (6 * zone_limit) / 10; // 2/3rd in active list
-               inactive_limit = (3 * zone_limit) / 10; // 1/3rd in inactive list
-
-               active_over = cls->nr_active[zindex] - active_limit + active_credit;
-               inactive_over = active_over +
-                               (cls->nr_inactive[zindex] - inactive_limit) + inactive_credit;
-
-               if (active_over > 0) {
-                       zone->nr_scan_active += active_over + 1;
-                       nr_active = zone->nr_scan_active;
-                       active_credit = 0;
-               } else {
-                       active_credit += active_over;
-                       nr_active = 0;
-               }
-
-               if (inactive_over > 0) {
-                       zone->nr_scan_inactive += inactive_over;
-                       nr_inactive = zone->nr_scan_inactive;
-                       inactive_credit = 0;
-               } else {
-                       inactive_credit += inactive_over;
-                       nr_inactive = 0;
-               }
-               while (nr_active || nr_inactive) {
-                       if (nr_active) {
-                               sc.nr_to_scan = min(nr_active,
-                                               (unsigned long)SWAP_CLUSTER_MAX);
-                               nr_active -= sc.nr_to_scan;
-                               refill_inactive_zone(zone, &sc);
-                       }
-       
-                       if (nr_inactive) {
-                               sc.nr_to_scan = min(nr_inactive,
-                                               (unsigned long)SWAP_CLUSTER_MAX);
-                               nr_inactive -= sc.nr_to_scan;
-                               shrink_cache(zone, &sc);
-                               if (sc.nr_to_reclaim <= 0)
-                                       break;
-                       }
-               }
-               zone->prev_priority = zone->temp_priority;
-               zindex++;
        }
-       ckrm_clear_shrink(cls);
 }
 
-static void
-ckrm_shrink_classes(void)
-{
-       ckrm_mem_res_t *cls;
-
-       spin_lock(&ckrm_mem_lock);
-       while (!ckrm_shrink_list_empty()) {
-               cls =  list_entry(ckrm_shrink_list.next, ckrm_mem_res_t,
-                               shrink_list);
-               spin_unlock(&ckrm_mem_lock);
-               ckrm_shrink_class(cls);
-               spin_lock(&ckrm_mem_lock);
-               list_del(&cls->shrink_list);
-               cls->flags &= ~MEM_AT_LIMIT;
-       }
-       spin_unlock(&ckrm_mem_lock);
-}
-
-#else
-#define ckrm_shrink_classes()  do { } while(0)
-#endif
-
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -1295,9 +1148,6 @@ static int kswapd(void *p)
                schedule();
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               if (!ckrm_shrink_list_empty())
-                       ckrm_shrink_classes();
-               else
-                       balance_pgdat(pgdat, 0);
+               balance_pgdat(pgdat, 0);
        }
        return 0;
@@ -1308,7 +1158,7 @@ static int kswapd(void *p)
  */
 void wakeup_kswapd(struct zone *zone)
 {
-       if ((zone->free_pages > zone->pages_low) && ckrm_shrink_list_empty())
+       if (zone->free_pages > zone->pages_low)
                return;
        if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
                return;