diff --git a/block/elevator.c b/block/elevator.c
index 11e356e..f6dafa8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  *
- * 30042000 Jens Axboe <axboe@suse.de> :
+ * 30042000 Jens Axboe <axboe@kernel.dk> :
  *
  * Split the elevator a bit so that it is possible to choose a different
  * one or even write a new "plug in". There are three pieces:
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/bio.h>
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace_api.h>
+#include <linux/hash.h>
 
 #include <asm/uaccess.h>
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+/*
+ * Merge hash: requests are hashed on their end sector, so a bio's
+ * start sector can be looked up directly for back merges.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec)    ((sec) >> 3)
+#define ELV_HASH_FN(sec)       (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES       (1 << elv_hash_shift)
+#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
+#define ELV_ON_HASH(rq)                (!hlist_unhashed(&(rq)->hash))
+
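A note on the hash layout above: requests are keyed by the sector they
end at (rq_hash_key), ELV_HASH_BLOCK() drops the low three bits so that
nearby end sectors share a coarse block, and hash_long() folds the block
into one of ELV_HASH_ENTRIES (64) buckets. A minimal userspace sketch of
the same mapping, assuming the 32-bit hash_long() of this era (the
0x9e370001 multiplier is GOLDEN_RATIO_PRIME_32; illustrative only):

#include <stdio.h>
#include <stdint.h>

#define ELV_HASH_SHIFT 6

static unsigned int elv_bucket(uint32_t end_sector)
{
        uint32_t block = end_sector >> 3;       /* ELV_HASH_BLOCK() */
        uint32_t hash = block * 0x9e370001u;    /* hash_long(), 32-bit */

        return hash >> (32 - ELV_HASH_SHIFT);   /* keep the top 6 bits */
}

int main(void)
{
        /* requests ending at sectors 1024 and 1031 share a coarse block
         * (both >> 3 == 128), so they map to the same bucket */
        printf("%u %u\n", elv_bucket(1024), elv_bucket(1031));
        return 0;
}
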
+/*
+ * Query the io scheduler to see if a bio issued by the current
+ * process may be merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+       request_queue_t *q = rq->q;
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_allow_merge_fn)
+               return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+       return 1;
+}
+
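The elevator_allow_merge_fn hook above is new in this diff: even when a
merge is mechanically possible, the io scheduler may veto it. A hedged
sketch of an implementation (the function name is hypothetical; the
sync/async check is roughly what cfq, the hook's first user, does):

static int demo_allow_merge(request_queue_t *q, struct request *rq,
                            struct bio *bio)
{
        /*
         * Refuse to merge a synchronous bio into an asynchronous
         * request; they are accounted to different scheduler queues.
         */
        if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
                return 0;

        return 1;
}
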
 /*
  * can we safely merge with this request?
  */
@@ -54,13 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
                return 0;
 
        /*
-        * same device and no special stuff set, merge is ok
+        * must be same device and not a special request
         */
-       if (rq->rq_disk == bio->bi_bdev->bd_disk &&
-           !rq->waiting && !rq->special)
-               return 1;
+       if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+               return 0;
 
-       return 0;
+       if (!elv_iosched_allow_merge(rq, bio))
+               return 0;
+
+       return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
@@ -83,21 +111,18 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
 static struct elevator_type *elevator_find(const char *name)
 {
-       struct elevator_type *e = NULL;
+       struct elevator_type *e;
        struct list_head *entry;
 
        list_for_each(entry, &elv_list) {
-               struct elevator_type *__e;
 
-               __e = list_entry(entry, struct elevator_type, list);
+               e = list_entry(entry, struct elevator_type, list);
 
-               if (!strcmp(__e->elevator_name, name)) {
-                       e = __e;
-                       break;
-               }
+               if (!strcmp(e->elevator_name, name))
+                       return e;
        }
 
-       return e;
+       return NULL;
 }
 
 static void elevator_put(struct elevator_type *e)
@@ -120,21 +145,16 @@ static struct elevator_type *elevator_get(const char *name)
        return e;
 }
 
-static int elevator_attach(request_queue_t *q, struct elevator_type *e,
-                          struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-       int ret = 0;
-
-       memset(eq, 0, sizeof(*eq));
-       eq->ops = &e->ops;
-       eq->elevator_type = e;
+       return eq->ops->elevator_init_fn(q);
+}
 
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+                          void *data)
+{
        q->elevator = eq;
-
-       if (eq->ops->elevator_init_fn)
-               ret = eq->ops->elevator_init_fn(q, eq);
-
-       return ret;
+       eq->elevator_data = data;
 }
 
 static char chosen_elevator[16];
@@ -149,16 +169,60 @@ static int __init elevator_setup(char *str)
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
-       return 0;
+       return 1;
 }
 
 __setup("elevator=", elevator_setup);
 
+static struct kobj_type elv_ktype;
+
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+{
+       elevator_t *eq;
+       int i;
+
+       eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+       if (unlikely(!eq))
+               goto err;
+
+       memset(eq, 0, sizeof(*eq));
+       eq->ops = &e->ops;
+       eq->elevator_type = e;
+       kobject_init(&eq->kobj);
+       snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+       eq->kobj.ktype = &elv_ktype;
+       mutex_init(&eq->sysfs_lock);
+
+       eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+                                       GFP_KERNEL, q->node);
+       if (!eq->hash)
+               goto err;
+
+       for (i = 0; i < ELV_HASH_ENTRIES; i++)
+               INIT_HLIST_HEAD(&eq->hash[i]);
+
+       return eq;
+err:
+       kfree(eq);
+       elevator_put(e);
+       return NULL;
+}
+
+static void elevator_release(struct kobject *kobj)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+
+       elevator_put(e->elevator_type);
+       kfree(e->hash);
+       kfree(e);
+}
+
 int elevator_init(request_queue_t *q, char *name)
 {
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
+       void *data;
 
        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
@@ -176,35 +240,145 @@ int elevator_init(request_queue_t *q, char *name)
                e = elevator_get("noop");
        }
 
-       eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
-       if (!eq) {
-               elevator_put(e);
+       eq = elevator_alloc(q, e);
+       if (!eq)
                return -ENOMEM;
-       }
 
-       ret = elevator_attach(q, e, eq);
-       if (ret) {
-               kfree(eq);
-               elevator_put(e);
+       data = elevator_init_queue(q, eq);
+       if (!data) {
+               kobject_put(&eq->kobj);
+               return -ENOMEM;
        }
 
+       elevator_attach(q, eq, data);
        return ret;
 }
 
+EXPORT_SYMBOL(elevator_init);
+
 void elevator_exit(elevator_t *e)
 {
+       mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
+       e->ops = NULL;
+       mutex_unlock(&e->sysfs_lock);
 
-       elevator_put(e->elevator_type);
-       e->elevator_type = NULL;
-       kfree(e);
+       kobject_put(&e->kobj);
+}
+
+EXPORT_SYMBOL(elevator_exit);
+
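Note the shutdown split introduced here: elevator_exit() quiesces the
scheduler under sysfs_lock and clears e->ops, but the elevator's memory
is only freed by elevator_release() once the kobject's last reference
drops. A sysfs reader still holding a reference therefore gets -ENOENT
from the NULL-ops check in elv_attr_show()/elv_attr_store() below rather
than dereferencing freed memory.
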
+static inline void __elv_rqhash_del(struct request *rq)
+{
+       hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+{
+       if (ELV_ON_HASH(rq))
+               __elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+{
+       elevator_t *e = q->elevator;
+
+       BUG_ON(ELV_ON_HASH(rq));
+       hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+{
+       __elv_rqhash_del(rq);
+       elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+{
+       elevator_t *e = q->elevator;
+       struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+       struct hlist_node *entry, *next;
+       struct request *rq;
+
+       hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+               BUG_ON(!ELV_ON_HASH(rq));
+
+               if (unlikely(!rq_mergeable(rq))) {
+                       __elv_rqhash_del(rq);
+                       continue;
+               }
+
+               if (rq_hash_key(rq) == offset)
+                       return rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * RB-tree support functions for inserting/lookup/removal of requests
+ * in a sorted RB tree.
+ */
+struct request *elv_rb_add(struct rb_root *root, struct request *rq)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct request *__rq;
+
+       while (*p) {
+               parent = *p;
+               __rq = rb_entry(parent, struct request, rb_node);
+
+               if (rq->sector < __rq->sector)
+                       p = &(*p)->rb_left;
+               else if (rq->sector > __rq->sector)
+                       p = &(*p)->rb_right;
+               else
+                       return __rq;
+       }
+
+       rb_link_node(&rq->rb_node, parent, p);
+       rb_insert_color(&rq->rb_node, root);
+       return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_add);
+
+void elv_rb_del(struct rb_root *root, struct request *rq)
+{
+       BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+       rb_erase(&rq->rb_node, root);
+       RB_CLEAR_NODE(&rq->rb_node);
+}
+
+EXPORT_SYMBOL(elv_rb_del);
+
+struct request *elv_rb_find(struct rb_root *root, sector_t sector)
+{
+       struct rb_node *n = root->rb_node;
+       struct request *rq;
+
+       while (n) {
+               rq = rb_entry(n, struct request, rb_node);
+
+               if (sector < rq->sector)
+                       n = n->rb_left;
+               else if (sector > rq->sector)
+                       n = n->rb_right;
+               else
+                       return rq;
+       }
+
+       return NULL;
 }
 
+EXPORT_SYMBOL(elv_rb_find);
+
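These three helpers let a scheduler keep its sector-sorted lookup in a
shared rb_root instead of a private list. Note that elv_rb_add() does
not insert when a request with the same start sector already exists; it
returns that alias, and the caller must resolve the collision. A hedged
sketch of the intended pattern (schematic; real schedulers resolve the
alias through their own dispatch path):

static void demo_add_rq_rb(request_queue_t *q, struct rb_root *root,
                           struct request *rq)
{
        struct request *alias;

        /* retry until no request with the same start sector remains */
        while ((alias = elv_rb_add(root, rq)) != NULL) {
                elv_rb_del(root, alias);        /* drop the duplicate key */
                elv_dispatch_sort(q, alias);    /* push the alias onward */
        }
}
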
 /*
  * Insert rq into dispatch queue of q.  Queue lock must be held on
- * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
- * appended to the dispatch queue.  To be used by specific elevators.
+ * entry.  rq is sort inserted into the dispatch queue. To be used by
+ * specific elevators.
  */
 void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 {
@@ -213,6 +387,9 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
        if (q->last_merge == rq)
                q->last_merge = NULL;
+
+       elv_rqhash_del(q, rq);
+
        q->nr_sorted--;
 
        boundary = q->end_sector;
@@ -220,7 +397,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
-               if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+               if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
@@ -236,11 +413,38 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
        list_add(&rq->queuelist, entry);
 }
 
+EXPORT_SYMBOL(elv_dispatch_sort);
+
+/*
+ * Insert rq into dispatch queue of q.  Queue lock must be held on
+ * entry.  rq is added to the back of the dispatch queue. To be used by
+ * specific elevators.
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
+
+       elv_rqhash_del(q, rq);
+
+       q->nr_sorted--;
+
+       q->end_sector = rq_end_sector(rq);
+       q->boundary_rq = rq;
+       list_add_tail(&rq->queuelist, &q->queue_head);
+}
+
+EXPORT_SYMBOL(elv_dispatch_add_tail);
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
        elevator_t *e = q->elevator;
+       struct request *__rq;
        int ret;
 
+       /*
+        * First try one-hit cache.
+        */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
@@ -249,18 +453,30 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
                }
        }
 
+       /*
+        * See if our hash lookup can find a potential backmerge.
+        */
+       __rq = elv_rqhash_find(q, bio->bi_sector);
+       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+               *req = __rq;
+               return ELEVATOR_BACK_MERGE;
+       }
+
        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);
 
        return ELEVATOR_NO_MERGE;
 }
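
The lookup order in elv_merge() is cheapest-first: the one-hit cache is
free, the hash catches the common back merge in O(1), and only the
remaining cases pay for the io scheduler's elevator_merge_fn. Concretely
(hypothetical numbers): a request covering sectors [100, 108) is hashed
under rq_hash_key() == 108; a later bio with bi_sector == 108 probes that
bucket, passes elv_rq_merge_ok(), and elv_merge() returns
ELEVATOR_BACK_MERGE without entering the scheduler at all.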
 
-void elv_merged_request(request_queue_t *q, struct request *rq)
+void elv_merged_request(request_queue_t *q, struct request *rq, int type)
 {
        elevator_t *e = q->elevator;
 
        if (e->ops->elevator_merged_fn)
-               e->ops->elevator_merged_fn(q, rq);
+               e->ops->elevator_merged_fn(q, rq, type);
+
+       if (type == ELEVATOR_BACK_MERGE)
+               elv_rqhash_reposition(q, rq);
 
        q->last_merge = rq;
 }
@@ -272,8 +488,11 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
-       q->nr_sorted--;
 
+       elv_rqhash_reposition(q, rq);
+       elv_rqhash_del(q, next);
+
+       q->nr_sorted--;
        q->last_merge = rq;
 }
 
@@ -291,7 +510,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }
 
-       rq->flags &= ~REQ_STARTED;
+       rq->cmd_flags &= ~REQ_STARTED;
 
        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -316,17 +535,19 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
        unsigned ordseq;
        int unplug_it = 1;
 
+       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
        rq->q = q;
 
        switch (where) {
        case ELEVATOR_INSERT_FRONT:
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
 
                list_add(&rq->queuelist, &q->queue_head);
                break;
 
        case ELEVATOR_INSERT_BACK:
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
@@ -345,10 +566,14 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
-               rq->flags |= REQ_SORTED;
+               rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
-               if (q->last_merge == NULL && rq_mergeable(rq))
-                       q->last_merge = rq;
+               if (rq_mergeable(rq)) {
+                       elv_rqhash_add(q, rq);
+                       if (!q->last_merge)
+                               q->last_merge = rq;
+               }
+
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
@@ -363,7 +588,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
+
+               /*
+                * Most requeues happen because of a busy condition,
+                * don't force unplug of the queue for that case.
+                */
+               unplug_it = 0;
 
                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
@@ -379,11 +610,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
                }
 
                list_add_tail(&rq->queuelist, pos);
-               /*
-                * most requeues happen because of a busy condition, don't
-                * force unplug of the queue for that case.
-                */
-               unplug_it = 0;
                break;
 
        default:
@@ -405,9 +631,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
 {
        if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
+               rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+       if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
@@ -428,7 +654,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+       } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;
 
        if (plug)
@@ -437,6 +663,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
        elv_insert(q, rq, where);
 }
 
+EXPORT_SYMBOL(__elv_add_request);
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
 {
@@ -447,6 +675,8 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+EXPORT_SYMBOL(elv_add_request);
+
 static inline struct request *__elv_next_request(request_queue_t *q)
 {
        struct request *rq;
@@ -469,7 +699,7 @@ struct request *elv_next_request(request_queue_t *q)
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
-               if (!(rq->flags & REQ_STARTED)) {
+               if (!(rq->cmd_flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;
 
                        /*
@@ -486,7 +716,8 @@ struct request *elv_next_request(request_queue_t *q)
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
-                       rq->flags |= REQ_STARTED;
+                       rq->cmd_flags |= REQ_STARTED;
+                       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }
 
                if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -494,7 +725,7 @@ struct request *elv_next_request(request_queue_t *q)
                        q->boundary_rq = NULL;
                }
 
-               if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+               if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;
 
                ret = q->prep_rq_fn(q, rq);
@@ -516,7 +747,7 @@ struct request *elv_next_request(request_queue_t *q)
                                nr_bytes = rq->data_len;
 
                        blkdev_dequeue_request(rq);
-                       rq->flags |= REQ_QUIET;
+                       rq->cmd_flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
@@ -529,9 +760,12 @@ struct request *elv_next_request(request_queue_t *q)
        return rq;
 }
 
+EXPORT_SYMBOL(elv_next_request);
+
 void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
        BUG_ON(list_empty(&rq->queuelist));
+       BUG_ON(ELV_ON_HASH(rq));
 
        list_del_init(&rq->queuelist);
 
@@ -544,6 +778,8 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
                q->in_flight++;
 }
 
+EXPORT_SYMBOL(elv_dequeue_request);
+
 int elv_queue_empty(request_queue_t *q)
 {
        elevator_t *e = q->elevator;
@@ -557,6 +793,8 @@ int elv_queue_empty(request_queue_t *q)
        return 1;
 }
 
+EXPORT_SYMBOL(elv_queue_empty);
+
 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
 {
        elevator_t *e = q->elevator;
@@ -575,13 +813,12 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
        return NULL;
 }
 
-int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                   gfp_t gfp_mask)
+int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 {
        elevator_t *e = q->elevator;
 
        if (e->ops->elevator_set_req_fn)
-               return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
+               return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
 
        rq->elevator_private = NULL;
        return 0;
@@ -592,15 +829,15 @@ void elv_put_request(request_queue_t *q, struct request *rq)
        elevator_t *e = q->elevator;
 
        if (e->ops->elevator_put_req_fn)
-               e->ops->elevator_put_req_fn(q, rq);
+               e->ops->elevator_put_req_fn(rq);
 }
 
-int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
+int elv_may_queue(request_queue_t *q, int rw)
 {
        elevator_t *e = q->elevator;
 
        if (e->ops->elevator_may_queue_fn)
-               return e->ops->elevator_may_queue_fn(q, rw, bio);
+               return e->ops->elevator_may_queue_fn(q, rw);
 
        return ELV_MQUEUE_MAY;
 }
@@ -633,34 +870,89 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
        }
 }
 
-int elv_register_queue(struct request_queue *q)
+#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+
+static ssize_t
+elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
-       elevator_t *e = q->elevator;
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct elv_fs_entry *entry = to_elv(attr);
+       ssize_t error;
 
-       e->kobj.parent = kobject_get(&q->kobj);
-       if (!e->kobj.parent)
-               return -EBUSY;
+       if (!entry->show)
+               return -EIO;
 
-       snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-       e->kobj.ktype = e->elevator_type->elevator_ktype;
+       mutex_lock(&e->sysfs_lock);
+       error = e->ops ? entry->show(e, page) : -ENOENT;
+       mutex_unlock(&e->sysfs_lock);
+       return error;
+}
+
+static ssize_t
+elv_attr_store(struct kobject *kobj, struct attribute *attr,
+              const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct elv_fs_entry *entry = to_elv(attr);
+       ssize_t error;
+
+       if (!entry->store)
+               return -EIO;
 
-       return kobject_register(&e->kobj);
+       mutex_lock(&e->sysfs_lock);
+       error = e->ops ? entry->store(e, page, length) : -ENOENT;
+       mutex_unlock(&e->sysfs_lock);
+       return error;
 }
 
-void elv_unregister_queue(struct request_queue *q)
+static struct sysfs_ops elv_sysfs_ops = {
+       .show   = elv_attr_show,
+       .store  = elv_attr_store,
+};
+
+static struct kobj_type elv_ktype = {
+       .sysfs_ops      = &elv_sysfs_ops,
+       .release        = elevator_release,
+};
+
+int elv_register_queue(struct request_queue *q)
 {
-       if (q) {
-               elevator_t *e = q->elevator;
-               kobject_unregister(&e->kobj);
-               kobject_put(&q->kobj);
+       elevator_t *e = q->elevator;
+       int error;
+
+       e->kobj.parent = &q->kobj;
+
+       error = kobject_add(&e->kobj);
+       if (!error) {
+               struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+               if (attr) {
+                       while (attr->attr.name) {
+                               if (sysfs_create_file(&e->kobj, &attr->attr))
+                                       break;
+                               attr++;
+                       }
+               }
+               kobject_uevent(&e->kobj, KOBJ_ADD);
        }
+       return error;
+}
+
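elv_register_queue() now walks elevator_type->elevator_attrs, an array
of struct elv_fs_entry terminated by a NULL attribute name, instead of a
per-elevator ktype. A hedged sketch of how a scheduler would publish a
tunable under this scheme (all demo_* names are hypothetical):

struct demo_data {
        int quantum;                    /* hypothetical tunable */
};

static ssize_t demo_quantum_show(elevator_t *e, char *page)
{
        struct demo_data *dd = e->elevator_data;

        return sprintf(page, "%d\n", dd->quantum);
}

static ssize_t demo_quantum_store(elevator_t *e, const char *page,
                                  size_t count)
{
        struct demo_data *dd = e->elevator_data;

        dd->quantum = simple_strtoul(page, NULL, 10);
        return count;
}

static struct elv_fs_entry demo_attrs[] = {
        __ATTR(quantum, S_IRUGO | S_IWUSR,
               demo_quantum_show, demo_quantum_store),
        __ATTR_NULL     /* terminates the walk in elv_register_queue() */
};
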
+static void __elv_unregister_queue(elevator_t *e)
+{
+       kobject_uevent(&e->kobj, KOBJ_REMOVE);
+       kobject_del(&e->kobj);
+}
+
+void elv_unregister_queue(struct request_queue *q)
+{
+       if (q)
+               __elv_unregister_queue(q->elevator);
 }
 
 int elv_register(struct elevator_type *e)
 {
        spin_lock_irq(&elv_list_lock);
-       if (elevator_find(e->elevator_name))
-               BUG();
+       BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);
 
@@ -681,21 +973,16 @@ void elv_unregister(struct elevator_type *e)
        /*
         * Iterate over every thread in the system to trim its io context.
         */
-       read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
-               struct io_context *ioc = p->io_context;
-               if (ioc && ioc->cic) {
-                       ioc->cic->exit(ioc->cic);
-                       ioc->cic->dtor(ioc->cic);
-                       ioc->cic = NULL;
-               }
-               if (ioc && ioc->aic) {
-                       ioc->aic->exit(ioc->aic);
-                       ioc->aic->dtor(ioc->aic);
-                       ioc->aic = NULL;
-               }
-       } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
+       if (e->ops.trim) {
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       task_lock(p);
+                       if (p->io_context)
+                               e->ops.trim(p->io_context);
+                       task_unlock(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+       }
 
        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
@@ -709,16 +996,23 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
-static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
        elevator_t *old_elevator, *e;
+       void *data;
 
        /*
         * Allocate new elevator
         */
-       e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+       e = elevator_alloc(q, new_e);
        if (!e)
-               goto error;
+               return 0;
+
+       data = elevator_init_queue(q, e);
+       if (!data) {
+               kobject_put(&e->kobj);
+               return 0;
+       }
 
        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
@@ -738,19 +1032,19 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
                elv_drain_elevator(q);
        }
 
-       spin_unlock_irq(q->queue_lock);
-
        /*
-        * unregister old elevator data
+        * Remember old elevator.
         */
-       elv_unregister_queue(q);
        old_elevator = q->elevator;
 
        /*
         * attach and start new elevator
         */
-       if (elevator_attach(q, new_e, e))
-               goto fail;
+       elevator_attach(q, e, data);
+
+       spin_unlock_irq(q->queue_lock);
+
+       __elv_unregister_queue(old_elevator);
 
        if (elv_register_queue(q))
                goto fail_register;
@@ -760,7 +1054,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       return;
+       return 1;
 
 fail_register:
        /*
@@ -768,15 +1062,10 @@ fail_register:
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
-       e = NULL;
-fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       kfree(e);
-error:
-       elevator_put(new_e);
-       printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
+       return 0;
 }
 
 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -803,7 +1092,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
                return count;
        }
 
-       elevator_switch(q, e);
+       if (!elevator_switch(q, e))
+               printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
        return count;
 }
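
elv_iosched_store() backs writes to /sys/block/<dev>/queue/scheduler;
with elevator_switch() now returning success or failure, a failed
runtime switch is reported here, after the fallback path inside
elevator_switch() has already re-registered the old scheduler.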
 
@@ -830,13 +1120,26 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
        return len;
 }
 
-EXPORT_SYMBOL(elv_dispatch_sort);
-EXPORT_SYMBOL(elv_add_request);
-EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_requeue_request);
-EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_dequeue_request);
-EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elv_completed_request);
-EXPORT_SYMBOL(elevator_exit);
-EXPORT_SYMBOL(elevator_init);
+struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+{
+       struct rb_node *rbprev = rb_prev(&rq->rb_node);
+
+       if (rbprev)
+               return rb_entry_rq(rbprev);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_former_request);
+
+struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct rb_node *rbnext = rb_next(&rq->rb_node);
+
+       if (rbnext)
+               return rb_entry_rq(rbnext);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_latter_request);
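
elv_rb_former_request() and elv_rb_latter_request() round out the RB
support: a scheduler sorting on the shared rb_node can plug them
straight into elevator_former_req_fn/elevator_latter_req_fn, since the
request adjacent in sector order is simply the neighbouring tree node.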