#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
struct as_rq *next_arq[2]; /* next in sort order */
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct hlist_head *hash; /* request hash */
+ struct list_head *hash; /* request hash */
unsigned long exit_prob; /* probability a task will exit while
being waited on */
/*
* request hash, key is the ending offset (for back merge lookup)
*/
- struct hlist_node hash;
+ struct list_head hash;
+ unsigned int on_hash;
/*
* expire fifo
static kmem_cache_t *arq_pool;
-static atomic_t ioc_count = ATOMIC_INIT(0);
-static struct completion *ioc_gone;
-
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);
static void free_as_io_context(struct as_io_context *aic)
{
kfree(aic);
- if (atomic_dec_and_test(&ioc_count) && ioc_gone)
- complete(ioc_gone);
-}
-
-static void as_trim(struct io_context *ioc)
-{
- if (ioc->aic)
- free_as_io_context(ioc->aic);
- ioc->aic = NULL;
}
/* Called when the task exits */
ret->seek_total = 0;
ret->seek_samples = 0;
ret->seek_mean = 0;
- atomic_inc(&ioc_count);
}
return ret;
#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES (1 << as_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
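The hash is keyed on a request's ending sector (rq_hash_key), so a
back-merge candidate, a new request starting exactly where an existing
one ends, probes the same bucket. A toy userspace sketch of the keying;
the modular hash below is only a stand-in for the kernel's hash_long():

    #include <stdio.h>

    #define AS_HASH_ENTRIES 64                /* stand-in bucket count */

    /* stand-in for AS_HASH_FN(): hash_long(AS_HASH_BLOCK(sec), shift) */
    static unsigned long toy_hash(unsigned long sec)
    {
            return sec % AS_HASH_ENTRIES;
    }

    int main(void)
    {
            unsigned long sector = 100, nr_sectors = 8;
            unsigned long key = sector + nr_sectors; /* rq_hash_key() */

            /* a request starting at sector 108 probes the bucket where
             * the request ending at 108 lives, enabling a back merge */
            printf("bucket %lu == bucket %lu\n", toy_hash(key), toy_hash(108UL));
            return 0;
    }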
static inline void __as_del_arq_hash(struct as_rq *arq)
{
- hlist_del_init(&arq->hash);
+ arq->on_hash = 0;
+ list_del_init(&arq->hash);
}
static inline void as_del_arq_hash(struct as_rq *arq)
{
- if (!hlist_unhashed(&arq->hash))
+ if (arq->on_hash)
__as_del_arq_hash(arq);
}
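Plain list_head has no hlist_unhashed() test, so hash membership is
tracked with an explicit on_hash bit, which also makes removal
idempotent. A minimal userspace sketch of the idiom, with toy list
helpers standing in for <linux/list.h>:

    #include <stdio.h>

    struct node { struct node *next, *prev; int on_hash; };

    static void list_init(struct node *n) { n->next = n->prev = n; }

    static void list_add_head(struct node *n, struct node *h)
    {
            n->next = h->next; n->prev = h;
            h->next->prev = n; h->next = n;
    }

    static void list_del_init(struct node *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
            list_init(n);
    }

    /* mirrors as_del_arq_hash(): the flag guards the unlink, so a
     * second call on the same node is a harmless no-op */
    static void del_hash(struct node *n)
    {
            if (n->on_hash) {
                    n->on_hash = 0;
                    list_del_init(n);
            }
    }

    int main(void)
    {
            struct node head, a;

            list_init(&head); list_init(&a);
            list_add_head(&a, &head); a.on_hash = 1;
            del_hash(&a);
            del_hash(&a);            /* no-op */
            printf("on_hash = %d\n", a.on_hash);
            return 0;
    }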
{
struct request *rq = arq->request;
- BUG_ON(!hlist_unhashed(&arq->hash));
+ BUG_ON(arq->on_hash);
- hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+ arq->on_hash = 1;
+ list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}
/*
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+ struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
- if (hlist_unhashed(&arq->hash)) {
+ if (!arq->on_hash) {
WARN_ON(1);
return;
}
- if (&arq->hash != head->first) {
- hlist_del(&arq->hash);
- hlist_add_head(&arq->hash, head);
+ if (arq->hash.prev != head) {
+ list_del(&arq->hash);
+ list_add(&arq->hash, head);
}
}
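as_hot_arq_hash() is a move-to-front heuristic: a request that was just
merged against is relinked at the head of its bucket so repeated merges
find it immediately; with list_head the "already first" test becomes
arq->hash.prev == head. A standalone sketch using the same style of toy
helpers:

    #include <stdio.h>

    struct list { struct list *next, *prev; };

    static void init(struct list *n) { n->next = n->prev = n; }

    static void add_head(struct list *n, struct list *h)
    {
            n->next = h->next; n->prev = h;
            h->next->prev = n; h->next = n;
    }

    static void del(struct list *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
    }

    /* only relink when not already first, the same test as
     * arq->hash.prev != head above */
    static void move_to_front(struct list *n, struct list *head)
    {
            if (n->prev != head) {
                    del(n);
                    add_head(n, head);
            }
    }

    int main(void)
    {
            struct list head, a, b;

            init(&head);
            add_head(&a, &head);
            add_head(&b, &head);      /* bucket order: b, a */
            move_to_front(&a, &head); /* bucket order: a, b */
            printf("front is %s\n", head.next == &a ? "a" : "b");
            return 0;
    }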
static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
- struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
- struct hlist_node *entry, *next;
- struct as_rq *arq;
+ struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+ struct list_head *entry, *next = hash_list->next;
- hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
+ while ((entry = next) != hash_list) {
+ struct as_rq *arq = list_entry_hash(entry);
struct request *__rq = arq->request;
- BUG_ON(hlist_unhashed(&arq->hash));
+ next = entry->next;
+
+ BUG_ON(!arq->on_hash);
if (!rq_mergeable(__rq)) {
as_del_arq_hash(arq);
/*
* rb tree support functions
*/
+#define RB_NONE (2)
+#define RB_EMPTY(root) ((root)->rb_node == NULL)
+#define ON_RB(node) ((node)->rb_color != RB_NONE)
+#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq) (rq)->sector
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
- if (!RB_EMPTY_NODE(&arq->rb_node)) {
+ if (!ON_RB(&arq->rb_node)) {
WARN_ON(1);
return;
}
rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- RB_CLEAR_NODE(&arq->rb_node);
+ RB_CLEAR(&arq->rb_node);
}
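RB_CLEAR()/ON_RB() mark a node as off-tree by storing the out-of-band
value RB_NONE (2) in rb_color, which for a node actually on a tree only
ever holds RB_RED (0) or RB_BLACK (1). A sketch of the trick; the
rb_node layout with a separate rb_color field is an assumption matching
kernels of this vintage, before the color bit was folded into
rb_parent_color:

    #include <stdio.h>

    struct rb_node {
            struct rb_node *rb_parent;
            int rb_color;
    #define RB_RED   0
    #define RB_BLACK 1
            struct rb_node *rb_right, *rb_left;
    };

    #define RB_NONE (2)                       /* not on any tree */
    #define ON_RB(node)    ((node)->rb_color != RB_NONE)
    #define RB_CLEAR(node) ((node)->rb_color = RB_NONE)

    int main(void)
    {
            struct rb_node n;

            RB_CLEAR(&n);                         /* freshly allocated */
            printf("on tree: %d\n", ON_RB(&n));   /* 0 */
            n.rb_color = RB_RED;                  /* as an insert would set */
            printf("on tree: %d\n", ON_RB(&n));   /* 1 */
            return 0;
    }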
static struct request *
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct as_rq *arq_next, *arq_prev;
- BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
+ BUG_ON(!ON_RB(&last->rb_node));
if (rbprev)
arq_prev = rb_entry_arq(rbprev);
}
/*
 * as_can_anticipate indicates whether we should either run arq
* or keep anticipating a better request.
*/
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
struct request *rq = arq->request;
const int data_dir = arq->is_sync;
- BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
+ BUG_ON(!ON_RB(&arq->rb_node));
as_antic_stop(ad);
ad->antic_status = ANTIC_OFF;
*/
if (reads) {
- BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
+ BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
if (writes && ad->batch_data_dir == REQ_SYNC)
/*
if (writes) {
dispatch_writes:
- BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
+ BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
if (ad->batch_data_dir == REQ_SYNC) {
ad->changed_batch = 1;
arq->state = AS_RQ_NEW;
if (rq_data_dir(arq->request) == READ
- || (arq->request->flags & REQ_RW_SYNC))
+ || (current->flags & PF_SYNCWRITE))
arq->is_sync = 1;
else
arq->is_sync = 0;
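The revert classifies a request as synchronous when it is a READ, or
when the submitting task carries PF_SYNCWRITE (set around synchronous
write paths in kernels of this era), instead of the per-request
REQ_RW_SYNC flag. A toy sketch of the predicate; the flag value and the
task type here are stand-ins:

    #include <stdio.h>

    #define READ  0
    #define WRITE 1
    #define PF_SYNCWRITE 0x00200000   /* stand-in flag value */

    struct task { unsigned long flags; };

    /* mirrors the is_sync decision above */
    static int is_sync(int data_dir, const struct task *tsk)
    {
            return data_dir == READ || (tsk->flags & PF_SYNCWRITE);
    }

    int main(void)
    {
            struct task sync_writer = { .flags = PF_SYNCWRITE };
            struct task plain_writer = { .flags = 0 };

            printf("%d %d\n", is_sync(WRITE, &sync_writer),
                              is_sync(WRITE, &plain_writer));
            return 0;
    }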
if (arq) {
memset(arq, 0, sizeof(*arq));
- RB_CLEAR_NODE(&arq->rb_node);
+ RB_CLEAR(&arq->rb_node);
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
- INIT_HLIST_NODE(&arq->hash);
+ INIT_LIST_HEAD(&arq->hash);
+ arq->on_hash = 0;
INIT_LIST_HEAD(&arq->fifo);
rq->elevator_private = arq;
return 0;
* initialize elevator private data (as_data), and alloc an arq for
* each request on the free lists
*/
-static void *as_init_queue(request_queue_t *q, elevator_t *e)
+static int as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
if (!arq_pool)
- return NULL;
+ return -ENOMEM;
ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
- return NULL;
+ return -ENOMEM;
memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
- ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
+ ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
- return NULL;
+ return -ENOMEM;
}
ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
- return NULL;
+ return -ENOMEM;
}
/* anticipatory scheduling helpers */
INIT_WORK(&ad->antic_work, as_work_handler, q);
for (i = 0; i < AS_HASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&ad->hash[i]);
+ INIT_LIST_HEAD(&ad->hash[i]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
ad->antic_expire = default_antic_expire;
ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
+ e->elevator_data = ad;
ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;
- return ad;
+ return 0;
}
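With the int return type, as_init_queue() publishes its private data
through e->elevator_data itself and reports allocation failure as
-ENOMEM instead of a NULL pointer. The function above unwinds inline
with repeated kfree(); the same shape written as a goto ladder, in a
userspace sketch where calloc() stands in for kmalloc_node() and
mempool_create_node():

    #include <stdlib.h>
    #include <errno.h>

    struct as_data { void *hash; void *arq_pool; };

    /* every failure frees what was allocated so far, then returns
     * a negative errno, matching the as_init_queue() contract */
    static int init_queue(struct as_data **elevator_data)
    {
            struct as_data *ad = calloc(1, sizeof(*ad));
            if (!ad)
                    return -ENOMEM;

            ad->hash = calloc(64, sizeof(void *));
            if (!ad->hash)
                    goto free_ad;

            ad->arq_pool = calloc(1, 128);   /* stand-in mempool */
            if (!ad->arq_pool)
                    goto free_hash;

            *elevator_data = ad;             /* e->elevator_data = ad */
            return 0;

    free_hash:
            free(ad->hash);
    free_ad:
            free(ad);
            return -ENOMEM;
    }

    int main(void)
    {
            struct as_data *ad;
            return init_queue(&ad) ? 1 : 0;
    }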
/*
* sysfs parts below
*/
+struct as_fs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct as_data *, char *);
+ ssize_t (*store)(struct as_data *, const char *, size_t);
+};
static ssize_t
as_var_show(unsigned int var, char *page)
return count;
}
-static ssize_t est_time_show(elevator_t *e, char *page)
+static ssize_t as_est_show(struct as_data *ad, char *page)
{
- struct as_data *ad = e->elevator_data;
int pos = 0;
pos += sprintf(page+pos, "%lu %% exit probability\n",
}
#define SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct as_data *ad, char *page) \
{ \
- struct as_data *ad = e->elevator_data; \
return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
}
-SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
-SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
-SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
{ \
- struct as_data *ad = e->elevator_data; \
- int ret = as_var_store(__PTR, (page), count); \
+ int ret = as_var_store(__PTR, (page), count); \
if (*(__PTR) < (MIN)) \
*(__PTR) = (MIN); \
else if (*(__PTR) > (MAX)) \
*(__PTR) = (MAX); \
*(__PTR) = msecs_to_jiffies(*(__PTR)); \
return ret; \
}
-STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
-STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
-STORE_FUNCTION(as_read_batch_expire_store,
+STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batchexpire_store,
&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_batch_expire_store,
+STORE_FUNCTION(as_write_batchexpire_store,
&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION
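Hand-expanding STORE_FUNCTION for the read_expire tunable (with the MAX
clamp restored above) gives roughly the following, so every tunable
shares the same parse, clamp, and milliseconds-to-jiffies sequence:

    static ssize_t as_readexpire_store(struct as_data *ad, const char *page,
                                       size_t count)
    {
            int ret = as_var_store(&ad->fifo_expire[REQ_SYNC], page, count);

            if (ad->fifo_expire[REQ_SYNC] < 0)
                    ad->fifo_expire[REQ_SYNC] = 0;
            else if (ad->fifo_expire[REQ_SYNC] > INT_MAX)
                    ad->fifo_expire[REQ_SYNC] = INT_MAX;
            /* sysfs writes are in ms; the scheduler works in jiffies */
            ad->fifo_expire[REQ_SYNC] = msecs_to_jiffies(ad->fifo_expire[REQ_SYNC]);
            return ret;
    }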
-#define AS_ATTR(name) \
- __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
-
-static struct elv_fs_entry as_attrs[] = {
- __ATTR_RO(est_time),
- AS_ATTR(read_expire),
- AS_ATTR(write_expire),
- AS_ATTR(antic_expire),
- AS_ATTR(read_batch_expire),
- AS_ATTR(write_batch_expire),
- __ATTR_NULL
+static struct as_fs_entry as_est_entry = {
+ .attr = {.name = "est_time", .mode = S_IRUGO },
+ .show = as_est_show,
+};
+static struct as_fs_entry as_readexpire_entry = {
+ .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = as_readexpire_show,
+ .store = as_readexpire_store,
+};
+static struct as_fs_entry as_writeexpire_entry = {
+ .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = as_writeexpire_show,
+ .store = as_writeexpire_store,
+};
+static struct as_fs_entry as_anticexpire_entry = {
+ .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = as_anticexpire_show,
+ .store = as_anticexpire_store,
+};
+static struct as_fs_entry as_read_batchexpire_entry = {
+ .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = as_read_batchexpire_show,
+ .store = as_read_batchexpire_store,
+};
+static struct as_fs_entry as_write_batchexpire_entry = {
+ .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = as_write_batchexpire_show,
+ .store = as_write_batchexpire_store,
+};
+
+static struct attribute *default_attrs[] = {
+ &as_est_entry.attr,
+ &as_readexpire_entry.attr,
+ &as_writeexpire_entry.attr,
+ &as_anticexpire_entry.attr,
+ &as_read_batchexpire_entry.attr,
+ &as_write_batchexpire_entry.attr,
+ NULL,
+};
+
+#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
+
+static ssize_t
+as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ elevator_t *e = container_of(kobj, elevator_t, kobj);
+ struct as_fs_entry *entry = to_as(attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+as_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ elevator_t *e = container_of(kobj, elevator_t, kobj);
+ struct as_fs_entry *entry = to_as(attr);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops as_sysfs_ops = {
+ .show = as_attr_show,
+ .store = as_attr_store,
+};
+
+static struct kobj_type as_ktype = {
+ .sysfs_ops = &as_sysfs_ops,
+ .default_attrs = default_attrs,
};
static struct elevator_type iosched_as = {
.elevator_may_queue_fn = as_may_queue,
.elevator_init_fn = as_init_queue,
.elevator_exit_fn = as_exit_queue,
- .trim = as_trim,
},
- .elevator_attrs = as_attrs,
+ .elevator_ktype = &as_ktype,
.elevator_name = "anticipatory",
.elevator_owner = THIS_MODULE,
};
static void __exit as_exit(void)
{
- DECLARE_COMPLETION_ONSTACK(all_gone);
elv_unregister(&iosched_as);
- ioc_gone = &all_gone;
- /* ioc_gone's update must be visible before reading ioc_count */
- smp_wmb();
- if (atomic_read(&ioc_count))
- wait_for_completion(ioc_gone);
- synchronize_rcu();
kmem_cache_destroy(arq_pool);
}
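For symmetry with as_exit() above, the module init side of this era
creates the arq_pool slab and registers the elevator, destroying the
slab if registration fails. A condensed sketch; the six-argument
kmem_cache_create() and an int-returning elv_register() are assumptions
based on kernels of this vintage:

    static int __init as_init(void)
    {
            int ret;

            arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
                                         0, 0, NULL, NULL);
            if (!arq_pool)
                    return -ENOMEM;

            ret = elv_register(&iosched_as);
            if (ret)
                    kmem_cache_destroy(arq_pool);
            return ret;
    }

    module_init(as_init);
    module_exit(as_exit);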