static int __make_request(request_queue_t *, struct bio *);
static elevator_t *chosen_elevator =
-#if defined(CONFIG_IOSCHED_AS)
+#if defined(CONFIG_IOSCHED_CFQ)
+ &iosched_cfq;
+#elif defined(CONFIG_IOSCHED_AS)
&iosched_as;
#elif defined(CONFIG_IOSCHED_DEADLINE)
&iosched_deadline;
-#elif defined(CONFIG_IOSCHED_CFQ)
- &iosched_cfq;
#elif defined(CONFIG_IOSCHED_NOOP)
&elevator_noop;
#else
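With this hunk, CFQ moves to the head of the selection chain, so it becomes
the compiled-in default whenever CONFIG_IOSCHED_CFQ is set, regardless of
which other schedulers are also built. As a sketch, with CONFIG_IOSCHED_CFQ=y
the chain above resolves to:

	static elevator_t *chosen_elevator = &iosched_cfq;

(iosched_cfq itself is defined in the CFQ scheduler, not in this file.)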
struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
+
+ if (!elv_may_queue(q, rw))
+ goto out_lock;
+
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
}
}
- if (blk_queue_full(q, rw)
- && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- spin_unlock_irq(q->queue_lock);
- goto out;
- }
+ /*
+ * The queue is full and the allocating process is not a
+ * "batcher", and not exempted by the IO scheduler
+ */
+ if (blk_queue_full(q, rw) && !ioc_batching(ioc))
+ goto out_lock;
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw);
- spin_unlock_irq(q->queue_lock);
- goto out;
+ goto out_lock;
}
if (ioc_batching(ioc))
out:
put_io_context(ioc);
return rq;
+out_lock:
+ if (!rq)
+ elv_set_congested(q);
+ spin_unlock_irq(q->queue_lock);
+ goto out;
}
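The get_request() rework above does two things: the elv_may_queue() check is
moved ahead of the request-list accounting, so the IO scheduler can veto an
allocation before any counters are touched, and every failure path now funnels
through the single out_lock label, which marks the queue congested via the new
elv_set_congested() hook and drops the queue lock in one place. The following
standalone sketch shows the same single-exit locking shape in plain C; the
names (demo_lock, demo_alloc, may_queue, queue_full) are illustrative, not
from the kernel:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

	/* One locked region, one shared failure exit -- the shape
	 * get_request() has after this patch. */
	static void *demo_alloc(int may_queue, int queue_full)
	{
		void *obj = NULL;

		pthread_mutex_lock(&demo_lock);
		if (!may_queue)
			goto out_lock;	/* scheduler veto, like elv_may_queue() */
		if (queue_full)
			goto out_lock;	/* full, and caller is not a batcher */
		obj = malloc(64);
	out_lock:
		pthread_mutex_unlock(&demo_lock);
		return obj;
	}

	int main(void)
	{
		free(demo_alloc(1, 0));
		return 0;
	}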
/*
sector_t maxsector;
int ret, nr_sectors = bio_sectors(bio);
+ might_sleep();
/* Test device or partition size, when known. */
maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) {
kobject_put(&disk->kobj);
}
}
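The might_sleep() added to generic_make_request() above is a debugging
annotation only: it documents that the function may block, and with
sleep-in-atomic debugging enabled it warns when IO is submitted from atomic
context. From memory of kernels of this vintage it expands roughly as below;
treat the exact config symbol as an assumption:

	#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
	# define might_sleep() __might_sleep(__FILE__, __LINE__)
	#else
	# define might_sleep() do { } while (0)
	#endif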
+
+asmlinkage int sys_ioprio_set(int ioprio)
+{
+ if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
+ return -EINVAL;
+ if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+	printk(KERN_INFO "%s: set ioprio %d\n", current->comm, ioprio);
+ current->ioprio = ioprio;
+ return 0;
+}
+
+asmlinkage int sys_ioprio_get(void)
+{
+ return current->ioprio;
+}
+
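The excerpt adds sys_ioprio_set()/sys_ioprio_get() but does not show the
syscall-table wiring, so any userspace test has to supply its own numbers. A
hypothetical smoke test follows; the __NR_ioprio_* values are placeholders
that must match the architecture's syscall table, and the CAP_SYS_ADMIN note
mirrors the check in sys_ioprio_set() above:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define __NR_ioprio_set	289	/* placeholder: check the arch syscall table */
	#define __NR_ioprio_get	290	/* placeholder */

	int main(void)
	{
		long prio = syscall(__NR_ioprio_get);

		printf("current ioprio: %ld\n", prio);

		/* Raising to IOPRIO_RT requires CAP_SYS_ADMIN, so just
		 * re-set the value we already have. */
		if (syscall(__NR_ioprio_set, (int)prio) < 0)
			perror("ioprio_set");
		return 0;
	}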