struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
-
- if (!elv_may_queue(q, rw))
- goto out_lock;
-
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
* full, and mark this process as "batching". This process
* will be allowed to complete a batch of requests, others
* will be blocked.
*/
if (!blk_queue_full(q, rw)) {
ioc_set_batching(ioc);
blk_set_queue_full(q, rw);
}
}
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- if (blk_queue_full(q, rw) && !ioc_batching(ioc))
- goto out_lock;
+ if (blk_queue_full(q, rw)
+ && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
+ /*
+ * The queue is full and the allocating process is not a
+ * "batcher", and not exempted by the IO scheduler
+ */
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+ }
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
* we might have messed up.
*
* Allocating task should really be put onto the front of the
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw);
- goto out_lock;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
}
if (ioc_batching(ioc))
ioc->nr_batch_requests--;
out:
put_io_context(ioc);
return rq;
-out_lock:
- if (!rq)
- elv_set_congested(q);
- spin_unlock_irq(q->queue_lock);
- goto out;
}
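
The net effect of the hunk above is that elv_may_queue() stops being an unconditional gate and becomes, together with ioc_batching(), an exemption from the full-queue limit. Below is a stand-alone sketch of the two predicates; it is not kernel code and not part of the patch, and struct state, may_allocate_old() and may_allocate_new() are names invented for the illustration.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for blk_queue_full(), ioc_batching() and elv_may_queue(). */
struct state {
	bool queue_full;
	bool caller_is_batcher;
	bool elevator_allows;	/* what elv_may_queue() would return */
};

/* Before the patch: the elevator veto was checked first, unconditionally. */
static bool may_allocate_old(const struct state *s)
{
	if (!s->elevator_allows)
		return false;
	if (s->queue_full && !s->caller_is_batcher)
		return false;
	return true;
}

/*
 * After the patch: the elevator is consulted only once the queue is full,
 * so a "yes" from it acts as an exemption from the full-queue limit.
 */
static bool may_allocate_new(const struct state *s)
{
	if (s->queue_full && !s->caller_is_batcher && !s->elevator_allows)
		return false;
	return true;
}

int main(void)
{
	struct state s = { .queue_full = false, .caller_is_batcher = false,
			   .elevator_allows = false };

	printf("old: %d  new: %d\n", may_allocate_old(&s), may_allocate_new(&s));
	return 0;
}

With queue_full false and elevator_allows false, the old predicate refuses the request while the new one admits it, which is the behavioural difference this hunk introduces.
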
/*
kobject_put(&disk->kobj);
}
}
-
-asmlinkage int sys_ioprio_set(int ioprio)
-{
- if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
- return -EINVAL;
- if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- printk("%s: set ioprio %d\n", current->comm, ioprio);
- current->ioprio = ioprio;
- return 0;
-}
-
-asmlinkage int sys_ioprio_get(void)
-{
- return current->ioprio;
-}
-
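
For reference, the checks performed by the sys_ioprio_set() deleted above can be restated as a small stand-alone program (not part of the patch): the priority must lie between IOPRIO_IDLE and IOPRIO_RT, and IOPRIO_RT is reserved for CAP_SYS_ADMIN. The numeric values and the helper name below are placeholders; the real definitions live in headers this hunk does not show.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum { IOPRIO_IDLE = 0, IOPRIO_RT = 2 };	/* placeholder values, not the kernel's */

/* Restatement of the validation done by the removed sys_ioprio_set(). */
static int ioprio_set_check(int ioprio, bool has_cap_sys_admin)
{
	if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
		return -EINVAL;
	if (ioprio == IOPRIO_RT && !has_cap_sys_admin)
		return -EACCES;
	return 0;
}

int main(void)
{
	printf("RT as ordinary user -> %d\n", ioprio_set_check(IOPRIO_RT, false));
	printf("RT as admin         -> %d\n", ioprio_set_check(IOPRIO_RT, true));
	printf("out of range        -> %d\n", ioprio_set_check(IOPRIO_RT + 1, true));
	return 0;
}

The real syscall then stored the accepted value in current->ioprio (with a debugging printk), and the removed sys_ioprio_get() simply returned that field.
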