.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 285 */
syscall_table_size=(.-sys_call_table)
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* 268 reserved for sys_kexec_load */
+ .long sys_ioprio_set
+ .long sys_ioprio_get
# kblockd threads
#
-obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o ckrm-iostub.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
+obj-$(CONFIG_CKRM_RES_BLKIO) += ckrm-io.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_BLK_DEV_FD98) += floppy98.o
e->elevator_put_req_fn(q, rq);
}
+void elv_set_congested(request_queue_t *q)
+{
+ elevator_t *e = &q->elevator;
+
+ if (e->elevator_set_congested_fn)
+ e->elevator_set_congested_fn(q);
+}
+
int elv_may_queue(request_queue_t *q, int rw)
{
	elevator_t *e = &q->elevator;

if (e->elevator_may_queue_fn)
return e->elevator_may_queue_fn(q, rw);
- return 0;
+ return 1;
}
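Note the default return value flips from 0 to 1: elv_may_queue() is now consulted unconditionally on the request-allocation path (see the ll_rw_blk.c hunk below), so a scheduler that does not implement the hook must answer "may queue" rather than "not exempt". As a rough sketch of how a scheduler might implement the new hook pair (all foo_* names are invented for illustration, not part of this patch):

	struct foo_data {
		int congested;		/* set via elv_set_congested() */
	};

	static int foo_may_queue(request_queue_t *q, int rw)
	{
		struct foo_data *fd = q->elevator.elevator_data;

		/* refuse new allocations while we are backed up */
		return !fd->congested;
	}

	static void foo_set_congested(request_queue_t *q)
	{
		struct foo_data *fd = q->elevator.elevator_data;

		fd->congested = 1;	/* cleared again once the queue drains */
	}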
void elv_completed_request(request_queue_t *q, struct request *rq)
struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
+
+ if (!elv_may_queue(q, rw))
+ goto out_lock;
+
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
}
}
- if (blk_queue_full(q, rw)
- && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- spin_unlock_irq(q->queue_lock);
- goto out;
- }
+ /*
+ * The queue is full and the allocating process is not a
+ * "batcher"; the IO scheduler already had its chance to
+ * exempt or reject the process via elv_may_queue() above.
+ */
+ if (blk_queue_full(q, rw) && !ioc_batching(ioc))
+ goto out_lock;
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw);
- spin_unlock_irq(q->queue_lock);
- goto out;
+ goto out_lock;
}
if (ioc_batching(ioc))
out:
put_io_context(ioc);
return rq;
+out_lock:
+ if (!rq)
+ elv_set_congested(q);
+ spin_unlock_irq(q->queue_lock);
+ goto out;
}
/*
kobject_put(&disk->kobj);
}
}
+
+asmlinkage int sys_ioprio_set(int ioprio)
+{
+ if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
+ return -EINVAL;
+ if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ printk(KERN_DEBUG "%s: set ioprio %d\n", current->comm, ioprio);
+ current->ioprio = ioprio;
+ return 0;
+}
+
+asmlinkage int sys_ioprio_get(void)
+{
+ return current->ioprio;
+}
+
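For quick testing of the new calls from userspace, syscall(2) can be used directly since no libc wrapper exists yet; a minimal sketch, assuming the i386 numbers assigned in this patch (the numbers differ per architecture):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define __NR_ioprio_set	284	/* i386 numbers from this patch */
	#define __NR_ioprio_get	285

	#define IOPRIO_IDLE	0
	#define IOPRIO_NORM	10
	#define IOPRIO_RT	20	/* requires CAP_SYS_ADMIN */

	int main(void)
	{
		/* drop this process to idle I/O priority, then read it back */
		if (syscall(__NR_ioprio_set, IOPRIO_IDLE) < 0)
			perror("ioprio_set");
		printf("ioprio: %ld\n", syscall(__NR_ioprio_get));
		return 0;
	}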
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
#define __NR_sys_kexec_load 283
+#define __NR_ioprio_set 284
+#define __NR_ioprio_get 285
-#define NR_syscalls 284
+#define NR_syscalls 286
#ifndef __KERNEL_SYSCALLS_NO_ERRNO__
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
#define __NR_mq_notify 266
#define __NR_mq_getsetattr 267
#define __NR_kexec_load 268
+#define __NR_ioprio_set 269
+#define __NR_ioprio_get 270
-#define __NR_syscalls 269
+#define __NR_syscalls 271
#define __NR(n) #n
__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
#define __NR_kexec_load 246
__SYSCALL(__NR_kexec_load, sys_ni_syscall)
+#define __NR_ioprio_set 247
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 248
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-#define __NR_syscall_max __NR_kexec_load
+#define __NR_syscall_max __NR_ioprio_get
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_set_congested_fn) (request_queue_t *);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
elevator_put_req_fn *elevator_put_req_fn;
elevator_may_queue_fn *elevator_may_queue_fn;
+ elevator_set_congested_fn *elevator_set_congested_fn;
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
extern int elv_register_queue(request_queue_t *q);
extern void elv_unregister_queue(request_queue_t *q);
extern int elv_may_queue(request_queue_t *, int);
+extern void elv_set_congested(request_queue_t *);
extern void elv_completed_request(request_queue_t *, struct request *);
extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
+#define RQ_ELV_DATA(rq) ((rq)->elevator_private)
+
#endif
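RQ_ELV_DATA() is shorthand for the per-request scheduler cookie; the natural pattern is to populate it from elevator_set_req_fn and release it from elevator_put_req_fn. A minimal sketch, with the foo_rq type invented for illustration:

	struct foo_rq {
		int ioprio;		/* priority the request was queued at */
	};

	static int foo_set_request(request_queue_t *q, struct request *rq,
				   int gfp_mask)
	{
		struct foo_rq *frq = kmalloc(sizeof(*frq), gfp_mask);

		if (!frq)
			return 1;	/* non-zero tells the block layer to bail */
		frq->ioprio = current->ioprio;
		RQ_ELV_DATA(rq) = frq;
		return 0;
	}

	static void foo_put_request(request_queue_t *q, struct request *rq)
	{
		kfree(RQ_ELV_DATA(rq));
		RQ_ELV_DATA(rq) = NULL;
	}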
{ }
#endif /* CONFIG_SECURITY */
+/* io priorities */
+
+#define IOPRIO_NR 21
+
+#define IOPRIO_IDLE 0
+#define IOPRIO_NORM 10
+#define IOPRIO_RT 20
+
+asmlinkage int sys_ioprio_set(int ioprio);
+asmlinkage int sys_ioprio_get(void);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
.vx_info = NULL, \
.nid = 0, \
.nx_info = NULL, \
+ .ioprio = IOPRIO_NORM, \
}
struct io_context *io_context;
+ int ioprio;
+
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
Say N if unsure, Y to use the feature.
+config CKRM_RES_BLKIO
+ tristate "Disk I/O Resource Controller"
+ depends on CKRM_TYPE_TASKCLASS && IOSCHED_CFQ
+ default m
+ help
+ Provides a resource controller for best-effort block I/O
+ bandwidth control by servicing requests in the I/O scheduler
+ in proportion to each class's share. Seek optimizations and
+ reordering by device drivers or disk controllers may still
+ alter the bandwidth actually delivered to a class.
+
+ Say N if unsure, Y to use the feature.
+
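To make the help text concrete: "proportional servicing" here means something like a weighted round-robin over per-class request queues. The sketch below is invented for illustration (it is not code from ckrm-io.c) and uses only the standard kernel list helpers:

	#include <linux/list.h>

	struct class_queue {
		struct list_head rq_list;	/* requests queued by this class */
		int share;			/* relative bandwidth share */
	};

	/*
	 * One dispatch round: move up to 'share' requests from each
	 * class onto the device-visible dispatch list, so classes
	 * drain in proportion to their configured shares.
	 */
	static void example_dispatch_round(struct class_queue *classes, int nr,
					   struct list_head *dispatch)
	{
		int i, n;

		for (i = 0; i < nr; i++)
			for (n = 0; n < classes[i].share; n++) {
				if (list_empty(&classes[i].rq_list))
					break;
				list_move_tail(classes[i].rq_list.next, dispatch);
			}
	}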
config CKRM_TYPE_SOCKETCLASS
bool "Class Manager for socket groups"
depends on CKRM
} else
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
+ p->ioprio = current->ioprio;
nr_threads++;
/* p is copy of current */
vxi = p->vx_info;