#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
+#include <linux/vserver/sched.h>
+#include <linux/vinline.h>
#include <asm/unistd.h>
task_t *migration_thread;
struct list_head migration_queue;
#endif
+ struct list_head hold_queue;
+ int idle_tokens;
};
static DEFINE_PER_CPU(struct runqueue, runqueues);
bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
prio = p->static_prio - bonus;
+ if (__vx_task_flags(p, VXF_SCHED_PRIO, 0))
+ prio += effective_vavavoom(p, MAX_USER_PRIO);
+
if (prio < MAX_RT_PRIO)
prio = MAX_RT_PRIO;
if (prio > MAX_PRIO-1)
}
if (p == rq->idle) {
+ if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
+ set_need_resched();
+
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait += sys_ticks;
else
}
goto out_unlock;
}
- if (!--p->time_slice) {
+ if (vx_need_resched(p)) {
dequeue_task(p, rq->active);
set_tsk_need_resched(p);
p->prio = effective_prio(p);
unsigned long long now;
unsigned long run_time;
int cpu, idx;
+#ifdef CONFIG_VSERVER_HARDCPU
+ struct vx_info *vxi;
+ int maxidle = -HZ;
+#endif
/*
* Test if we are atomic. Since do_exit() needs to call into
}
cpu = smp_processor_id();
+#ifdef CONFIG_VSERVER_HARDCPU
+ if (!list_empty(&rq->hold_queue)) {
+ struct list_head *l, *n;
+ int ret;
+
+ vxi = NULL;
+ list_for_each_safe(l, n, &rq->hold_queue) {
+ next = list_entry(l, task_t, run_list);
+ if (vxi == next->vx_info)
+ continue;
+
+ vxi = next->vx_info;
+ ret = vx_tokens_recalc(vxi);
+ // tokens = vx_tokens_avail(next);
+
+ if (ret > 0) {
+ list_del(&next->run_list);
+ next->state &= ~TASK_ONHOLD;
+ recalc_task_prio(next, now);
+ __activate_task(next, rq);
+				// printk("··· unhold %p\n", next);
+ break;
+ }
+ if ((ret < 0) && (maxidle < ret))
+ maxidle = ret;
+ }
+ }
+ rq->idle_tokens = -maxidle;
+
+pick_next:
+#endif
if (unlikely(!rq->nr_running)) {
idle_balance(cpu, rq);
if (!rq->nr_running) {
goto switch_tasks;
}
+#ifdef CONFIG_VSERVER_HARDCPU
+ vxi = next->vx_info;
+ if (vxi && __vx_flags(vxi->vx_flags,
+ VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) {
+ int ret = vx_tokens_recalc(vxi);
+
+ if (unlikely(ret <= 0)) {
+ if (ret && (rq->idle_tokens > -ret))
+ rq->idle_tokens = -ret;
+ deactivate_task(next, rq);
+ list_add_tail(&next->run_list, &rq->hold_queue);
+ next->state |= TASK_ONHOLD;
+ goto pick_next;
+ }
+ }
+#endif
+
if (!rt_task(next) && next->activated > 0) {
unsigned long long delta = now - next->timestamp;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
#endif
+ INIT_LIST_HEAD(&rq->hold_queue);
atomic_set(&rq->nr_iowait, 0);
for (j = 0; j < 2; j++) {