#ifdef CONFIG_VSERVER_IDLELIMIT

/*
 * vx_idle_resched - reschedule after maxidle
 */
static inline
void vx_idle_resched(struct rq *rq)
{
	/* maybe have a better criterion for paused */
	if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
		set_need_resched();
}

#else /* !CONFIG_VSERVER_IDLELIMIT */

#define vx_idle_resched(rq)

#endif /* CONFIG_VSERVER_IDLELIMIT */
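
/*
 * Illustrative caller (a sketch, not part of this header): with
 * CONFIG_VSERVER_IDLELIMIT the timer tick is expected to call
 * vx_idle_resched() while the idle task is running, so that once
 * rq->idle_tokens is used up and tasks wait on the hold queue,
 * the CPU reschedules and the unhold path gets a chance to run.
 * The surrounding tick logic here is assumed.
 */
#if 0	/* example only */
static inline
void example_tick_idle(struct task_struct *p, struct rq *rq)
{
	if (p == rq->idle)
		vx_idle_resched(rq);
}
#endif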

#ifdef CONFIG_VSERVER_IDLETIME

#define vx_set_rq_min_skip(rq, min)		\
	(rq)->idle_skip = (min)

#define vx_save_min_skip(ret, min, val)		\
	__vx_save_min_skip(ret, min, val)

static inline
void __vx_save_min_skip(int ret, int *min, int val)
{
	if (ret > -2)
		return;
	if ((*min > val) || !*min)
		*min = val;
}

static inline
int vx_try_skip(struct rq *rq, int cpu)
{
	/* artificially advance time */
	if (rq->idle_skip > 0) {
		vxdprintk(list_empty(&rq->hold_queue),
			"hold queue empty on cpu %d", cpu);
		rq->idle_time += rq->idle_skip;
		vxm_idle_skip(rq, cpu);
		return 1;
	}
	return 0;
}

#else /* !CONFIG_VSERVER_IDLETIME */

#define vx_set_rq_min_skip(rq, min)		\
	({ int dummy = (min); dummy; })

#define vx_save_min_skip(ret, min, val)

static inline
int vx_try_skip(struct rq *rq, int cpu)
{
	return 0;
}

#endif /* CONFIG_VSERVER_IDLETIME */
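
/*
 * Illustrative caller (a sketch, not from this header): in the
 * schedule() path, when the runqueue is empty, vx_try_skip() lets
 * the CPU artificially advance idle time and retry the unhold,
 * so held contexts gain tokens faster on an otherwise idle CPU.
 * The control flow below is assumed.
 */
#if 0	/* example only */
static inline
void example_idle_path(struct rq *rq, int cpu)
{
try_unhold:
	vx_try_unhold(rq, cpu);
	if (!rq->nr_running && vx_try_skip(rq, cpu))
		goto try_unhold;
}
#endif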

#ifdef CONFIG_VSERVER_HARDCPU

#define vx_set_rq_max_idle(rq, max)		\
	(rq)->idle_tokens = (max)

#define vx_save_max_idle(ret, min, val)		\
	__vx_save_max_idle(ret, min, val)

static inline
void __vx_save_max_idle(int ret, int *min, int val)
{
	if (*min > val)
		*min = val;
}

/*
 * vx_hold_task - put a task on the hold queue
 */
static inline
void vx_hold_task(struct task_struct *p, struct rq *rq)
{
	__deactivate_task(p, rq);
	p->state |= TASK_ONHOLD;
	/* a new one on hold */
	rq->nr_onhold++;
	vxm_hold_task(p, rq);
	list_add_tail(&p->run_list, &rq->hold_queue);
}

/*
 * vx_unhold_task - put a task back to the runqueue
 */
static inline
void vx_unhold_task(struct task_struct *p, struct rq *rq)
{
	list_del(&p->run_list);
	/* one less waiting */
	rq->nr_onhold--;
	p->state &= ~TASK_ONHOLD;
	/* goes to the expired array, so it cannot preempt right away */
	enqueue_task(p, rq->expired);
	inc_nr_running(p, rq);
	vxm_unhold_task(p, rq);

	if (p->static_prio < rq->best_expired_prio)
		rq->best_expired_prio = p->static_prio;
}

/* total number of tasks currently on hold, summed over all CPUs */
unsigned long nr_onhold(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_onhold;

	return sum;
}
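
/*
 * Illustrative consumer (hypothetical, not from this header):
 * a diagnostic report of the global on-hold count.
 */
#if 0	/* example only */
static inline
void example_report_onhold(void)
{
	printk(KERN_DEBUG "vserver: %lu task(s) on hold\n", nr_onhold());
}
#endif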

/* current token balance of a per-cpu scheduling context */
static inline
int __vx_tokens_avail(struct _vx_sched_pc *sched_pc)
{
	return sched_pc->tokens;
}

/* burn one token from the per-cpu bucket */
static inline
void __vx_consume_token(struct _vx_sched_pc *sched_pc)
{
	sched_pc->tokens--;
}
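
/*
 * Illustrative model (a sketch, not part of this header) of the
 * token bucket that vx_tokens_recalc() maintains: roughly
 * fill_rate tokens are added per interval ticks, capped at
 * tokens_max, so a context with fill_rate 1 and interval 4
 * converges to about 25% of one CPU. The field and helper names
 * below are hypothetical.
 */
#if 0	/* example only */
static inline
void example_token_refill(struct _vx_sched_pc *sched_pc,
	int fill_rate, int interval, int tokens_max, int ticks)
{
	sched_pc->tokens += (ticks / interval) * fill_rate;
	if (sched_pc->tokens > tokens_max)
		sched_pc->tokens = tokens_max;
}
#endif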

/*
 * vx_need_resched - check whether the task must be preempted
 * (time slice or token bucket exhausted)
 */
static inline
int vx_need_resched(struct task_struct *p, int slice, int cpu)
{
	struct vx_info *vxi = p->vx_info;

	if (vx_info_flags(vxi, VXF_SCHED_HARD|VXF_SCHED_PRIO, 0)) {
		struct _vx_sched_pc *sched_pc =
			&vx_per_cpu(vxi, sched_pc, cpu);
		int tokens;

		/* maybe we can simplify this to decrementing
		   the token counter unconditionally? */

		if ((tokens = __vx_tokens_avail(sched_pc)) > 0)
			__vx_consume_token(sched_pc);

		/* for tokens > 0, one token was consumed */
		if (tokens < 2)
			slice = 0;
	}
	vxm_need_resched(p, slice, cpu);
	return (slice == 0);
}
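
/*
 * Illustrative caller (a sketch, not from this header): the tick
 * handler is expected to use vx_need_resched() in place of the
 * plain `!--p->time_slice` test, so a context that has run out of
 * tokens yields the CPU even though its slice is not used up.
 */
#if 0	/* example only */
static inline
void example_tick(struct task_struct *p, struct rq *rq, int cpu)
{
	if (vx_need_resched(p, --p->time_slice, cpu))
		set_tsk_need_resched(p);
}
#endif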

#define vx_set_rq_time(rq, time) do {	\
	rq->norm_time = time;		\
} while (0)

/*
 * vx_try_unhold - unhold the first runnable context on the queue
 *
 * Only the first context whose token recalculation comes back
 * positive is unheld per invocation; the remaining entries just
 * tighten the runqueue's max-idle and min-skip bounds.
 */
static inline
void vx_try_unhold(struct rq *rq, int cpu)
{
	struct vx_info *vxi = NULL;
	struct list_head *l, *n;
	int maxidle = HZ;
	int minskip = 0;

	/* nothing to do? what about pause? */
	if (list_empty(&rq->hold_queue))
		return;

	list_for_each_safe(l, n, &rq->hold_queue) {
		int ret, delta_min[2];
		struct _vx_sched_pc *sched_pc;
		struct task_struct *p;

		p = list_entry(l, struct task_struct, run_list);
		/* don't bother with same context */
		if (vxi == p->vx_info)
			continue;

		vxi = p->vx_info;
		/* ignore paused contexts */
		if (vx_info_flags(vxi, VXF_SCHED_PAUSE, 0))
			continue;

		sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);

		/* recalc tokens */
		vxm_sched_info(sched_pc, vxi, cpu);
		ret = vx_tokens_recalc(sched_pc,
			&rq->norm_time, &rq->idle_time, delta_min);
		vxm_tokens_recalc(sched_pc, rq, vxi, cpu);

		if (ret > 0) {
			/* we found a runnable context */
			vx_unhold_task(p, rq);
			break;
		}
		vx_save_max_idle(ret, &maxidle, delta_min[0]);
		vx_save_min_skip(ret, &minskip, delta_min[1]);
	}
	vx_set_rq_max_idle(rq, maxidle);
	vx_set_rq_min_skip(rq, minskip);
	vxm_rq_max_min(rq, cpu);
}

/*
 * vx_schedule - decide whether next may run; returns 0 after
 * putting the task on hold, 1 if it may be scheduled
 */
static inline
int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
{
	struct vx_info *vxi = next->vx_info;
	struct _vx_sched_pc *sched_pc;
	int delta_min[2];
	int flags, ret;

	if (!vxi)
		return 1;

	flags = vxi->vx_flags;

	if (unlikely(vs_check_flags(flags, VXF_SCHED_PAUSE, 0)))
		goto put_on_hold;
	if (!vs_check_flags(flags, VXF_SCHED_HARD|VXF_SCHED_PRIO, 0))
		return 1;

	sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);

	/* update scheduler params */
	if (cpu_isset(cpu, vxi->sched.update)) {
		vx_update_sched_param(&vxi->sched, sched_pc);
		vxm_update_sched(sched_pc, vxi, cpu);
		cpu_clear(cpu, vxi->sched.update);
	}
	vxm_sched_info(sched_pc, vxi, cpu);
	ret = vx_tokens_recalc(sched_pc,
		&rq->norm_time, &rq->idle_time, delta_min);
	vxm_tokens_recalc(sched_pc, rq, vxi, cpu);

	if (!vs_check_flags(flags, VXF_SCHED_HARD, 0))
		return 1;

	if (unlikely(ret < 0)) {
		vx_save_max_idle(ret, &rq->idle_tokens, delta_min[0]);
		vx_save_min_skip(ret, &rq->idle_skip, delta_min[1]);
		vxm_rq_max_min(rq, cpu);
	put_on_hold:
		vx_hold_task(next, rq);
		return 0;
	}
	return 1;
}
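
/*
 * Illustrative caller (a sketch, not from this header): schedule()
 * is expected to re-pick when vx_schedule() refuses the candidate
 * and has put it on hold. The pick_next() helper is hypothetical;
 * the loop terminates because the idle task has no vx_info and is
 * therefore always accepted.
 */
#if 0	/* example only */
static inline
struct task_struct *example_pick(struct rq *rq, int cpu)
{
	struct task_struct *next;

	do {
		next = pick_next(rq);	/* hypothetical helper */
	} while (!vx_schedule(next, rq, cpu));
	return next;
}
#endif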

#else /* !CONFIG_VSERVER_HARDCPU */

static inline
void vx_hold_task(struct task_struct *p, struct rq *rq)
{
	return;
}

static inline
void vx_unhold_task(struct task_struct *p, struct rq *rq)
{
	return;
}

unsigned long nr_onhold(void)
{
	return 0;
}

static inline
int vx_need_resched(struct task_struct *p, int slice, int cpu)
{
	return (slice == 0);
}

#define vx_set_rq_time(rq, time)

static inline
void vx_try_unhold(struct rq *rq, int cpu)
{
	return;
}

static inline
int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
{
	struct vx_info *vxi = next->vx_info;
	struct _vx_sched_pc *sched_pc;
	int delta_min[2];
	int ret;

	if (!vx_info_flags(vxi, VXF_SCHED_PRIO, 0))
		return 1;

	sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
	vxm_sched_info(sched_pc, vxi, cpu);
	ret = vx_tokens_recalc(sched_pc,
		&rq->norm_time, &rq->idle_time, delta_min);
	vxm_tokens_recalc(sched_pc, rq, vxi, cpu);

	return 1;
}

#endif /* CONFIG_VSERVER_HARDCPU */