/* kernel/sched_hard.h */

#ifdef CONFIG_VSERVER_IDLELIMIT

/*
 * vx_idle_resched - reschedule after maxidle
 */
static inline
void vx_idle_resched(struct rq *rq)
{
	/* maybe have a better criterion for paused */
	if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
		set_need_resched();
}

#else /* !CONFIG_VSERVER_IDLELIMIT */

#define vx_idle_resched(rq)

#endif /* CONFIG_VSERVER_IDLELIMIT */
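
/*
 * rq->idle_tokens is the remaining idle-tick budget before the hold
 * queue is revisited; it is set via vx_set_rq_max_idle() from
 * vx_try_unhold()/vx_schedule() below.  vx_idle_resched() is presumably
 * driven from the timer tick while the idle task runs, so once the
 * budget is used up and tasks are still held, a reschedule is forced.
 */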



#ifdef CONFIG_VSERVER_IDLETIME

#define vx_set_rq_min_skip(rq, min)		\
	(rq)->idle_skip = (min)

#define vx_save_min_skip(ret, min, val)		\
	__vx_save_min_skip(ret, min, val)

static inline
void __vx_save_min_skip(int ret, int *min, int val)
{
	if (ret > -2)
		return;
	if ((*min > val) || !*min)
		*min = val;
}

static inline
int vx_try_skip(struct rq *rq, int cpu)
{
	/* artificially advance time */
	if (rq->idle_skip > 0) {
		vxdprintk(list_empty(&rq->hold_queue),
			"hold queue empty on cpu %d", cpu);
		rq->idle_time += rq->idle_skip;
		vxm_idle_skip(rq, cpu);
		return 1;
	}
	return 0;
}
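
/*
 * When the CPU would otherwise sit idle while contexts are held back,
 * rq->idle_skip (the smallest skip hint recorded by vx_save_min_skip())
 * is added to rq->idle_time, effectively fast-forwarding the idle clock
 * so that held contexts earn back tokens sooner.  The caller is
 * presumably the idle path, which reschedules after a successful skip.
 */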

#else /* !CONFIG_VSERVER_IDLETIME */

#define vx_set_rq_min_skip(rq, min)		\
	({ int dummy = (min); dummy; })

#define vx_save_min_skip(ret, min, val)

static inline
int vx_try_skip(struct rq *rq, int cpu)
{
	return 0;
}

#endif /* CONFIG_VSERVER_IDLETIME */



#ifdef CONFIG_VSERVER_HARDCPU

#define vx_set_rq_max_idle(rq, max)		\
	(rq)->idle_tokens = (max)

#define vx_save_max_idle(ret, min, val)		\
	__vx_save_max_idle(ret, min, val)

static inline
void __vx_save_max_idle(int ret, int *min, int val)
{
	if (*min > val)
		*min = val;
}


/*
 * vx_hold_task - put a task on the hold queue
 */
static inline
void vx_hold_task(struct task_struct *p, struct rq *rq)
{
	__deactivate_task(p, rq);
	p->state |= TASK_ONHOLD;
	/* a new one on hold */
	rq->nr_onhold++;
	vxm_hold_task(p, rq);
	list_add_tail(&p->run_list, &rq->hold_queue);
}

/*
 * vx_unhold_task - put a task back on the runqueue
 */
static inline
void vx_unhold_task(struct task_struct *p, struct rq *rq)
{
	list_del(&p->run_list);
	/* one less waiting */
	rq->nr_onhold--;
	p->state &= ~TASK_ONHOLD;
	enqueue_task(p, rq->expired);
	inc_nr_running(p, rq);
	vxm_unhold_task(p, rq);

	if (p->static_prio < rq->best_expired_prio)
		rq->best_expired_prio = p->static_prio;
}
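
/*
 * Note that an unheld task is queued on the expired array, not the
 * active one, so it does not preempt the current task immediately; it
 * becomes eligible again on the next array switch.  Updating
 * best_expired_prio keeps the expired-starvation check aware of the
 * newly queued priority.
 */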

unsigned long nr_onhold(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_onhold;

	return sum;
}



static inline
int __vx_tokens_avail(struct _vx_sched_pc *sched_pc)
{
	return sched_pc->tokens;
}

static inline
void __vx_consume_token(struct _vx_sched_pc *sched_pc)
{
	sched_pc->tokens--;
}

static inline
int vx_need_resched(struct task_struct *p, int slice, int cpu)
{
	struct vx_info *vxi = p->vx_info;

	if (vx_info_flags(vxi, VXF_SCHED_HARD|VXF_SCHED_PRIO, 0)) {
		struct _vx_sched_pc *sched_pc =
			&vx_per_cpu(vxi, sched_pc, cpu);
		int tokens;

		/* maybe we can simplify this to decrement
		   the token counter unconditionally? */

		if ((tokens = __vx_tokens_avail(sched_pc)) > 0)
			__vx_consume_token(sched_pc);

		/* for tokens > 0, one token was consumed */
		if (tokens < 2)
			slice = 0;
	}
	vxm_need_resched(p, slice, cpu);
	return (slice == 0);
}
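
/*
 * The effect on the remaining time slice, assuming per-tick calls:
 * with tokens >= 2 one token is consumed and the slice is left alone;
 * with tokens == 1 the last token is consumed and the slice is forced
 * to 0, so a reschedule is requested; with tokens <= 0 nothing is
 * consumed and the slice is forced to 0 as well.
 */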


#define vx_set_rq_time(rq, time) do {	\
	rq->norm_time = time;		\
} while (0)


static inline
void vx_try_unhold(struct rq *rq, int cpu)
{
	struct vx_info *vxi = NULL;
	struct list_head *l, *n;
	int maxidle = HZ;
	int minskip = 0;

	/* nothing to do? what about pause? */
	if (list_empty(&rq->hold_queue))
		return;

	list_for_each_safe(l, n, &rq->hold_queue) {
		int ret, delta_min[2];
		struct _vx_sched_pc *sched_pc;
		struct task_struct *p;

		p = list_entry(l, struct task_struct, run_list);
		/* don't bother with same context */
		if (vxi == p->vx_info)
			continue;

		vxi = p->vx_info;
		/* ignore paused contexts */
		if (vx_info_flags(vxi, VXF_SCHED_PAUSE, 0))
			continue;

		sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);

		/* recalc tokens */
		vxm_sched_info(sched_pc, vxi, cpu);
		ret = vx_tokens_recalc(sched_pc,
			&rq->norm_time, &rq->idle_time, delta_min);
		vxm_tokens_recalc(sched_pc, rq, vxi, cpu);

		if (ret > 0) {
			/* we found a runnable context */
			vx_unhold_task(p, rq);
			break;
		}
		vx_save_max_idle(ret, &maxidle, delta_min[0]);
		vx_save_min_skip(ret, &minskip, delta_min[1]);
	}
	vx_set_rq_max_idle(rq, maxidle);
	vx_set_rq_min_skip(rq, minskip);
	vxm_rq_max_min(rq, cpu);
}
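
/*
 * vx_try_unhold() walks the hold queue once: consecutive tasks of the
 * same context and paused contexts are skipped, each remaining context
 * gets its tokens recalculated, and the first one that ends up with a
 * positive balance has a task moved back onto the runqueue.  If nobody
 * qualifies, the tightest bounds reported via delta_min[] are stored as
 * the runqueue's new idle budget (maxidle, capped at HZ) and idle-time
 * skip hint (minskip).
 */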



static inline
int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
{
	struct vx_info *vxi = next->vx_info;
	struct _vx_sched_pc *sched_pc;
	int delta_min[2];
	int flags, ret;

	if (!vxi)
		return 1;

	flags = vxi->vx_flags;

	if (unlikely(vs_check_flags(flags, VXF_SCHED_PAUSE, 0)))
		goto put_on_hold;
	if (!vs_check_flags(flags, VXF_SCHED_HARD|VXF_SCHED_PRIO, 0))
		return 1;

	sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
#ifdef CONFIG_SMP
	/* update scheduler params */
	if (cpu_isset(cpu, vxi->sched.update)) {
		vx_update_sched_param(&vxi->sched, sched_pc);
		vxm_update_sched(sched_pc, vxi, cpu);
		cpu_clear(cpu, vxi->sched.update);
	}
#endif
	vxm_sched_info(sched_pc, vxi, cpu);
	ret = vx_tokens_recalc(sched_pc,
		&rq->norm_time, &rq->idle_time, delta_min);
	vxm_tokens_recalc(sched_pc, rq, vxi, cpu);

	if (!vs_check_flags(flags, VXF_SCHED_HARD, 0))
		return 1;

	if (unlikely(ret < 0)) {
		vx_save_max_idle(ret, &rq->idle_tokens, delta_min[0]);
		vx_save_min_skip(ret, &rq->idle_skip, delta_min[1]);
		vxm_rq_max_min(rq, cpu);
	put_on_hold:
		vx_hold_task(next, rq);
		return 0;
	}
	return 1;
}
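
/*
 * Return value: 1 means 'next' may run, 0 means it was put on the hold
 * queue instead and the caller should pick another task.  Paused
 * contexts are held unconditionally; contexts that are neither hard
 * nor priority scheduled pass through untouched; priority-only
 * contexts get their tokens recalculated, presumably to feed the
 * priority adjustment done elsewhere, but are never held back.
 */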


#else /* !CONFIG_VSERVER_HARDCPU */

static inline
void vx_hold_task(struct task_struct *p, struct rq *rq)
{
	return;
}

static inline
void vx_unhold_task(struct task_struct *p, struct rq *rq)
{
	return;
}

unsigned long nr_onhold(void)
{
	return 0;
}


static inline
int vx_need_resched(struct task_struct *p, int slice, int cpu)
{
	return (slice == 0);
}


#define vx_set_rq_time(rq, time)

static inline
void vx_try_unhold(struct rq *rq, int cpu)
{
	return;
}

static inline
int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
{
	struct vx_info *vxi = next->vx_info;
	struct _vx_sched_pc *sched_pc;
	int delta_min[2];
	int ret;

	if (!vx_info_flags(vxi, VXF_SCHED_PRIO, 0))
		return 1;

	sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
	vxm_sched_info(sched_pc, vxi, cpu);
	ret = vx_tokens_recalc(sched_pc,
		&rq->norm_time, &rq->idle_time, delta_min);
	vxm_tokens_recalc(sched_pc, rq, vxi, cpu);
	return 1;
}

#endif /* CONFIG_VSERVER_HARDCPU */