Linux-VServer patch: changes to support Andy's new CPU scheduler.
File: include/linux/vs_sched.h (from the linux-2.6.git tree)
1 #ifndef _VX_VS_SCHED_H
2 #define _VX_VS_SCHED_H
3
4 #ifndef CONFIG_VSERVER
5 #warning config options missing
6 #endif
7
8 #include "vserver/sched.h"
9
10
11 #define VAVAVOOM_RATIO           50
12
13 #define MAX_PRIO_BIAS            20
14 #define MIN_PRIO_BIAS           -20
15
16 #ifdef CONFIG_VSERVER_ACB_SCHED
17
18 #define VX_INVALID_TICKS        -1000000
19 #define IS_BEST_EFFORT(vxi)     (vx_info_flags(vxi, VXF_SCHED_SHARE, 0))
20
21 int vx_tokens_avail(struct vx_info *vxi);
22 void vx_consume_token(struct vx_info *vxi);
23 void vx_scheduler_tick(void);
24 void vx_advance_best_effort_ticks(int ticks);
25 void vx_advance_guaranteed_ticks(int ticks);
26
27 #else
28
29 static inline int vx_tokens_avail(struct vx_info *vxi)
30 {
31         return atomic_read(&vxi->sched.tokens);
32 }
33
/* Atomically take one scheduler token from the context's bucket.
 * Note: no underflow guard here — callers (see vx_need_resched) are
 * expected to check vx_tokens_avail() before consuming. */
static inline void vx_consume_token(struct vx_info *vxi)
{
	atomic_dec(&vxi->sched.tokens);
}
38
39 #endif /* CONFIG_VSERVER_ACB_SCHED */
40
/* Decrement the task's timeslice and decide whether it must be
 * rescheduled.  Returns nonzero when a reschedule is required.
 *
 * Without CONFIG_VSERVER_HARDCPU this is the plain timeslice check.
 * With it, a task whose context has run out of tokens is forced off
 * the CPU regardless of its remaining timeslice. */
static inline int vx_need_resched(struct task_struct *p)
{
#ifdef	CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;
#endif
	/* unconditionally burn one tick of the timeslice */
	int slice = --p->time_slice;

#ifdef	CONFIG_VSERVER_HARDCPU
	if (vxi) {
		int tokens;

		if ((tokens = vx_tokens_avail(vxi)) > 0)
			vx_consume_token(vxi);
		/* for tokens > 0, one token was consumed */
		/* tokens < 2 means fewer than one token remains after the
		 * consume above — force a reschedule so the hard CPU limit
		 * is enforced even mid-timeslice */
		if (tokens < 2)
			return 1;
	}
#endif
	return (slice == 0);
}
61
62
63 static inline void vx_onhold_inc(struct vx_info *vxi)
64 {
65         int onhold = atomic_read(&vxi->cvirt.nr_onhold);
66
67         atomic_inc(&vxi->cvirt.nr_onhold);
68         if (!onhold)
69                 vxi->cvirt.onhold_last = jiffies;
70 }
71
72 static inline void __vx_onhold_update(struct vx_info *vxi)
73 {
74         int cpu = smp_processor_id();
75         uint32_t now = jiffies;
76         uint32_t delta = now - vxi->cvirt.onhold_last;
77
78         vxi->cvirt.onhold_last = now;
79         vxi->sched.cpu[cpu].hold_ticks += delta;
80 }
81
82 static inline void vx_onhold_dec(struct vx_info *vxi)
83 {
84         if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
85                 __vx_onhold_update(vxi);
86 }
87
88 static inline void vx_account_user(struct vx_info *vxi,
89         cputime_t cputime, int nice)
90 {
91         int cpu = smp_processor_id();
92
93         if (!vxi)
94                 return;
95         vxi->sched.cpu[cpu].user_ticks += cputime;
96 }
97
98 static inline void vx_account_system(struct vx_info *vxi,
99         cputime_t cputime, int idle)
100 {
101         int cpu = smp_processor_id();
102
103         if (!vxi)
104                 return;
105         vxi->sched.cpu[cpu].sys_ticks += cputime;
106 }
107
108 #else
109 #warning duplicate inclusion
110 #endif