/* include/linux/vs_sched.h — Linux-VServer scheduler hooks (linux-2.6) */
#ifndef _VX_VS_SCHED_H
#define _VX_VS_SCHED_H

#include "vserver/sched.h"


/* presumably a percentage used by the "vavavoom" priority boost —
 * verify against the scheduler implementation */
#define VAVAVOOM_RATIO           50

/* clamp range for the per-context priority bias */
#define MAX_PRIO_BIAS            20
#define MIN_PRIO_BIAS           -20

#ifdef CONFIG_VSERVER_ACB_SCHED

/* NOTE(review): looks like a sentinel for "no valid tick count" —
 * confirm against the ACB scheduler users */
#define VX_INVALID_TICKS        -1000000
/* a context is "best effort" when its SCHED_SHARE flag is set */
#define IS_BEST_EFFORT(vxi)     (vx_info_flags(vxi, VXF_SCHED_SHARE, 0))

/* token accounting is implemented out of line by the ACB scheduler */
int vx_tokens_avail(struct vx_info *vxi);
void vx_consume_token(struct vx_info *vxi);
void vx_scheduler_tick(void);
void vx_advance_best_effort_ticks(int ticks);
void vx_advance_guaranteed_ticks(int ticks);

#else
25 static inline int vx_tokens_avail(struct vx_info *vxi)
26 {
27         return atomic_read(&vxi->sched.tokens);
28 }
29
30 static inline void vx_consume_token(struct vx_info *vxi)
31 {
32         atomic_dec(&vxi->sched.tokens);
33 }
34
35 #endif /* CONFIG_VSERVER_ACB_SCHED */
36
/*
 * vx_need_resched - per-tick time-slice accounting for task @p.
 *
 * Unconditionally decrements p->time_slice and returns nonzero when
 * the task should be rescheduled (slice hit zero).  With
 * CONFIG_VSERVER_HARDCPU, a context is additionally throttled by its
 * token bucket: one token is consumed per tick while available, and
 * the task is forced to resched once fewer than two tokens remain
 * (the "< 2" threshold covers both the just-consumed token and an
 * already-empty bucket).
 */
static inline int vx_need_resched(struct task_struct *p)
{
#ifdef	CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;
#endif
	int slice = --p->time_slice;

#ifdef	CONFIG_VSERVER_HARDCPU
	if (vxi) {
		int tokens;

		if ((tokens = vx_tokens_avail(vxi)) > 0)
			vx_consume_token(vxi);
		/* for tokens > 0, one token was consumed */
		if (tokens < 2)
			return 1;
	}
#endif
	return (slice == 0);
}
57
58
59 static inline void vx_onhold_inc(struct vx_info *vxi)
60 {
61         int onhold = atomic_read(&vxi->cvirt.nr_onhold);
62
63         atomic_inc(&vxi->cvirt.nr_onhold);
64         if (!onhold)
65                 vxi->cvirt.onhold_last = jiffies;
66 }
67
68 static inline void __vx_onhold_update(struct vx_info *vxi)
69 {
70         int cpu = smp_processor_id();
71         uint32_t now = jiffies;
72         uint32_t delta = now - vxi->cvirt.onhold_last;
73
74         vxi->cvirt.onhold_last = now;
75         vxi->sched.cpu[cpu].hold_ticks += delta;
76 }
77
78 static inline void vx_onhold_dec(struct vx_info *vxi)
79 {
80         if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
81                 __vx_onhold_update(vxi);
82 }
83
84 static inline void vx_account_user(struct vx_info *vxi,
85         cputime_t cputime, int nice)
86 {
87         int cpu = smp_processor_id();
88
89         if (!vxi)
90                 return;
91         vxi->sched.cpu[cpu].user_ticks += cputime;
92 }
93
94 static inline void vx_account_system(struct vx_info *vxi,
95         cputime_t cputime, int idle)
96 {
97         int cpu = smp_processor_id();
98
99         if (!vxi)
100                 return;
101         vxi->sched.cpu[cpu].sys_ticks += cputime;
102 }
103
#else
#warning duplicate inclusion
#endif	/* _VX_VS_SCHED_H */