/*
 * linux-2.6: include/linux/vserver/sched.h  (vserver 1.9.3)
 */
/* _VX_SCHED_H defined below */

#if	defined(__KERNEL__) && defined(_VX_INFO_DEF_)

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <asm/atomic.h>
#include <asm/param.h>

struct _vx_ticks {
	uint64_t user_ticks;		/* token tick events */
	uint64_t sys_ticks;		/* token tick events */
	uint64_t hold_ticks;		/* token ticks paused */
	uint64_t unused[5];		/* cacheline ? */
};

/* context sub struct */

struct _vx_sched {
	atomic_t tokens;		/* number of CPU tokens */
	spinlock_t tokens_lock;		/* lock for token bucket */

	int fill_rate;			/* Fill rate: add X tokens... */
	int interval;			/* Divisor:   per Y jiffies   */
	int tokens_min;			/* Limit:     minimum for unhold */
	int tokens_max;			/* Limit:     no more than N tokens */
	uint32_t jiffies;		/* last time accounted */

	int priority_bias;		/* bias offset for priority */
	cpumask_t cpus_allowed;		/* cpu mask for context */

	struct _vx_ticks cpu[NR_CPUS];
};
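
/*
 * Token bucket sketch (a reading of the fields above, with HZ = 1000
 * assumed for the arithmetic): every `interval' jiffies the bucket
 * gains `fill_rate' tokens, capped at `tokens_max'; each timer tick
 * of a running task burns one token, and a context put on hold waits
 * until it refills past `tokens_min'.  The defaults set below
 * (fill_rate = 1, interval = 4) thus refill 250 tokens per second,
 * i.e. roughly a 25% cpu share for a fully busy context.
 */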

static inline void vx_info_init_sched(struct _vx_sched *sched)
{
	int i;

	/* scheduling; hard code starting values as constants */
	sched->fill_rate	= 1;
	sched->interval		= 4;
	sched->tokens_min	= HZ >> 4;
	sched->tokens_max	= HZ >> 1;
	sched->jiffies		= jiffies;
	spin_lock_init(&sched->tokens_lock);

	atomic_set(&sched->tokens, HZ >> 2);
	sched->cpus_allowed	= CPU_MASK_ALL;
	sched->priority_bias	= 0;

	/* clear the per-cpu tick counters for every possible cpu */
	for (i = 0; i < NR_CPUS; i++) {
		sched->cpu[i].user_ticks	= 0;
		sched->cpu[i].sys_ticks		= 0;
		sched->cpu[i].hold_ticks	= 0;
	}
}
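
/*
 * In concrete terms (assuming HZ = 1000): a fresh context starts with
 * 250 tokens (HZ >> 2), can hoard at most 500 (HZ >> 1), and comes
 * off hold once it climbs back above 62 (HZ >> 4).
 */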

static inline void vx_info_exit_sched(struct _vx_sched *sched)
{
	return;
}

static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
{
	int length = 0;
	int i;

	length += sprintf(buffer,
		"Token:\t\t%8d\n"
		"FillRate:\t%8d\n"
		"Interval:\t%8d\n"
		"TokensMin:\t%8d\n"
		"TokensMax:\t%8d\n"
		"PrioBias:\t%8d\n"
		,atomic_read(&sched->tokens)
		,sched->fill_rate
		,sched->interval
		,sched->tokens_min
		,sched->tokens_max
		,sched->priority_bias
		);

	for_each_online_cpu(i) {
		length += sprintf(buffer + length,
			"cpu %d: %lld %lld %lld\n"
			,i
			,(long long)sched->cpu[i].user_ticks
			,(long long)sched->cpu[i].sys_ticks
			,(long long)sched->cpu[i].hold_ticks
			);
	}

	return length;
}
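
/*
 * Illustrative output of the above, assuming the defaults and a
 * single online cpu that has not been accounted yet (numbers vary
 * with HZ and runtime state):
 *
 *	Token:		     250
 *	FillRate:	       1
 *	Interval:	       4
 *	TokensMin:	      62
 *	TokensMax:	     500
 *	PrioBias:	       0
 *	cpu 0: 0 0 0
 */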


#else	/* _VX_INFO_DEF_ */
#ifndef _VX_SCHED_H
#define _VX_SCHED_H

#include "switch.h"

/*  sched vserver commands */

#define VCMD_set_sched_v2	VC_CMD(SCHED, 1, 2)
#define VCMD_set_sched		VC_CMD(SCHED, 1, 3)
struct	vcmd_set_sched_v2 {
	int32_t fill_rate;
	int32_t interval;
	int32_t tokens;
	int32_t tokens_min;
	int32_t tokens_max;
	uint64_t cpu_mask;
};

struct	vcmd_set_sched_v3 {
	uint32_t set_mask;
	int32_t fill_rate;
	int32_t interval;
	int32_t tokens;
	int32_t tokens_min;
	int32_t tokens_max;
	int32_t priority_bias;
};


#define VXSM_FILL_RATE		0x0001
#define VXSM_INTERVAL		0x0002
#define VXSM_TOKENS		0x0010
#define VXSM_TOKENS_MIN		0x0020
#define VXSM_TOKENS_MAX		0x0040
#define VXSM_PRIO_BIAS		0x0100

#define SCHED_KEEP		(-2)

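/*
 * Userspace sketch (not part of this header; the vserver() entry
 * point name is an assumption, the real wrappers live in the
 * util-vserver tools): set_mask selects which fields to apply, and
 * SCHED_KEEP, going by its name, marks a value to leave unchanged.
 *
 *	struct vcmd_set_sched_v3 data = {
 *		.set_mask  = VXSM_FILL_RATE | VXSM_INTERVAL,
 *		.fill_rate = 1,
 *		.interval  = 4,
 *	};
 *	vserver(VCMD_set_sched, xid, &data);
 */
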
#ifdef	__KERNEL__

extern int vc_set_sched_v1(uint32_t, void __user *);
extern int vc_set_sched_v2(uint32_t, void __user *);
extern int vc_set_sched(uint32_t, void __user *);


#define VAVAVOOM_RATIO		50

#define MAX_PRIO_BIAS		20
#define MIN_PRIO_BIAS		-20

#include "context.h"


/* scheduling stuff */

int effective_vavavoom(struct task_struct *, int);

int vx_tokens_recalc(struct vx_info *);

/* new stuff ;) */

static inline int vx_tokens_avail(struct vx_info *vxi)
{
	return atomic_read(&vxi->sched.tokens);
}

static inline void vx_consume_token(struct vx_info *vxi)
{
	atomic_dec(&vxi->sched.tokens);
}
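
/*
 * Tick-time helper: burn one token if any are left, and tell the
 * caller whether the task must be rescheduled.  With hard CPU limits,
 * a context that was down to its last token (or had none) is asked
 * off the cpu even before its timeslice runs out; otherwise only
 * timeslice exhaustion triggers the resched.
 */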
static inline int vx_need_resched(struct task_struct *p)
{
#ifdef	CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;
#endif
	int slice = --p->time_slice;

#ifdef	CONFIG_VSERVER_HARDCPU
	if (vxi) {
		int tokens;

		if ((tokens = vx_tokens_avail(vxi)) > 0)
			vx_consume_token(vxi);
		/* for tokens > 0, one token was consumed */
		if (tokens < 2)
			return 1;
	}
#endif
	return (slice == 0);
}
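
/*
 * Hold accounting: nr_onhold counts this context's tasks currently
 * held back for lack of tokens.  When the count goes from zero to
 * one, onhold_last records the jiffy the hold began; when it drops
 * back to zero, the elapsed jiffies are charged as hold_ticks to the
 * cpu doing the update.
 */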
static inline void vx_onhold_inc(struct vx_info *vxi)
{
	int onhold = atomic_read(&vxi->cvirt.nr_onhold);

	atomic_inc(&vxi->cvirt.nr_onhold);
	if (!onhold)
		vxi->cvirt.onhold_last = jiffies;
}

static inline void __vx_onhold_update(struct vx_info *vxi)
{
	int cpu = smp_processor_id();
	uint32_t now = jiffies;
	uint32_t delta = now - vxi->cvirt.onhold_last;

	vxi->cvirt.onhold_last = now;
	vxi->sched.cpu[cpu].hold_ticks += delta;
}

static inline void vx_onhold_dec(struct vx_info *vxi)
{
	if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
		__vx_onhold_update(vxi);
}

#endif	/* __KERNEL__ */

#endif	/* _VX_SCHED_H */
#endif	/* _VX_INFO_DEF_ */