4 #include "vserver/sched.h"
/* Weight used when deriving a context's "vavavoom" priority boost
 * from its token fill level.
 * NOTE(review): presumably a percentage — confirm against the users
 * in kernel/vserver/sched.c. */
#define VAVAVOOM_RATIO		 50

/* Clamp range for the per-context scheduler priority bias. */
#define MAX_PRIO_BIAS		 20
#define MIN_PRIO_BIAS		-20
12 #ifdef CONFIG_VSERVER_ACB_SCHED
14 #define VX_INVALID_TICKS -1000000
15 #define IS_BEST_EFFORT(vxi) (vx_info_flags(vxi, VXF_SCHED_SHARE, 0))
17 int vx_tokens_avail(struct vx_info *vxi);
18 void vx_consume_token(struct vx_info *vxi);
19 void vx_scheduler_tick(void);
20 void vx_advance_best_effort_ticks(int ticks);
21 void vx_advance_guaranteed_ticks(int ticks);
25 static inline int vx_tokens_avail(struct vx_info *vxi)
27 return atomic_read(&vxi->sched.tokens);
30 static inline void vx_consume_token(struct vx_info *vxi)
32 atomic_dec(&vxi->sched.tokens);
35 #endif /* CONFIG_VSERVER_ACB_SCHED */
37 static inline int vx_need_resched(struct task_struct *p)
39 #ifdef CONFIG_VSERVER_HARDCPU
40 struct vx_info *vxi = p->vx_info;
42 int slice = --p->time_slice;
44 #ifdef CONFIG_VSERVER_HARDCPU
48 if ((tokens = vx_tokens_avail(vxi)) > 0)
49 vx_consume_token(vxi);
50 /* for tokens > 0, one token was consumed */
59 static inline void vx_onhold_inc(struct vx_info *vxi)
61 int onhold = atomic_read(&vxi->cvirt.nr_onhold);
63 atomic_inc(&vxi->cvirt.nr_onhold);
65 vxi->cvirt.onhold_last = jiffies;
68 static inline void __vx_onhold_update(struct vx_info *vxi)
70 int cpu = smp_processor_id();
71 uint32_t now = jiffies;
72 uint32_t delta = now - vxi->cvirt.onhold_last;
74 vxi->cvirt.onhold_last = now;
75 vxi->sched.cpu[cpu].hold_ticks += delta;
78 static inline void vx_onhold_dec(struct vx_info *vxi)
80 if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
81 __vx_onhold_update(vxi);
84 static inline void vx_account_user(struct vx_info *vxi,
85 cputime_t cputime, int nice)
87 int cpu = smp_processor_id();
91 vxi->sched.cpu[cpu].user_ticks += cputime;
94 static inline void vx_account_system(struct vx_info *vxi,
95 cputime_t cputime, int idle)
97 int cpu = smp_processor_id();
101 vxi->sched.cpu[cpu].sys_ticks += cputime;
105 #warning duplicate inclusion