5 #include "vserver/sched.h"
/* scheduler tuning constants for the vserver CPU scheduler */

/* weight (percent) of the per-guest "vavavoom" boost in priority calc
 * NOTE(review): exact use is outside this chunk — confirm at call sites */
#define VAVAVOOM_RATIO		50

/* clamp range for the priority bias applied to guest tasks;
 * the negative constant is parenthesized so the macro expands
 * safely in any arithmetic context */
#define MAX_PRIO_BIAS		20
#define MIN_PRIO_BIAS		(-20)
14 static inline int vx_tokens_avail(struct vx_info *vxi)
16 return atomic_read(&vxi->sched.tokens);
19 static inline void vx_consume_token(struct vx_info *vxi)
21 atomic_dec(&vxi->sched.tokens);
24 static inline int vx_need_resched(struct task_struct *p)
26 #ifdef CONFIG_VSERVER_HARDCPU
27 struct vx_info *vxi = p->vx_info;
29 int slice = --p->time_slice;
31 #ifdef CONFIG_VSERVER_HARDCPU
35 if ((tokens = vx_tokens_avail(vxi)) > 0)
36 vx_consume_token(vxi);
37 /* for tokens > 0, one token was consumed */
46 static inline void vx_onhold_inc(struct vx_info *vxi)
48 int onhold = atomic_read(&vxi->cvirt.nr_onhold);
50 atomic_inc(&vxi->cvirt.nr_onhold);
52 vxi->cvirt.onhold_last = jiffies;
55 static inline void __vx_onhold_update(struct vx_info *vxi)
57 int cpu = smp_processor_id();
58 uint32_t now = jiffies;
59 uint32_t delta = now - vxi->cvirt.onhold_last;
61 vxi->cvirt.onhold_last = now;
62 vxi->sched.cpu[cpu].hold_ticks += delta;
65 static inline void vx_onhold_dec(struct vx_info *vxi)
67 if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
68 __vx_onhold_update(vxi);
72 #warning duplicate inclusion