1 /* _VX_SCHED_H include guard is defined below (guard lines elided in this chunk) */
3 #if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
5 #include <linux/spinlock.h>
6 #include <linux/jiffies.h>
7 #include <linux/cpumask.h>
8 #include <asm/atomic.h>
11 /* context sub struct */
/*
 * NOTE(review): the lines below are the fields of struct _vx_sched, the
 * per-context CPU token-bucket state; the enclosing "struct _vx_sched {"
 * and closing "};" lie outside this chunk — confirm against full file.
 * Together, fill_rate/interval mean: add fill_rate tokens per interval
 * jiffies (per the original field comments).
 */
14 spinlock_t tokens_lock; /* lock for this structure */
16 int fill_rate; /* Fill rate: add X tokens... */
17 int interval; /* Divisor: per Y jiffies */
18 atomic_t tokens; /* number of CPU tokens in this context */
19 int tokens_min; /* Limit: minimum for unhold */
20 int tokens_max; /* Limit: no more than N tokens */
21 uint32_t jiffies; /* add an integral multiple of Y to this */
23 uint64_t ticks; /* token tick events */
24 cpumask_t cpus_allowed; /* cpu mask for context */
27 static inline void vx_info_init_sched(struct _vx_sched *sched)
/*
 * Initialize per-context scheduler state with hard-coded HZ-based
 * defaults: bucket floor HZ/16, ceiling HZ/2, starting balance HZ/4;
 * snapshot current jiffies, init the lock, and allow all CPUs.
 * NOTE(review): the function braces and (presumably) the fill_rate /
 * interval assignments are elided from this chunk — verify in full file.
 */
29 /* scheduling; hard code starting values as constants */
32 sched->tokens_min = HZ >> 4;
33 sched->tokens_max = HZ >> 1;
34 sched->jiffies = jiffies;
35 sched->tokens_lock = SPIN_LOCK_UNLOCKED;
37 atomic_set(&sched->tokens, HZ >> 2);
38 sched->cpus_allowed = CPU_MASK_ALL;
/*
 * Teardown counterpart of vx_info_init_sched.
 * NOTE(review): body is elided from this chunk — likely empty or
 * minimal, but confirm against the full file.
 */
41 static inline void vx_info_exit_sched(struct _vx_sched *sched)
/*
 * Format the context's scheduler state into 'buffer' (for a /proc
 * read); returns the byte count from sprintf.
 * NOTE(review): the sprintf format string (original lines ~49-54) is
 * elided here — it must contain conversion specifiers matching the
 * visible arguments: %llu for ticks, %d for the atomic token count.
 */
46 static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
48 return sprintf(buffer,
55 ,(unsigned long long)sched->ticks
56 ,atomic_read(&sched->tokens)
65 #else /* _VX_INFO_DEF_ */
71 /* sched vserver commands */
/* Command id for the set_sched syscall command (category SCHED, cmd 1, v2). */
73 #define VCMD_set_sched VC_CMD(SCHED, 1, 2)
/*
 * NOTE(review): the members of vcmd_set_sched_v2 (original lines
 * ~76-82) are elided from this chunk — see full file for the layout.
 */
75 struct vcmd_set_sched_v2 {
/* Sentinel value meaning "keep the current setting" in set_sched fields. */
84 #define SCHED_KEEP (-2)
/* Syscall command handlers (v1 and current); take cmd id + userspace data ptr. */
88 extern int vc_set_sched_v1(uint32_t, void __user *);
89 extern int vc_set_sched(uint32_t, void __user *);
/* Scaling factor used by the vavavoom priority computation below. */
92 #define VAVAVOOM_RATIO 50
97 /* scheduling stuff */
/* Compute a task's effective priority boost ("vavavoom"); defined elsewhere. */
99 int effective_vavavoom(struct task_struct *, int);
/* Refill/recalculate a context's token bucket; defined elsewhere. */
101 int vx_tokens_recalc(struct vx_info *);
/* Atomically read the context's current CPU-token balance.
   NOTE(review): function braces are elided from this chunk. */
105 static inline int vx_tokens_avail(struct vx_info *vxi)
107 return atomic_read(&vxi->sched.tokens);
/* Atomically spend one CPU token from the context's bucket.
   NOTE(review): no underflow guard visible here — callers are expected
   to check vx_tokens_avail() first (see vx_need_resched). Braces elided. */
110 static inline void vx_consume_token(struct vx_info *vxi)
112 atomic_dec(&vxi->sched.tokens);
/*
 * Decide whether task 'p' must be rescheduled. With hard CPU limits
 * (CONFIG_VSERVER_HARDCPU): consume one token if any are available,
 * and force a resched when the timeslice is exhausted OR no token was
 * available. Otherwise: timeslice exhaustion alone decides.
 * NOTE(review): braces, the #else, and intermediate lines are elided
 * from this chunk; 'tokens' is declared on an elided line — verify.
 */
115 static inline int vx_need_resched(struct task_struct *p)
117 #ifdef CONFIG_VSERVER_HARDCPU
118 struct vx_info *vxi = p->vx_info;
/* Debug trace for a suspect refcount; presumably gated by an elided
   condition on vxi — confirm against full file. */
124 if (atomic_read(&vxi->vx_usecnt) < 1)
/* NOTE(review): the format string has 5 specifiers but only 3 args are
   visible; the remaining args are presumably on elided line 127 — verify. */
125 printk("need_resched: p=%p, s=%ld, ref=%d, id=%d/%d\n",
126 p, p->state, atomic_read(&vxi->vx_usecnt),
128 if ((tokens = vx_tokens_avail(vxi)) > 0)
129 vx_consume_token(vxi);
/* Resched when slice is used up or the bucket was empty before consume. */
130 return ((p->time_slice == 0) || (tokens < 1));
/* Non-HARDCPU path: timeslice exhaustion alone triggers resched. */
134 return (p->time_slice == 0);
138 #endif /* __KERNEL__ */
140 #endif /* _VX_SCHED_H */