#include <asm/atomic.h>
#include <asm/param.h>
+struct _vx_ticks {
+ uint64_t user_ticks; /* token tick events */
+ uint64_t sys_ticks; /* token tick events */
+ uint64_t hold_ticks; /* token ticks paused */
+ uint64_t unused[5]; /* pad to 64 bytes (one cacheline) to avoid false sharing */
+};
+
/* context sub struct */
struct _vx_sched {
- spinlock_t tokens_lock; /* lock for this structure */
- int fill_rate; /* Fill rate: add X tokens... */
- int interval; /* Divisor: per Y jiffies */
- atomic_t tokens; /* number of CPU tokens in this context */
- int tokens_min; /* Limit: minimum for unhold */
- int tokens_max; /* Limit: no more than N tokens */
- uint32_t jiffies; /* add an integral multiple of Y to this */
- uint64_t ticks; /* token tick events */
- cpumask_t cpus_allowed; /* cpu mask for context */
+ atomic_t tokens; /* number of CPU tokens */
+ spinlock_t tokens_lock; /* lock for token bucket */
+
+ int fill_rate; /* Fill rate: add X tokens... */
+ int interval; /* Divisor: per Y jiffies */
+ int tokens_min; /* Limit: minimum for unhold */
+ int tokens_max; /* Limit: no more than N tokens */
+ uint32_t jiffies; /* last time accounted */
+ int priority_bias; /* bias offset for priority */
+ cpumask_t cpus_allowed; /* cpu mask for context */
+ struct _vx_ticks cpu[NR_CPUS];
};
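
For reference, the fields above imply a token-bucket refill step: every
interval jiffies the context gains fill_rate tokens, clamped to tokens_max,
and sched->jiffies advances by whole intervals only (which is what the old
"integral multiple of Y" comment described). A minimal sketch, with the
helper name and locking assumptions mine rather than part of this patch:

	/* illustrative only: callers would hold tokens_lock */
	static inline void __sketch_tokens_recalc(struct _vx_sched *sched)
	{
		/* whole intervals elapsed since the last accounting */
		uint32_t delta = ((uint32_t)jiffies - sched->jiffies)
			/ sched->interval;

		if (delta > 0) {
			int tokens = atomic_read(&sched->tokens)
				+ delta * sched->fill_rate;

			/* clamp to the bucket limit */
			if (tokens > sched->tokens_max)
				tokens = sched->tokens_max;
			atomic_set(&sched->tokens, tokens);
			/* advance by an integral multiple of the interval */
			sched->jiffies += delta * sched->interval;
		}
	}

Keeping tokens an atomic_t makes the fast-path read in vx_tokens_avail()
cheap; the spinlock only needs to serialize the refill read-modify-write.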
static inline void vx_info_init_sched(struct _vx_sched *sched)
{
+ int i;
+
/* scheduling; hard code starting values as constants */
sched->fill_rate = 1;
sched->interval = 4;
atomic_set(&sched->tokens, HZ >> 2);
sched->cpus_allowed = CPU_MASK_ALL;
+ sched->priority_bias = 0;
+
+ for_each_cpu(i) {
+ sched->cpu[i].user_ticks = 0;
+ sched->cpu[i].sys_ticks = 0;
+ sched->cpu[i].hold_ticks = 0;
+ }
}
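
Note the starting fill: HZ >> 2 tokens is a quarter of a second's worth of
timer ticks, so a freshly created context can run for about 0.25s of CPU
time before the fill_rate/interval refill becomes the limiting factor.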
static inline void vx_info_exit_sched(struct _vx_sched *sched)
static inline int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
{
- return sprintf(buffer,
- "Ticks:\t%16lld\n"
+ int length = 0;
+ int i;
+
+ length += sprintf(buffer,
"Token:\t\t%8d\n"
"FillRate:\t%8d\n"
- "Interval:\t%8d\n"
+ "Interval:\t%8d\n"
"TokensMin:\t%8d\n"
"TokensMax:\t%8d\n"
- ,(unsigned long long)sched->ticks
+ "PrioBias:\t%8d\n"
,atomic_read(&sched->tokens)
,sched->fill_rate
,sched->interval
,sched->tokens_min
,sched->tokens_max
+ ,sched->priority_bias
);
+
+ for_each_online_cpu(i) {
+ length += sprintf(buffer + length,
+ "cpu %d: %lld %lld %lld\n"
+ ,i
+ ,(long long)sched->cpu[i].user_ticks
+ ,(long long)sched->cpu[i].sys_ticks
+ ,(long long)sched->cpu[i].hold_ticks
+ );
+ }
+
+ return length;
}
/* sched vserver commands */
-#define VCMD_set_sched VC_CMD(SCHED, 1, 2)
+#define VCMD_set_sched_v2 VC_CMD(SCHED, 1, 2)
+#define VCMD_set_sched VC_CMD(SCHED, 1, 3)
struct vcmd_set_sched_v2 {
int32_t fill_rate;
int32_t interval;
int32_t tokens;
uint64_t cpu_mask;
};
+struct vcmd_set_sched_v3 {
+ uint32_t set_mask;
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ int32_t priority_bias;
+};
+
+#define VXSM_FILL_RATE 0x0001
+#define VXSM_INTERVAL 0x0002
+#define VXSM_TOKENS 0x0010
+#define VXSM_TOKENS_MIN 0x0020
+#define VXSM_TOKENS_MAX 0x0040
+#define VXSM_PRIO_BIAS 0x0100
+
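The set_mask bits select which vcmd_set_sched_v3 fields the kernel should
apply, so a caller can tune one knob without resetting the others. An
illustrative userspace call (the vserver() syscall wrapper and the xid
variable are assumptions here, not defined by this patch):

	struct vcmd_set_sched_v3 data = {
		.set_mask      = VXSM_FILL_RATE | VXSM_PRIO_BIAS,
		.fill_rate     = 2,
		.priority_bias = 5,
	};

	/* hypothetical wrapper around the vserver syscall */
	if (vserver(VCMD_set_sched, xid, &data) < 0)
		perror("VCMD_set_sched");

Fields whose bit is clear in set_mask keep their current values.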
#define SCHED_KEEP (-2)
#ifdef __KERNEL__
extern int vc_set_sched_v1(uint32_t, void __user *);
+extern int vc_set_sched_v2(uint32_t, void __user *);
extern int vc_set_sched(uint32_t, void __user *);
#define VAVAVOOM_RATIO 50
+#define MAX_PRIO_BIAS 20
+#define MIN_PRIO_BIAS -20
+
#include "context.h"
static inline int vx_need_resched(struct task_struct *p)
{
#ifdef CONFIG_VSERVER_HARDCPU
struct vx_info *vxi = p->vx_info;
+#endif
+ int slice = --p->time_slice;
+#ifdef CONFIG_VSERVER_HARDCPU
if (vxi) {
int tokens;
- p->time_slice--;
- if (atomic_read(&vxi->vx_usecnt) < 1)
- printk("need_resched: p=%p, s=%ld, ref=%d, id=%d/%d\n",
- p, p->state, atomic_read(&vxi->vx_usecnt),
- vxi->vx_id, p->xid);
if ((tokens = vx_tokens_avail(vxi)) > 0)
vx_consume_token(vxi);
- return ((p->time_slice == 0) || (tokens < 1));
+ /* a token was consumed above if one was available;
+  * tokens < 2 means none remain, so force a reschedule */
+ if (tokens < 2)
+ return 1;
}
#endif
- p->time_slice--;
- return (p->time_slice == 0);
+ return (slice == 0);
+}
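
Numerically: one token is consumed per timer tick and the bucket gains
fill_rate tokens every interval jiffies, so a context that has drained its
bucket is throttled to roughly fill_rate/interval of one CPU, i.e. about
25% with the defaults set in vx_info_init_sched() (fill_rate 1, interval 4).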
+
+
+static inline void vx_onhold_inc(struct vx_info *vxi)
+{
+ int onhold = atomic_read(&vxi->cvirt.nr_onhold);
+
+ atomic_inc(&vxi->cvirt.nr_onhold);
+ if (!onhold)
+ vxi->cvirt.onhold_last = jiffies;
+}
+
+static inline void __vx_onhold_update(struct vx_info *vxi)
+{
+ int cpu = smp_processor_id();
+ uint32_t now = jiffies;
+ uint32_t delta = now - vxi->cvirt.onhold_last;
+
+ vxi->cvirt.onhold_last = now;
+ vxi->sched.cpu[cpu].hold_ticks += delta;
}
+static inline void vx_onhold_dec(struct vx_info *vxi)
+{
+ if (atomic_dec_and_test(&vxi->cvirt.nr_onhold))
+ __vx_onhold_update(vxi);
+}
#endif /* __KERNEL__ */
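
The hold accounting is reference counted: the first vx_onhold_inc()
timestamps onhold_last, and the vx_onhold_dec() that brings nr_onhold back
to zero folds the whole paused span into the current cpu's hold_ticks. The
read-then-increment in vx_onhold_inc() assumes callers are serialized,
e.g. by the runqueue lock. A sketch of the intended pairing (hold-queue
handling elided; the helper names are assumptions, not part of this patch):

	static void sketch_hold(struct vx_info *vxi, struct task_struct *p)
	{
		vx_onhold_inc(vxi);	/* first holder starts the clock */
		/* ... move p onto the context's hold queue ... */
	}

	static void sketch_unhold(struct vx_info *vxi, struct task_struct *p)
	{
		/* ... move p back onto a runqueue ... */
		vx_onhold_dec(vxi);	/* last unhold accounts hold_ticks */
	}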