-/*
- * effective_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into a -4 ... 0 ... +4 bonus/penalty range.
- *
- * Additionally, we scale another amount based on the number of
- * CPU tokens currently held by the context, if the process is
- * part of a context (and the appropriate SCHED flag is set).
- * This ranges from -5 ... 0 ... +15, quadratically.
- *
- * So, the total bonus is -9 .. 0 .. +19
- * We use ~50% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- * unless that context is far exceeding its CPU allocation.
- *
- * Both properties are important to certain workloads.
- */
-int vx_effective_vavavoom(struct vx_info *vxi, int max_prio)
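+/* helpers that convert scheduler ticks (jiffies) into milli-/microseconds */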
+static inline unsigned long ticks_to_msec(unsigned long ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+static inline unsigned long ticks_to_usec(unsigned long ticks)
+{
+ return jiffies_to_usecs(ticks);
+}
+
+
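+/*
+ * do_set_sched - apply the scheduler parameters selected in data->mask
+ * to vxi->sched and mark the affected CPUs for a (possibly delayed)
+ * per-cpu update.
+ */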
+static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data)
+{
+ unsigned int set_mask = data->mask;
+ unsigned int update_mask;
+ int i, cpu;
+
+ /* Sanity check data values */
+ if (data->tokens_max <= 0)
+ data->tokens_max = HZ;
+ if (data->tokens_min < 0)
+ data->tokens_min = HZ/3;
+ if (data->tokens_min >= data->tokens_max)
+ data->tokens_min = data->tokens_max;
+
+ if (data->prio_bias > MAX_PRIO_BIAS)
+ data->prio_bias = MAX_PRIO_BIAS;
+ if (data->prio_bias < MIN_PRIO_BIAS)
+ data->prio_bias = MIN_PRIO_BIAS;
+
+ spin_lock(&vxi->sched.tokens_lock);
+
+ /* sync up on delayed updates */
+ for_each_cpu_mask(cpu, vxi->sched.update)
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, cpu));
+
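+	/* copy only the fields selected by the mask; intervals may be given in msec */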
+ if (set_mask & VXSM_FILL_RATE)
+ vxi->sched.fill_rate[0] = data->fill_rate[0];
+ if (set_mask & VXSM_FILL_RATE2)
+ vxi->sched.fill_rate[1] = data->fill_rate[1];
+ if (set_mask & VXSM_INTERVAL)
+ vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ?
+ msec_to_ticks(data->interval[0]) : data->interval[0];
+ if (set_mask & VXSM_INTERVAL2)
+ vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ?
+ msec_to_ticks(data->interval[1]) : data->interval[1];
+ if (set_mask & VXSM_TOKENS)
+ vxi->sched.tokens = data->tokens;
+ if (set_mask & VXSM_TOKENS_MIN)
+ vxi->sched.tokens_min = data->tokens_min;
+ if (set_mask & VXSM_TOKENS_MAX)
+ vxi->sched.tokens_max = data->tokens_max;
+ if (set_mask & VXSM_PRIO_BIAS)
+ vxi->sched.prio_bias = data->prio_bias;
+
+	/* Sanity check the resulting rate/interval values */
+	for (i = 0; i < 2; i++) {
+		if (vxi->sched.fill_rate[i] < 0)
+			vxi->sched.fill_rate[i] = 0;
+		if (vxi->sched.interval[i] <= 0)
+			vxi->sched.interval[i] = HZ;
+	}
+
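+	/* accumulate the parameter bits that still have to reach the per-cpu data */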
+ update_mask = vxi->sched.update_mask & VXSM_SET_MASK;
+ update_mask |= (set_mask & (VXSM_SET_MASK|VXSM_IDLE_TIME));
+ vxi->sched.update_mask = update_mask;
+#ifdef CONFIG_SMP
+ rmb();
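+	/* restrict the delayed update to the requested CPU (if online),
+	   otherwise update all online CPUs */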
+ if (set_mask & VXSM_CPU_ID) {
+ vxi->sched.update = cpumask_of_cpu(data->cpu_id);
+ cpus_and(vxi->sched.update, cpu_online_map,
+ vxi->sched.update);
+	} else
+		vxi->sched.update = cpu_online_map;
+
+ /* forced reload? */
+ if (set_mask & VXSM_FORCE) {
+ for_each_cpu_mask(cpu, vxi->sched.update)
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, cpu));
+ vxi->sched.update = CPU_MASK_NONE;
+ }
+#else
+ /* on UP we update immediately */
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, 0));
+#endif
+
+ spin_unlock(&vxi->sched.tokens_lock);
+ return 0;
+}
+
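+/* helpers that copy the named fields from *data into the local vc_data */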
+#define COPY_IDS(C) C(cpu_id); C(bucket_id)
+#define COPY_PRI(C) C(prio_bias)
+#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
+#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \
+ C(fill_rate[1]); C(interval[1]);
+
+#define COPY_VALUE(name) vc_data.name = data->name
+
+static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data)