Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
[linux-2.6.git] / kernel / vserver / sched.c
index 9284bf4..d3e58be 100644 (file)
@@ -3,18 +3,17 @@
  *
  *  Virtual Server: Scheduler Support
  *
- *  Copyright (C) 2004  Herbert Pötzl
+ *  Copyright (C) 2004-2005  Herbert Pötzl
  *
  *  V0.01  adapted Sam Vilains version to 2.6.3
  *  V0.02  removed legacy interface
  *
  */
 
-#include <linux/config.h>
 #include <linux/sched.h>
-#include <linux/vinline.h>
-#include <linux/vserver/context.h>
-#include <linux/vserver/sched.h>
+#include <linux/vs_context.h>
+#include <linux/vs_sched.h>
+#include <linux/vserver/sched_cmd.h>
 
 #include <asm/errno.h>
 #include <asm/uaccess.h>
@@ -32,7 +31,7 @@ int vx_tokens_recalc(struct vx_info *vxi)
 {
        long delta, tokens = 0;
 
-       if (__vx_flags(vxi->vx_flags, VXF_SCHED_PAUSE, 0))
+       if (vx_info_flags(vxi, VXF_SCHED_PAUSE, 0))
                /* we are paused */
                return 0;
 
@@ -51,7 +50,7 @@ int vx_tokens_recalc(struct vx_info *vxi)
                atomic_add(tokens, &vxi->sched.tokens);
                vxi->sched.jiffies += delta;
                tokens = atomic_read(&vxi->sched.tokens);
-       
+
                if (tokens > vxi->sched.tokens_max) {
                        tokens = vxi->sched.tokens_max;
                        atomic_set(&vxi->sched.tokens, tokens);
@@ -59,7 +58,10 @@ int vx_tokens_recalc(struct vx_info *vxi)
                spin_unlock(&vxi->sched.tokens_lock);
        } else {
                /* no new tokens */
-               if ((tokens = vx_tokens_avail(vxi)) < vxi->sched.tokens_min) {
+               tokens = vx_tokens_avail(vxi);
+               if (tokens <= 0)
+                       vxi->vx_state |= VXS_ONHOLD;
+               if (tokens < vxi->sched.tokens_min) {
                        /* enough tokens will be available in */
                        if (vxi->sched.tokens_min == 0)
                                return delta - vxi->sched.interval;
@@ -67,7 +69,14 @@ int vx_tokens_recalc(struct vx_info *vxi)
                                vxi->sched.tokens_min / vxi->sched.fill_rate;
                }
        }
+
        /* we have some tokens left */
+       if (vx_info_state(vxi, VXS_ONHOLD) &&
+               (tokens >= vxi->sched.tokens_min))
+               vxi->vx_state &= ~VXS_ONHOLD;
+       if (vx_info_state(vxi, VXS_ONHOLD))
+               tokens -= vxi->sched.tokens_min;
+
        return tokens;
 }
 
@@ -92,9 +101,8 @@ int vx_tokens_recalc(struct vx_info *vxi)
  *
  * Both properties are important to certain workloads.
  */
-int effective_vavavoom(task_t *p, int max_prio)
+int vx_effective_vavavoom(struct vx_info *vxi, int max_prio)
 {
-       struct vx_info *vxi = p->vx_info;
        int vavavoom, max;
 
        /* lots of tokens = lots of vavavoom
@@ -105,28 +113,23 @@ int effective_vavavoom(task_t *p, int max_prio)
                max = max * max;
                vavavoom = max_prio * VAVAVOOM_RATIO / 100
                        * (vavavoom*vavavoom - (max >> 2)) / max;
-               /*  alternative, geometric mapping
-               vavavoom = -( MAX_USER_PRIO*VAVAVOOM_RATIO/100 * vavavoom
-                       / vxi->sched.tokens_max -
-                       MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
        } else
                vavavoom = 0;
-       /* vavavoom = ( MAX_USER_PRIO*VAVAVOOM_RATIO/100*tokens_left(p) -
-               MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
 
-       return vavavoom;
+       vxi->sched.vavavoom = vavavoom;
+       return vavavoom + vxi->sched.priority_bias;
 }
 
 
-int vc_set_sched(uint32_t xid, void __user *data)
+int vc_set_sched_v2(uint32_t xid, void __user *data)
 {
        struct vcmd_set_sched_v2 vc_data;
        struct vx_info *vxi;
 
        if (copy_from_user (&vc_data, data, sizeof(vc_data)))
                return -EFAULT;
-       
-       vxi = locate_vx_info(xid);
+
+       vxi = lookup_vx_info(xid);
        if (!vxi)
                return -EINVAL;
 
@@ -160,3 +163,55 @@ int vc_set_sched(uint32_t xid, void __user *data)
        return 0;
 }
 
+
+int vc_set_sched(uint32_t xid, void __user *data)
+{
+       struct vcmd_set_sched_v3 vc_data;
+       struct vx_info *vxi;
+       unsigned int set_mask;
+
+       if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+               return -EFAULT;
+
+       vxi = lookup_vx_info(xid);
+       if (!vxi)
+               return -EINVAL;
+
+       set_mask = vc_data.set_mask;
+
+       spin_lock(&vxi->sched.tokens_lock);
+
+       if (set_mask & VXSM_FILL_RATE)
+               vxi->sched.fill_rate = vc_data.fill_rate;
+       if (set_mask & VXSM_INTERVAL)
+               vxi->sched.interval = vc_data.interval;
+       if (set_mask & VXSM_TOKENS)
+               atomic_set(&vxi->sched.tokens, vc_data.tokens);
+       if (set_mask & VXSM_TOKENS_MIN)
+               vxi->sched.tokens_min = vc_data.tokens_min;
+       if (set_mask & VXSM_TOKENS_MAX)
+               vxi->sched.tokens_max = vc_data.tokens_max;
+       if (set_mask & VXSM_PRIO_BIAS)
+               vxi->sched.priority_bias = vc_data.priority_bias;
+
+       /* Sanity check the resultant values */
+       if (vxi->sched.fill_rate <= 0)
+               vxi->sched.fill_rate = 1;
+       if (vxi->sched.interval <= 0)
+               vxi->sched.interval = HZ;
+       if (vxi->sched.tokens_max == 0)
+               vxi->sched.tokens_max = 1;
+       if (atomic_read(&vxi->sched.tokens) > vxi->sched.tokens_max)
+               atomic_set(&vxi->sched.tokens, vxi->sched.tokens_max);
+       if (vxi->sched.tokens_min > vxi->sched.tokens_max)
+               vxi->sched.tokens_min = vxi->sched.tokens_max;
+       if (vxi->sched.priority_bias > MAX_PRIO_BIAS)
+               vxi->sched.priority_bias = MAX_PRIO_BIAS;
+       if (vxi->sched.priority_bias < MIN_PRIO_BIAS)
+               vxi->sched.priority_bias = MIN_PRIO_BIAS;
+
+       spin_unlock(&vxi->sched.tokens_lock);
+       put_vx_info(vxi);
+       return 0;
+}
+