+
+/* vc_set_sched() - update the token-bucket scheduler parameters of the
+ * context identified by @xid from a userspace request.
+ *
+ * @xid:  context id whose vx_info is to be modified
+ * @data: user pointer to a struct vcmd_set_sched_v3
+ *
+ * Only the fields selected by vc_data.set_mask are written; afterwards the
+ * resulting values are clamped to sane ranges, all under sched.tokens_lock
+ * so concurrent token accounting sees a consistent parameter set.
+ *
+ * Returns 0 on success, -EFAULT on a faulting user pointer, or -EINVAL
+ * when no context with @xid exists.
+ */
+int vc_set_sched(uint32_t xid, void __user *data)
+{
+ struct vcmd_set_sched_v3 vc_data;
+ struct vx_info *vxi;
+ unsigned int set_mask;
+
+ /* Copy the request before taking any lock (copy_from_user may sleep). */
+ if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ /* NOTE(review): lookup_vx_info() is assumed to take a reference that is
+  * dropped by put_vx_info() below — confirm against its definition. */
+ vxi = lookup_vx_info(xid);
+ if (!vxi)
+ return -EINVAL;
+
+ set_mask = vc_data.set_mask;
+
+ /* NOTE(review): plain spin_lock, not irqsave — assumes tokens_lock is
+  * never acquired from IRQ context; verify other lock sites. */
+ spin_lock(&vxi->sched.tokens_lock);
+
+ /* Apply only the fields the caller flagged in set_mask. */
+ if (set_mask & VXSM_FILL_RATE)
+ vxi->sched.fill_rate = vc_data.fill_rate;
+ if (set_mask & VXSM_INTERVAL)
+ vxi->sched.interval = vc_data.interval;
+ if (set_mask & VXSM_TOKENS)
+ atomic_set(&vxi->sched.tokens, vc_data.tokens);
+ if (set_mask & VXSM_TOKENS_MIN)
+ vxi->sched.tokens_min = vc_data.tokens_min;
+ if (set_mask & VXSM_TOKENS_MAX)
+ vxi->sched.tokens_max = vc_data.tokens_max;
+ if (set_mask & VXSM_PRIO_BIAS)
+ vxi->sched.priority_bias = vc_data.priority_bias;
+
+ /* Sanity check the resultant values */
+ /* NOTE(review): the <= 0 clamps only work if fill_rate/interval are
+  * signed fields — confirm their declared types in struct _vx_sched. */
+ if (vxi->sched.fill_rate <= 0)
+ vxi->sched.fill_rate = 1;
+ if (vxi->sched.interval <= 0)
+ vxi->sched.interval = HZ;
+ if (vxi->sched.tokens_max == 0)
+ vxi->sched.tokens_max = 1;
+ /* Keep the bucket invariant: tokens_min <= tokens <= tokens_max. */
+ if (atomic_read(&vxi->sched.tokens) > vxi->sched.tokens_max)
+ atomic_set(&vxi->sched.tokens, vxi->sched.tokens_max);
+ if (vxi->sched.tokens_min > vxi->sched.tokens_max)
+ vxi->sched.tokens_min = vxi->sched.tokens_max;
+ /* Clamp the nice-style bias into its permitted window. */
+ if (vxi->sched.priority_bias > MAX_PRIO_BIAS)
+ vxi->sched.priority_bias = MAX_PRIO_BIAS;
+ if (vxi->sched.priority_bias < MIN_PRIO_BIAS)
+ vxi->sched.priority_bias = MIN_PRIO_BIAS;
+
+ spin_unlock(&vxi->sched.tokens_lock);
+ put_vx_info(vxi);
+ return 0;
+}
+