/* Create network context */
if (vc_net_create(ctx) == VC_NOCTX) {
if (errno == EEXIST)
- goto process;
+ goto tag;
return -1;
}
if (vc_set_nflags(ctx, &vc_nf))
return -1;
+tag:
+ /* Create tag context */
+ if (vc_tag_create(ctx) == VC_NOCTX)
+ return -1;
+
process:
/*
* Create context info - this sets the STATE_SETUP and STATE_INIT flags.
if (vc_set_ccaps(ctx, &vc_caps))
return -1;
- if (pl_setsched(ctx, 1, 0) < 0) {
+ if (pl_setsched(ctx, 0, 1) < 0) {
PERROR("pl_setsched(%u)", ctx);
exit(1);
}
migrate:
if (net_migrated || !vc_net_migrate(ctx))
{
- if (!vc_ctx_migrate(ctx, 0))
+ if (!vc_tag_migrate(ctx) && !vc_ctx_migrate(ctx, 0))
break; /* done */
net_migrated = 1;
}
while (0)
int
-pl_setsched(xid_t ctx, uint32_t cpu_share, uint32_t cpu_sched_flags)
+pl_setsched(xid_t ctx, uint32_t cpu_min, uint32_t cpu_share)
{
struct vc_set_sched vc_sched;
struct vc_ctx_flags vc_flags;
- uint32_t new_flags;
vc_sched.set_mask = (VC_VXSM_FILL_RATE | VC_VXSM_INTERVAL | VC_VXSM_TOKENS |
VC_VXSM_TOKENS_MIN | VC_VXSM_TOKENS_MAX | VC_VXSM_MSEC |
- VC_VXSM_FILL_RATE2 | VC_VXSM_INTERVAL2 | VC_VXSM_FORCE |
- VC_VXSM_IDLE_TIME);
- vc_sched.fill_rate = 0;
- vc_sched.fill_rate2 = cpu_share; /* tokens accumulated per interval */
- vc_sched.interval = vc_sched.interval2 = 1000; /* milliseconds */
+ VC_VXSM_FILL_RATE2 | VC_VXSM_INTERVAL2 | VC_VXSM_FORCE);
+ vc_sched.fill_rate = cpu_min; /* percent reserved */
+ 	vc_sched.interval = 100;		/* milliseconds */
+ vc_sched.fill_rate2 = cpu_share; /* best-effort fair share of unreserved */
+ vc_sched.interval2 = 1000; /* milliseconds */
vc_sched.tokens = 100; /* initial allocation of tokens */
vc_sched.tokens_min = 50; /* need this many tokens to run */
vc_sched.tokens_max = 100; /* max accumulated number of tokens */
- if (cpu_share == (uint32_t)VC_LIM_KEEP)
- vc_sched.set_mask &= ~(VC_VXSM_FILL_RATE|VC_VXSM_FILL_RATE2);
-
- /* guaranteed CPU corresponds to SCHED_SHARE flag being cleared */
- if (cpu_sched_flags & VS_SCHED_CPU_GUARANTEED) {
- new_flags = 0;
- vc_sched.fill_rate = vc_sched.fill_rate2;
+ if (cpu_share) {
+ if (cpu_share == (uint32_t)VC_LIM_KEEP)
+ vc_sched.set_mask &= ~(VC_VXSM_FILL_RATE|VC_VXSM_FILL_RATE2);
+ else
+ vc_sched.set_mask |= VC_VXSM_IDLE_TIME;
}
- else
- new_flags = VC_VXF_SCHED_SHARE;
VC_SYSCALL(vc_set_sched(ctx, &vc_sched));
vc_flags.mask = VC_VXF_SCHED_FLAGS;
- vc_flags.flagword = new_flags | VC_VXF_SCHED_HARD;
+ vc_flags.flagword = VC_VXF_SCHED_HARD;
VC_SYSCALL(vc_set_cflags(ctx, &vc_flags));
return 0;