From: Marc Fiuczynski Date: Wed, 21 Jul 2004 20:51:51 +0000 (+0000) Subject: Merged rbce-vserver integration support X-Git-Tag: before-ipod-patch~8 X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=c0624a420b39e052c5b036b6c6dc45363871b7be;p=linux-2.6.git Merged rbce-vserver integration support --- diff --git a/include/linux/ckrm.h b/include/linux/ckrm.h index 8dba64362..04f4ec00f 100644 --- a/include/linux/ckrm.h +++ b/include/linux/ckrm.h @@ -66,6 +66,7 @@ enum ckrm_event { CKRM_EVENT_EXEC, CKRM_EVENT_UID, CKRM_EVENT_GID, + CKRM_EVENT_XID, CKRM_EVENT_LOGIN, CKRM_EVENT_USERADD, CKRM_EVENT_USERDEL, @@ -136,6 +137,7 @@ CKRM_DEF_CB_ARG(FORK, fork, struct task_struct *); CKRM_DEF_CB_ARG(EXEC, exec, const char *); CKRM_DEF_CB(UID, uid); CKRM_DEF_CB(GID, gid); +CKRM_DEF_CB_ARG(XID, xid, struct task_struct *); CKRM_DEF_CB(APPTAG, apptag); CKRM_DEF_CB(LOGIN, login); CKRM_DEF_CB_ARG(USERADD, useradd, struct user_struct *); diff --git a/include/linux/types.h b/include/linux/types.h index 288ab653e..dd1505c9d 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -36,6 +36,8 @@ typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_uid16_t uid16_t; typedef __kernel_gid16_t gid16_t; + +/* The following two typedef's are for vserver */ typedef unsigned int xid_t; typedef unsigned int nid_t; diff --git a/kernel/ckrm/ckrm_tc.c b/kernel/ckrm/ckrm_tc.c index cc0377887..316266494 100644 --- a/kernel/ckrm/ckrm_tc.c +++ b/kernel/ckrm/ckrm_tc.c @@ -50,77 +50,74 @@ #include +#define TC_DEBUG(fmt, args...) do { \ +/* printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0) - -#define TC_DEBUG(fmt, args...) do { /* printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0) - - -static struct ckrm_task_class taskclass_dflt_class = { +static struct ckrm_task_class taskclass_dflt_class = { }; const char *dflt_taskclass_name = TASK_CLASS_TYPE_NAME; -static struct ckrm_core_class *ckrm_alloc_task_class(struct ckrm_core_class *parent, const char *name); +static struct ckrm_core_class *ckrm_alloc_task_class(struct ckrm_core_class + *parent, const char *name); static int ckrm_free_task_class(struct ckrm_core_class *core); -static int tc_forced_reclassify(ckrm_core_class_t *target, const char *resname); -static int tc_show_members(struct ckrm_core_class *core, struct seq_file *seq); +static int tc_forced_reclassify(ckrm_core_class_t * target, + const char *resname); +static int tc_show_members(struct ckrm_core_class *core, struct seq_file *seq); static void tc_add_resctrl(struct ckrm_core_class *core, int resid); struct ckrm_classtype CT_taskclass = { - .mfidx = TC_MF_IDX, - .name = TASK_CLASS_TYPE_NAME, - .typeID = CKRM_CLASSTYPE_TASK_CLASS, - .maxdepth = 3, // Hubertus .. just to start - .resid_reserved = 4, // Hubertus .. reservation - .max_res_ctlrs = CKRM_MAX_RES_CTLRS, - .max_resid = 0, - .bit_res_ctlrs = 0L, + .mfidx = TC_MF_IDX, + .name = TASK_CLASS_TYPE_NAME, + .typeID = CKRM_CLASSTYPE_TASK_CLASS, + .maxdepth = 3, // Hubertus .. just to start + .resid_reserved = 4, // Hubertus .. 
reservation + .max_res_ctlrs = CKRM_MAX_RES_CTLRS, + .max_resid = 0, + .bit_res_ctlrs = 0L, .res_ctlrs_lock = SPIN_LOCK_UNLOCKED, - .classes = LIST_HEAD_INIT(CT_taskclass.classes), + .classes = LIST_HEAD_INIT(CT_taskclass.classes), + + .default_class = &taskclass_dflt_class.core, - .default_class = &taskclass_dflt_class.core, - // private version of functions - .alloc = &ckrm_alloc_task_class, - .free = &ckrm_free_task_class, - .show_members = &tc_show_members, + .alloc = &ckrm_alloc_task_class, + .free = &ckrm_free_task_class, + .show_members = &tc_show_members, .forced_reclassify = &tc_forced_reclassify, // use of default functions - .show_shares = &ckrm_class_show_shares, - .show_stats = &ckrm_class_show_stats, - .show_config = &ckrm_class_show_config, - .set_config = &ckrm_class_set_config, - .set_shares = &ckrm_class_set_shares, - .reset_stats = &ckrm_class_reset_stats, + .show_shares = &ckrm_class_show_shares, + .show_stats = &ckrm_class_show_stats, + .show_config = &ckrm_class_show_config, + .set_config = &ckrm_class_set_config, + .set_shares = &ckrm_class_set_shares, + .reset_stats = &ckrm_class_reset_stats, // mandatory private version .. no dflt available - .add_resctrl = &tc_add_resctrl, + .add_resctrl = &tc_add_resctrl, }; /************************************************************************** * Helper Functions * **************************************************************************/ -static inline void -ckrm_init_task_lock(struct task_struct *tsk) +static inline void ckrm_init_task_lock(struct task_struct *tsk) { tsk->ckrm_tsklock = SPIN_LOCK_UNLOCKED; } // Hubertus .. following functions should move to ckrm_rc.h -static inline void -ckrm_task_lock(struct task_struct *tsk) +static inline void ckrm_task_lock(struct task_struct *tsk) { - spin_lock(&tsk->ckrm_tsklock); + spin_lock(&tsk->ckrm_tsklock); } -static inline void -ckrm_task_unlock(struct task_struct *tsk) +static inline void ckrm_task_unlock(struct task_struct *tsk) { - spin_unlock(&tsk->ckrm_tsklock); + spin_unlock(&tsk->ckrm_tsklock); } /* @@ -140,14 +137,13 @@ ckrm_task_unlock(struct task_struct *tsk) * Function is also called with a ckrm_core_grab on the new core, hence * it needs to be dropped if no assignment takes place. */ - static void -ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t *newcls, - ckrm_task_class_t *oldcls, enum ckrm_event event) +ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t * newcls, + ckrm_task_class_t * oldcls, enum ckrm_event event) { int i; - ckrm_classtype_t *clstype; - ckrm_res_ctlr_t *rcbs; + ckrm_classtype_t *clstype; + ckrm_res_ctlr_t *rcbs; ckrm_task_class_t *curcls; void *old_res_class, *new_res_class; int drop_old_cls; @@ -155,24 +151,37 @@ ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t *newcls, ckrm_task_lock(tsk); curcls = tsk->taskclass; + if ((void *)-1 == curcls) { + // task is disassociated from ckrm... don't bother it. + ckrm_task_unlock(tsk); + ckrm_core_drop(class_core(newcls)); + return; + } + + if ((curcls == NULL) && (newcls == (void *)-1)) { + // task need to disassociated from ckrm and has no curcls + // just disassociate and return. 
+ tsk->taskclass = newcls; + ckrm_task_unlock(tsk); + return; + } // check whether compare_and_exchange should if (oldcls && (oldcls != curcls)) { ckrm_task_unlock(tsk); if (newcls) { /* compensate for previous grab */ TC_DEBUG("(%s:%d): Race-condition caught <%s> %d\n", - tsk->comm,tsk->pid,class_core(newcls)->name,event); + tsk->comm, tsk->pid, class_core(newcls)->name, + event); ckrm_core_drop(class_core(newcls)); } return; } - // make sure we have a real destination core if (!newcls) { newcls = &taskclass_dflt_class; ckrm_core_grab(class_core(newcls)); } - // take out of old class // remember that we need to drop the oldcore if ((drop_old_cls = (curcls != NULL))) { @@ -188,8 +197,14 @@ ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t *newcls, INIT_LIST_HEAD(&tsk->taskclass_link); tsk->taskclass = NULL; class_unlock(class_core(curcls)); - } - + if (newcls == (void *)-1) { + tsk->taskclass = newcls; + ckrm_task_unlock(tsk); + // still need to get out of old class + newcls = NULL; + goto rc_handling; + } + } // put into new class class_lock(class_core(newcls)); tsk->taskclass = newcls; @@ -201,69 +216,80 @@ ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t *newcls, goto out; } - CE_NOTIFY(&CT_taskclass,event,newcls,tsk); + CE_NOTIFY(&CT_taskclass, event, newcls, tsk); ckrm_task_unlock(tsk); - clstype = class_isa(newcls); // Hubertus .. can hardcode ckrm_CT_taskclass - if (clstype->bit_res_ctlrs) { // avoid running through the entire list if non is registered + rc_handling: + clstype = &CT_taskclass; + if (clstype->bit_res_ctlrs) { + // avoid running through the entire list if non is registered for (i = 0; i < clstype->max_resid; i++) { - if (clstype->res_ctlrs[i] == NULL) + if (clstype->res_ctlrs[i] == NULL) continue; atomic_inc(&clstype->nr_resusers[i]); - old_res_class = curcls ? class_core(curcls)->res_class[i] : NULL; - new_res_class = newcls ? class_core(newcls)->res_class[i] : NULL; + old_res_class = + curcls ? class_core(curcls)->res_class[i] : NULL; + new_res_class = + newcls ? 
class_core(newcls)->res_class[i] : NULL; rcbs = clstype->res_ctlrs[i]; - if (rcbs && rcbs->change_resclass && (old_res_class != new_res_class)) - (*rcbs->change_resclass)(tsk, old_res_class, new_res_class); + if (rcbs && rcbs->change_resclass + && (old_res_class != new_res_class)) + (*rcbs->change_resclass) (tsk, old_res_class, + new_res_class); atomic_dec(&clstype->nr_resusers[i]); } } - out: - if (drop_old_cls) + out: + if (drop_old_cls) ckrm_core_drop(class_core(curcls)); return; } -// HF SUGGEST: we could macro-tize this for other types DEF_FUNC_ADD_RESCTRL(funcname,link) +// HF SUGGEST: we could macro-tize this for other types +// DEF_FUNC_ADD_RESCTRL(funcname,link) // would DEF_FUNC_ADD_RESCTRL(tc_add_resctrl,taskclass_link) -static void -tc_add_resctrl(struct ckrm_core_class *core, int resid) +static void tc_add_resctrl(struct ckrm_core_class *core, int resid) { struct task_struct *tsk; struct ckrm_res_ctlr *rcbs; - if ((resid < 0) || (resid >= CKRM_MAX_RES_CTLRS) || ((rcbs = core->classtype->res_ctlrs[resid]) == NULL)) + if ((resid < 0) || (resid >= CKRM_MAX_RES_CTLRS) + || ((rcbs = core->classtype->res_ctlrs[resid]) == NULL)) return; class_lock(core); list_for_each_entry(tsk, &core->objlist, taskclass_link) { if (rcbs->change_resclass) - (*rcbs->change_resclass)(tsk, (void *) -1, core->res_class[resid]); + (*rcbs->change_resclass) (tsk, (void *)-1, + core->res_class[resid]); } class_unlock(core); } - /************************************************************************** * Functions called from classification points * **************************************************************************/ -#define ECB_PRINTK(fmt, args...) // do { if (CT_taskclass.ce_regd) printk("%s: " fmt, __FUNCTION__ , ## args); } while (0) - -#define CE_CLASSIFY_TASK(event, tsk) \ -do { \ - struct ckrm_task_class *newcls = NULL, *oldcls = tsk->taskclass; \ - \ - CE_CLASSIFY_RET(newcls,&CT_taskclass,event,tsk); \ - if (newcls) { \ - /* called synchrously. no need to get task struct */ \ - ckrm_set_taskclass(tsk, newcls, oldcls, event); \ - } \ +#define ECB_PRINTK(fmt, args...) \ +// do { if (CT_taskclass.ce_regd) +// printk("%s: " fmt, __FUNCTION__ , ## args); } while (0) + +#define CE_CLASSIFY_TASK(event, tsk) \ +do { \ + struct ckrm_task_class *newcls = NULL; \ + struct ckrm_task_class *oldcls = tsk->taskclass; \ + \ + CE_CLASSIFY_RET(newcls,&CT_taskclass,event,tsk); \ + if (newcls) { \ + /* called synchrously. no need to get task struct */ \ + ckrm_set_taskclass(tsk, newcls, oldcls, event); \ + } \ } while (0) + #define CE_CLASSIFY_TASK_PROTECT(event, tsk) \ do { \ ce_protect(&CT_taskclass); \ @@ -271,26 +297,20 @@ do { \ ce_release(&CT_taskclass); \ } while (0) - - - -static void -cb_taskclass_newtask(struct task_struct *tsk) +static void cb_taskclass_newtask(struct task_struct *tsk) { tsk->taskclass = NULL; INIT_LIST_HEAD(&tsk->taskclass_link); } - -static void -cb_taskclass_fork(struct task_struct *tsk) +static void cb_taskclass_fork(struct task_struct *tsk) { struct ckrm_task_class *cls = NULL; - ECB_PRINTK("%p:%d:%s\n",tsk,tsk->pid,tsk->comm); + ECB_PRINTK("%p:%d:%s\n", tsk, tsk->pid, tsk->comm); ce_protect(&CT_taskclass); - CE_CLASSIFY_RET(cls,&CT_taskclass,CKRM_EVENT_FORK,tsk); + CE_CLASSIFY_RET(cls, &CT_taskclass, CKRM_EVENT_FORK, tsk); if (cls == NULL) { ckrm_task_lock(tsk->parent); cls = tsk->parent->taskclass; @@ -299,65 +319,53 @@ cb_taskclass_fork(struct task_struct *tsk) } if (!list_empty(&tsk->taskclass_link)) printk("BUG in cb_fork.. 
tsk (%s:%d> already linked\n", - tsk->comm,tsk->pid); + tsk->comm, tsk->pid); ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_FORK); ce_release(&CT_taskclass); } -static void -cb_taskclass_exit(struct task_struct *tsk) +static void cb_taskclass_exit(struct task_struct *tsk) { - ckrm_task_class_t *cls; - - // Remove the task from the current core class - - ECB_PRINTK("%p:%d:%s\n",tsk,tsk->pid,tsk->comm); - ckrm_task_lock(tsk); - - CE_CLASSIFY_NORET( &CT_taskclass, CKRM_EVENT_EXIT, tsk); - - if ((cls = tsk->taskclass) != NULL) { - class_lock(class_core(cls)); - tsk->taskclass = NULL; - list_del(&tsk->taskclass_link); - class_unlock(class_core(cls)); - ckrm_core_drop(class_core(cls)); - } else { - INIT_LIST_HEAD(&tsk->taskclass_link); - } - ckrm_task_unlock(tsk); + CE_CLASSIFY_NORET(&CT_taskclass, CKRM_EVENT_EXIT, tsk); + ckrm_set_taskclass(tsk, (void *)-1, NULL, CKRM_EVENT_EXIT); } -static void -cb_taskclass_exec(const char *filename) +static void cb_taskclass_exec(const char *filename) { - ECB_PRINTK("%p:%d:%s <%s>\n",current,current->pid,current->comm,filename); + ECB_PRINTK("%p:%d:%s <%s>\n", current, current->pid, current->comm, + filename); CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_EXEC, current); } -static void -cb_taskclass_uid(void) +static void cb_taskclass_uid(void) { - ECB_PRINTK("%p:%d:%s\n",current,current->pid,current->comm); + ECB_PRINTK("%p:%d:%s\n", current, current->pid, current->comm); CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_UID, current); } +static void cb_taskclass_gid(void) +{ + ECB_PRINTK("%p:%d:%s\n", current, current->pid, current->comm); + CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_GID, current); +} + static void -cb_taskclass_gid(void) +cb_taskclass_xid(struct task_struct *tsk) { ECB_PRINTK("%p:%d:%s\n",current,current->pid,current->comm); - CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_GID, current); + CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_XID, tsk); } static struct ckrm_event_spec taskclass_events_callbacks[] = { - CKRM_EVENT_SPEC( NEWTASK, cb_taskclass_newtask ), - CKRM_EVENT_SPEC( EXEC , cb_taskclass_exec ), - CKRM_EVENT_SPEC( FORK , cb_taskclass_fork ), - CKRM_EVENT_SPEC( EXIT , cb_taskclass_exit ), - CKRM_EVENT_SPEC( UID , cb_taskclass_uid ), - CKRM_EVENT_SPEC( GID , cb_taskclass_gid ), - { -1 } + CKRM_EVENT_SPEC(NEWTASK, cb_taskclass_newtask), + CKRM_EVENT_SPEC(EXEC, cb_taskclass_exec), + CKRM_EVENT_SPEC(FORK, cb_taskclass_fork), + CKRM_EVENT_SPEC(EXIT, cb_taskclass_exit), + CKRM_EVENT_SPEC(UID, cb_taskclass_uid), + CKRM_EVENT_SPEC(GID, cb_taskclass_gid), + CKRM_EVENT_SPEC(XID, cb_taskclass_xid), + {-1} }; /*********************************************************************** @@ -372,8 +380,7 @@ static struct ckrm_event_spec taskclass_events_callbacks[] = { * ***********************************************************************/ -DECLARE_MUTEX(async_serializer); // serialize all async functions - +DECLARE_MUTEX(async_serializer); // serialize all async functions /* * Go through the task list and reclassify all tasks according to the current @@ -390,8 +397,7 @@ DECLARE_MUTEX(async_serializer); // serialize all async functions * We use a hybrid by comparing ratio nr_threads/pidmax */ -static void -ckrm_reclassify_all_tasks(void) +static void ckrm_reclassify_all_tasks(void) { extern int pid_max; @@ -401,22 +407,21 @@ ckrm_reclassify_all_tasks(void) int ratio; int use_bitmap; - ratio = curpidmax / nr_threads; if (curpidmax <= PID_MAX_DEFAULT) { - use_bitmap = 1; + use_bitmap = 1; } else { - use_bitmap = (ratio >= 2); + use_bitmap = (ratio >= 2); } ce_protect(&CT_taskclass); - retry: + 
retry: if (use_bitmap == 0) { // go through it in one walk read_lock(&tasklist_lock); - for ( i=0 ; icore)) return; - down(&async_serializer); // protect again race condition - - - TC_DEBUG("start %p:%s:%d:%d\n",cls,cls->core.name, - atomic_read(&cls->core.refcnt),atomic_read(&cls->core.hnode.parent->refcnt)); - // If no CE registered for this classtype, following will be needed repeatedly; - ce_regd = class_core(cls)->classtype->ce_regd; + down(&async_serializer); // protect again race condition + TC_DEBUG("start %p:%s:%d:%d\n", cls, cls->core.name, + atomic_read(&cls->core.refcnt), + atomic_read(&cls->core.hnode.parent->refcnt)); + // If no CE registered for this classtype, following will be needed + // repeatedly; + ce_regd = class_core(cls)->classtype->ce_regd; cnode = &(class_core(cls)->hnode); parcls = class_type(ckrm_task_class_t, cnode->parent); -next_task: + next_task: class_lock(class_core(cls)); if (!list_empty(&class_core(cls)->objlist)) { struct ckrm_task_class *newcls = NULL; - struct task_struct *tsk = - list_entry(class_core(cls)->objlist.next, - struct task_struct, taskclass_link); - + struct task_struct *tsk = + list_entry(class_core(cls)->objlist.next, + struct task_struct, taskclass_link); + get_task_struct(tsk); class_unlock(class_core(cls)); if (ce_regd) { - CE_CLASSIFY_RET(newcls,&CT_taskclass,CKRM_EVENT_RECLASSIFY,tsk); + CE_CLASSIFY_RET(newcls, &CT_taskclass, + CKRM_EVENT_RECLASSIFY, tsk); if (cls == newcls) { // don't allow reclassifying to the same class - // as we are in the process of cleaning up this class - ckrm_core_drop(class_core(newcls)); // to compensate CE's grab + // as we are in the process of cleaning up + // this class + + // compensate CE's grab + ckrm_core_drop(class_core(newcls)); newcls = NULL; } } @@ -575,27 +583,27 @@ next_task: num++; goto next_task; } - TC_DEBUG("stop %p:%s:%d:%d %d\n",cls,cls->core.name, - atomic_read(&cls->core.refcnt),atomic_read(&cls->core.hnode.parent->refcnt),num); + TC_DEBUG("stop %p:%s:%d:%d %d\n", cls, cls->core.name, + atomic_read(&cls->core.refcnt), + atomic_read(&cls->core.hnode.parent->refcnt), num); class_unlock(class_core(cls)); ckrm_core_drop(class_core(cls)); up(&async_serializer); - return ; + return; } /* * Change the core class of the given task. 
*/ -int -ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls) +int ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls) { struct task_struct *tsk; if (!ckrm_validate_and_grab_core(class_core(cls))) - return - EINVAL; + return -EINVAL; read_lock(&tasklist_lock); if ((tsk = find_task_by_pid(pid)) == NULL) { @@ -605,39 +613,47 @@ ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls) } get_task_struct(tsk); read_unlock(&tasklist_lock); - - down(&async_serializer); // protect again race condition - + + /* Check permissions */ + if ((!capable(CAP_SYS_NICE)) && + (!capable(CAP_SYS_RESOURCE)) && (current->user != tsk->user)) { + ckrm_core_drop(class_core(cls)); + put_task_struct(tsk); + return -EPERM; + } + + down(&async_serializer); // protect again race condition + ce_protect(&CT_taskclass); ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL); ce_release(&CT_taskclass); put_task_struct(tsk); - + up(&async_serializer); return 0; } -static struct ckrm_core_class * -ckrm_alloc_task_class(struct ckrm_core_class *parent, const char *name) +static struct ckrm_core_class *ckrm_alloc_task_class(struct ckrm_core_class + *parent, const char *name) { struct ckrm_task_class *taskcls; taskcls = kmalloc(sizeof(struct ckrm_task_class), GFP_KERNEL); - if (taskcls == NULL) + if (taskcls == NULL) return NULL; + memset(taskcls, 0, sizeof(struct ckrm_task_class)); - ckrm_init_core_class(&CT_taskclass, - class_core(taskcls),parent,name); + ckrm_init_core_class(&CT_taskclass, class_core(taskcls), parent, name); ce_protect(&CT_taskclass); if (CT_taskclass.ce_cb_active && CT_taskclass.ce_callbacks.class_add) - (*CT_taskclass.ce_callbacks.class_add)(name,taskcls); + (*CT_taskclass.ce_callbacks.class_add) (name, taskcls, + CT_taskclass.typeID); ce_release(&CT_taskclass); return class_core(taskcls); } -static int -ckrm_free_task_class(struct ckrm_core_class *core) +static int ckrm_free_task_class(struct ckrm_core_class *core) { struct ckrm_task_class *taskcls; @@ -648,82 +664,79 @@ ckrm_free_task_class(struct ckrm_core_class *core) if (core == core->classtype->default_class) { // reset the name tag core->name = dflt_taskclass_name; - return 0; + return 0; } - TC_DEBUG("%p:%s:%d\n",core,core->name,atomic_read(&core->refcnt)); + TC_DEBUG("%p:%s:%d\n", core, core->name, atomic_read(&core->refcnt)); taskcls = class_type(struct ckrm_task_class, core); ce_protect(&CT_taskclass); if (CT_taskclass.ce_cb_active && CT_taskclass.ce_callbacks.class_delete) - (*CT_taskclass.ce_callbacks.class_delete)(core->name,taskcls); - ckrm_reclassify_class_tasks( taskcls ); + (*CT_taskclass.ce_callbacks.class_delete) (core->name, taskcls, + CT_taskclass.typeID); + ckrm_reclassify_class_tasks(taskcls); ce_release(&CT_taskclass); - ckrm_release_core_class(core); // Hubertus .... could just drop the class .. error message + ckrm_release_core_class(core); + // Hubertus .... could just drop the class .. error message return 0; } - -void __init -ckrm_meta_init_taskclass(void) +void __init ckrm_meta_init_taskclass(void) { - printk("...... Initializing ClassType<%s> ........\n",CT_taskclass.name); + printk("...... 
Initializing ClassType<%s> ........\n", + CT_taskclass.name); // intialize the default class ckrm_init_core_class(&CT_taskclass, class_core(&taskclass_dflt_class), - NULL,dflt_taskclass_name); + NULL, dflt_taskclass_name); // register classtype and initialize default task class ckrm_register_classtype(&CT_taskclass); ckrm_register_event_set(taskclass_events_callbacks); - // note registeration of all resource controllers will be done later dynamically - // as these are specified as modules + // note registeration of all resource controllers will be done + // later dynamically as these are specified as modules } - - -static int -tc_show_members(struct ckrm_core_class *core, struct seq_file *seq) +static int tc_show_members(struct ckrm_core_class *core, struct seq_file *seq) { struct list_head *lh; struct task_struct *tsk; class_lock(core); - list_for_each(lh, &core->objlist) { + list_for_each(lh, &core->objlist) { tsk = container_of(lh, struct task_struct, taskclass_link); - seq_printf(seq,"%ld\n", (long)tsk->pid); + seq_printf(seq, "%ld\n", (long)tsk->pid); } class_unlock(core); return 0; } -static int -tc_forced_reclassify(struct ckrm_core_class *target,const char *obj) -{ +static int tc_forced_reclassify(struct ckrm_core_class *target, const char *obj) +{ pid_t pid; int rc = -EINVAL; - pid = (pid_t) simple_strtoul(obj,NULL,10); + pid = (pid_t) simple_strtoul(obj, NULL, 10); if (pid > 0) { rc = ckrm_forced_reclassify_pid(pid, - class_type(ckrm_task_class_t,target)); + class_type(ckrm_task_class_t, + target)); } return rc; -} - +} + #if 1 -/*************************************************************************************** +/****************************************************************************** * Debugging Task Classes: Utility functions - **************************************************************************************/ + ******************************************************************************/ -void -check_tasklist_sanity(struct ckrm_task_class *cls) +void check_tasklist_sanity(struct ckrm_task_class *cls) { struct ckrm_core_class *core = class_core(cls); struct list_head *lh1, *lh2; @@ -734,35 +747,38 @@ check_tasklist_sanity(struct ckrm_task_class *cls) if (list_empty(&core->objlist)) { class_lock(core); printk("check_tasklist_sanity: class %s empty list\n", - core->name); + core->name); return; } list_for_each_safe(lh1, lh2, &core->objlist) { - struct task_struct *tsk = container_of(lh1, struct task_struct, taskclass_link); + struct task_struct *tsk = + container_of(lh1, struct task_struct, + taskclass_link); if (count++ > 20000) { printk("list is CORRUPTED\n"); break; } if (tsk->taskclass != cls) { const char *tclsname; - tclsname = (tsk->taskclass) ? class_core(tsk->taskclass)->name - : "NULL"; - printk("sanity: task %s:%d has ckrm_core |%s| but in list |%s|\n", - tsk->comm,tsk->pid,tclsname,core->name); + tclsname = (tsk->taskclass) ? 
+ class_core(tsk->taskclass)->name:"NULL"; + printk("sanity: task %s:%d has ckrm_core " + "|%s| but in list |%s|\n", tsk->comm, + tsk->pid, tclsname, core->name); } } class_unlock(core); } } -void -ckrm_debug_free_task_class(struct ckrm_task_class *tskcls) +void ckrm_debug_free_task_class(struct ckrm_task_class *tskcls) { struct task_struct *proc, *thread; int count = 0; printk("Analyze Error <%s> %d\n", - class_core(tskcls)->name,atomic_read(&(class_core(tskcls)->refcnt))); + class_core(tskcls)->name, + atomic_read(&(class_core(tskcls)->refcnt))); read_lock(&tasklist_lock); class_lock(class_core(tskcls)); @@ -770,16 +786,19 @@ ckrm_debug_free_task_class(struct ckrm_task_class *tskcls) count += (tskcls == thread->taskclass); if ((thread->taskclass == tskcls) || (tskcls == NULL)) { const char *tclsname; - tclsname = (thread->taskclass) ? class_core(thread->taskclass)->name : "NULL"; - printk("%d thread=<%s:%d> -> <%s> <%lx>\n", - count,thread->comm,thread->pid,tclsname, thread->flags & PF_EXITING); + tclsname = (thread->taskclass) ? + class_core(thread->taskclass)->name :"NULL"; + printk("%d thread=<%s:%d> -> <%s> <%lx>\n", count, + thread->comm, thread->pid, tclsname, + thread->flags & PF_EXITING); } } while_each_thread(proc, thread); class_unlock(class_core(tskcls)); read_unlock(&tasklist_lock); printk("End Analyze Error <%s> %d\n", - class_core(tskcls)->name,atomic_read(&(class_core(tskcls)->refcnt))); -} + class_core(tskcls)->name, + atomic_read(&(class_core(tskcls)->refcnt))); +} #endif diff --git a/kernel/ckrm/rbce/rbcemod.c b/kernel/ckrm/rbce/rbcemod.c index f61d0879c..fa8d2c470 100644 --- a/kernel/ckrm/rbce/rbcemod.c +++ b/kernel/ckrm/rbce/rbcemod.c @@ -86,6 +86,7 @@ typedef enum { RBCE_RULE_APP_TAG, // task's application tag RBCE_RULE_IPV4, // IP address of listen(), ipv4 format RBCE_RULE_IPV6, // IP address of listen(), ipv6 format + RBCE_RULE_XID, // VSERVER RBCE_RULE_DEP_RULE, // dependent rule; must be the first term RBCE_RULE_INVALID, // invalid, for filler RBCE_RULE_INVALID2, // invalid, for filler @@ -134,8 +135,9 @@ struct rbce_rule { #define RBCE_TERM_TAG (3) #define RBCE_TERM_IPV4 (4) #define RBCE_TERM_IPV6 (5) +#define RBCE_TERM_XID (6) -#define NUM_TERM_MASK_VECTOR (6) +#define NUM_TERM_MASK_VECTOR (7) // must be one more the last RBCE_TERM_... // Rule flags. 
1 bit for each type of rule term #define RBCE_TERMFLAG_CMD (1 << RBCE_TERM_CMD) @@ -144,9 +146,10 @@ struct rbce_rule { #define RBCE_TERMFLAG_TAG (1 << RBCE_TERM_TAG) #define RBCE_TERMFLAG_IPV4 (1 << RBCE_TERM_IPV4) #define RBCE_TERMFLAG_IPV6 (1 << RBCE_TERM_IPV6) -#define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \ - RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | \ - RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6) +#define RBCE_TERMFLAG_XID (1 << RBCE_TERM_XID) +#define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \ + RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | RBCE_TERMFLAG_XID | \ + RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6) int termop_2_vecidx[RBCE_RULE_INVALID] = { [RBCE_RULE_CMD_PATH] = RBCE_TERM_CMD, @@ -156,6 +159,7 @@ int termop_2_vecidx[RBCE_RULE_INVALID] = { [RBCE_RULE_REAL_GID] = RBCE_TERM_GID, [RBCE_RULE_EFFECTIVE_UID] = RBCE_TERM_UID, [RBCE_RULE_EFFECTIVE_GID] = RBCE_TERM_GID, + [RBCE_RULE_XID] = RBCE_TERM_XID, [RBCE_RULE_APP_TAG] = RBCE_TERM_TAG, [RBCE_RULE_IPV4] = RBCE_TERM_IPV4, [RBCE_RULE_IPV6] = RBCE_TERM_IPV6, @@ -265,6 +269,15 @@ static void print_context_vectors(void) #define print_context_vectors(x) #endif +/* ====================== VSERVER support ========================== */ +#define CONFIG_VSERVER +#ifdef CONFIG_VSERVER +#include +#else +typedef unsigned int xid_t; +#define vx_task_xid(t) (0) +#endif + /* ======================= Helper Functions ========================= */ #include "token.c" @@ -949,6 +962,7 @@ fill_rule(struct rbce_rule *newrule, struct rbce_rule_term *terms, int nterms) case RBCE_RULE_REAL_GID: case RBCE_RULE_EFFECTIVE_UID: case RBCE_RULE_EFFECTIVE_GID: + case RBCE_RULE_XID: term->u.id = terms[i].u.id; break; @@ -1205,6 +1219,9 @@ void get_rule(const char *rname, char *result) goto handleid; case RBCE_RULE_EFFECTIVE_GID: strcpy(idtype, "eg"); + goto handleid; + case RBCE_RULE_XID: + strcpy(idtype, "x"); handleid: if (term->operator == RBCE_LESS_THAN) { oper = '<'; @@ -1745,6 +1762,22 @@ __evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns, no_ip = 0; break; + case RBCE_RULE_XID: + { + xid_t xid = vx_task_xid(tsk); + + if (term->operator == RBCE_LESS_THAN) { + rc = (xid < term->u.id); + } else if (term->operator == RBCE_GREATER_THAN) { + rc = (xid > term->u.id); + } else if (term->operator == RBCE_NOT) { + rc = (xid != term->u.id); + } else { + rc = (xid == term->u.id); + } + break; + } + default: rc = 0; printk(KERN_ERR "Error evaluate term op=%d\n", @@ -2212,6 +2245,7 @@ static const char *event_names[CKRM_NUM_EVENTS] = { AENT(EXEC), AENT(UID), AENT(GID), + AENT(XID), AENT(LOGIN), AENT(USERADD), AENT(USERDEL), @@ -2263,6 +2297,10 @@ void *rbce_tc_classify(enum ckrm_event event, ...) cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_GID, tc_classtype); break; + case CKRM_EVENT_XID: + cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_XID, tc_classtype); + break; + case CKRM_EVENT_LOGIN: case CKRM_EVENT_USERADD: case CKRM_EVENT_USERDEL: @@ -2564,3 +2602,5 @@ EXPORT_SYMBOL(set_tasktag); module_init(init_rbce); module_exit(exit_rbce); + + diff --git a/kernel/ckrm/rbce/rbcemod_ext.c b/kernel/ckrm/rbce/rbcemod_ext.c index 90c8f154b..b7886ebf4 100644 --- a/kernel/ckrm/rbce/rbcemod_ext.c +++ b/kernel/ckrm/rbce/rbcemod_ext.c @@ -139,7 +139,7 @@ static inline void close_ukcc_channel(void) (r),(l),-1,NULL) > 0); \ chan_state = chan_isok ? 
UKCC_OK : UKCC_STANDBY; \ if (chan_wasok && !chan_isok) { \ - printk("Channel stalled\n"); \ + printk("Channel stalled\n"); \ } else if (!chan_wasok && chan_isok) { \ printk("Channel continues\n"); \ } \ diff --git a/kernel/ckrm/rbce/token.c b/kernel/ckrm/rbce/token.c index dd85aaf6e..0ace80a50 100644 --- a/kernel/ckrm/rbce/token.c +++ b/kernel/ckrm/rbce/token.c @@ -21,6 +21,10 @@ enum rule_token_t { TOKEN_EGID_LT, TOKEN_EGID_GT, TOKEN_EGID_NOT, + TOKEN_XID_EQ, + TOKEN_XID_LT, + TOKEN_XID_GT, + TOKEN_XID_NOT, TOKEN_TAG, TOKEN_IPV4, TOKEN_IPV6, @@ -53,6 +57,10 @@ int token_to_ruleop[TOKEN_INVALID + 1] = { [TOKEN_EGID_LT] = RBCE_RULE_EFFECTIVE_GID, [TOKEN_EGID_GT] = RBCE_RULE_EFFECTIVE_GID, [TOKEN_EGID_NOT] = RBCE_RULE_EFFECTIVE_GID, + [TOKEN_XID_EQ] = RBCE_RULE_XID, + [TOKEN_XID_LT] = RBCE_RULE_XID, + [TOKEN_XID_GT] = RBCE_RULE_XID, + [TOKEN_XID_NOT] = RBCE_RULE_XID, [TOKEN_TAG] = RBCE_RULE_APP_TAG, [TOKEN_IPV4] = RBCE_RULE_IPV4, [TOKEN_IPV6] = RBCE_RULE_IPV6, @@ -97,6 +105,10 @@ enum op_token token_to_operator[TOKEN_INVALID + 1] = { [TOKEN_EGID_LT] = TOKEN_OP_LESS_THAN, [TOKEN_EGID_GT] = TOKEN_OP_GREATER_THAN, [TOKEN_EGID_NOT] = TOKEN_OP_NOT, + [TOKEN_XID_EQ] = TOKEN_OP_EQUAL, + [TOKEN_XID_LT] = TOKEN_OP_LESS_THAN, + [TOKEN_XID_GT] = TOKEN_OP_GREATER_THAN, + [TOKEN_XID_NOT] = TOKEN_OP_NOT, [TOKEN_TAG] = TOKEN_OP_EQUAL, [TOKEN_IPV4] = TOKEN_OP_EQUAL, [TOKEN_IPV6] = TOKEN_OP_EQUAL, @@ -128,6 +140,10 @@ static match_table_t tokens = { {TOKEN_EGID_LT, "egid<%d"}, {TOKEN_EGID_GT, "egid>%d"}, {TOKEN_EGID_NOT, "egid!%d"}, + {TOKEN_XID_EQ, "xid=%d"}, + {TOKEN_XID_LT, "xid<%d"}, + {TOKEN_XID_GT, "xid>%d"}, + {TOKEN_XID_NOT, "xid!%d"}, {TOKEN_TAG, "tag=%s"}, {TOKEN_IPV4, "ipv4=%s"}, {TOKEN_IPV6, "ipv6=%s"}, @@ -224,6 +240,10 @@ rules_parse(char *rule_defn, struct rbce_rule_term **rterms, int *term_mask) case TOKEN_EGID_LT: case TOKEN_EGID_GT: case TOKEN_EGID_NOT: + case TOKEN_XID_EQ: + case TOKEN_XID_LT: + case TOKEN_XID_GT: + case TOKEN_XID_NOT: // all these tokens can be specified only once if (*term_mask & (1 << terms[i].op)) { nterms = -EINVAL; diff --git a/kernel/vserver/context.c b/kernel/vserver/context.c index 3d59cd170..dba8af562 100644 --- a/kernel/vserver/context.c +++ b/kernel/vserver/context.c @@ -26,6 +26,11 @@ #include #include +#define CKRM_VSERVER_INTEGRATION +#ifdef CKRM_VSERVER_INTEGRATION +#include +#endif //CKRM_VSERVER_INTEGRATION + #include @@ -471,6 +476,15 @@ int vx_migrate_task(struct task_struct *p, struct vx_info *vxi) // put_vx_info(old_vxi); } out: + + +#ifdef CKRM_VSERVER_INTEGRATION + do { + ckrm_cb_xid(p); + } while (0); +#endif //CKRM_VSERVER_INTEGRATION + + put_vx_info(old_vxi); return ret; }
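
Note on the classification path this patch wires together: vx_migrate_task() now fires ckrm_cb_xid() for the migrated task, cb_taskclass_xid() turns that into a CKRM_EVENT_XID classification event, and the RBCE module matches the task's vserver context id against any xid rule terms parsed from the new "xid=", "xid<", "xid>" and "xid!" tokens. The standalone sketch below illustrates only the per-term comparison that the new RBCE_RULE_XID case in __evaluate_rule() performs; the operator enum value RBCE_EQUAL and the harness around it are assumptions made for the sketch, not names taken from the patch.

/* Standalone sketch of the xid term comparison added to __evaluate_rule().
 * RBCE_LESS_THAN, RBCE_GREATER_THAN and RBCE_NOT mirror the operators used
 * in the patch; RBCE_EQUAL and the main() harness are illustrative only. */
#include <stdio.h>

typedef unsigned int xid_t;	/* matches the typedef added to linux/types.h */

enum rbce_operator { RBCE_EQUAL, RBCE_LESS_THAN, RBCE_GREATER_THAN, RBCE_NOT };

/* Returns 1 if the task's xid satisfies the rule term "xid <op> id". */
static int evaluate_xid_term(xid_t task_xid, enum rbce_operator op, xid_t id)
{
	switch (op) {
	case RBCE_LESS_THAN:
		return task_xid < id;
	case RBCE_GREATER_THAN:
		return task_xid > id;
	case RBCE_NOT:
		return task_xid != id;
	default:		/* plain "xid=<id>" term */
		return task_xid == id;
	}
}

int main(void)
{
	/* A task in vserver context 42, checked against a few sample terms. */
	xid_t xid = 42;

	printf("xid=42  -> %d\n", evaluate_xid_term(xid, RBCE_EQUAL, 42));
	printf("xid<100 -> %d\n", evaluate_xid_term(xid, RBCE_LESS_THAN, 100));
	printf("xid!0   -> %d\n", evaluate_xid_term(xid, RBCE_NOT, 0));
	return 0;
}

A rule written through RBCE's rule interface would use the new token syntax, e.g. "xid=42" or "xid>100" (token patterns from the token.c hunk); the exact rcfs path and the remainder of the rule string depend on the RBCE configuration interface and are not shown here.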
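
The exit path also changes: cb_taskclass_exit() no longer unlinks the task by hand but calls ckrm_set_taskclass() with the sentinel value (void *)-1, which ckrm_set_taskclass() now treats as "this task is permanently detached from CKRM; ignore further reclassification". The fragment below is a minimal standalone illustration of that tri-state pointer convention (NULL = not yet classified, valid pointer = classified, (void *)-1 = detached); every name in it is invented for the sketch, and only the convention itself mirrors the patch.

/* Minimal illustration of the (void *)-1 "detached" sentinel that this
 * patch introduces for tsk->taskclass.  All names are invented for the
 * sketch; only the tri-state convention reflects the kernel change. */
#include <stdio.h>

#define CKRM_DETACHED ((struct demo_class *)-1)

struct demo_class { const char *name; };

struct demo_task {
	const char *comm;
	struct demo_class *taskclass;	/* NULL, valid pointer, or CKRM_DETACHED */
};

static void demo_set_taskclass(struct demo_task *tsk, struct demo_class *newcls)
{
	if (tsk->taskclass == CKRM_DETACHED) {
		/* mirrors the early return added to ckrm_set_taskclass():
		 * a detached (exiting) task is never reclassified again */
		printf("%s: detached, ignoring reclassify\n", tsk->comm);
		return;
	}
	tsk->taskclass = newcls;
	printf("%s -> %s\n", tsk->comm,
	       newcls == CKRM_DETACHED ? "<detached>" :
	       newcls ? newcls->name : "<none>");
}

int main(void)
{
	struct demo_class web = { "webclass" };
	struct demo_task t = { "httpd", NULL };

	demo_set_taskclass(&t, &web);		/* normal classification */
	demo_set_taskclass(&t, CKRM_DETACHED);	/* exit path: detach */
	demo_set_taskclass(&t, &web);		/* ignored: task is detached */
	return 0;
}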