1 /* ckrm.c - Class-based Kernel Resource Management (CKRM)
3 * Copyright (C) Hubertus Franke, IBM Corp. 2003, 2004
4 * (C) Shailabh Nagar, IBM Corp. 2003, 2004
5 * (C) Chandra Seetharaman, IBM Corp. 2003
6 * (C) Vivek Kashyap, IBM Corp. 2004
9 * Provides kernel API of CKRM for in-kernel,per-resource controllers
10 * (one each for cpu, memory, io, network) and callbacks for
11 * classification modules.
13 * Latest version, more details at http://ckrm.sf.net
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
27 * Made modifications to suit the new RBCE module.
29 * Fixed a bug in fork and exit callbacks. Added callbacks_active and
30 * surrounding logic. Added task paramter for all CE callbacks.
32 * moved to referenced counted class objects and correct locking
34 * Integrated ckrm hooks, classtypes, ...
38 #include <linux/config.h>
39 #include <linux/init.h>
40 #include <linux/linkage.h>
41 #include <linux/kernel.h>
42 #include <linux/errno.h>
43 #include <asm/uaccess.h>
45 #include <asm/errno.h>
46 #include <linux/string.h>
47 #include <linux/list.h>
48 #include <linux/spinlock.h>
49 #include <linux/module.h>
50 #include <linux/ckrm_rc.h>
51 #include <linux/rcfs.h>
55 rwlock_t ckrm_class_lock = RW_LOCK_UNLOCKED; // protect classlists
57 struct rcfs_functions rcfs_fn;
58 EXPORT_SYMBOL(rcfs_fn);
60 // rcfs state needed by another module
62 EXPORT_SYMBOL(rcfs_engine_regd);
65 EXPORT_SYMBOL(rcfs_mounted);
67 /**************************************************************************
69 **************************************************************************/
72 * Return TRUE if the given core class pointer is valid.
76 * Return TRUE if the given resource is registered.
/*
 * is_res_regd - nonzero iff @resid names a registered resource controller
 * of @clstype: in the valid range and its bit set in bit_res_ctlrs.
 * NOTE(review): interior lines of this function are elided in this view.
 */
78 inline unsigned int is_res_regd(struct ckrm_classtype *clstype, int resid)
80 return ((resid >= 0) && (resid < clstype->max_resid) &&
81 test_bit(resid, &clstype->bit_res_ctlrs)
/*
 * ckrm_resctlr_lookup - scan the registered controller slots of @clstype
 * and return the one whose res_name matches @resname.  Bails out early on
 * NULL @clstype or @resname.
 */
85 struct ckrm_res_ctlr *ckrm_resctlr_lookup(struct ckrm_classtype *clstype,
90 if (!clstype || !resname) {
/* only probe slots whose registration bit is set */
93 for (resid = 0; resid < clstype->max_resid; resid++) {
94 if (test_bit(resid, &clstype->bit_res_ctlrs)) {
95 struct ckrm_res_ctlr *rctrl = clstype->res_ctlrs[resid];
96 if (!strncmp(resname, rctrl->res_name,
104 EXPORT_SYMBOL(ckrm_resctlr_lookup);
106 /* given a classname return the class handle and its classtype*/
/*
 * Walks every classtype's class list under ckrm_class_lock looking for a
 * core class whose name equals @classname; on a hit, stores the classtype's
 * typeID through @classTypeID.
 */
107 void *ckrm_classobj(char *classname, int *classTypeID)
112 if (!classname || !*classname) {
116 read_lock(&ckrm_class_lock);
117 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
118 struct ckrm_classtype *ctype = ckrm_classtypes[i];
119 struct ckrm_core_class *core;
123 list_for_each_entry(core, &ctype->classes, clslist) {
124 if (core->name && !strcmp(core->name, classname)) {
125 // FIXME: should grab a reference on the class before dropping the lock
126 read_unlock(&ckrm_class_lock);
127 *classTypeID = ctype->typeID;
132 read_unlock(&ckrm_class_lock);
136 EXPORT_SYMBOL(is_res_regd);
137 EXPORT_SYMBOL(ckrm_classobj);
139 /**************************************************************************
140 * Internal Functions/macros *
141 **************************************************************************/
/*
 * CE callbacks fire only while an engine holds a reference
 * (ce_nr_users > 0) AND either the engine asked to always be called back
 * or there is at least one non-default class to classify into.
 */
143 static inline void set_callbacks_active(struct ckrm_classtype *ctype)
145 ctype->ce_cb_active = ((atomic_read(&ctype->ce_nr_users) > 0) &&
146 (ctype->ce_callbacks.always_callback
147 || (ctype->num_classes > 1)));
/*
 * Atomically (under ckrm_class_lock) check that @core is still a valid
 * class and, if so, take a reference on it so it cannot go away.
 */
150 int ckrm_validate_and_grab_core(struct ckrm_core_class *core)
153 read_lock(&ckrm_class_lock);
154 if (likely(ckrm_is_core_valid(core))) {
155 ckrm_core_grab(core);
158 read_unlock(&ckrm_class_lock);
162 /****************************************************************************
163 * Interfaces for classification engine *
164 ****************************************************************************/
167 * Registering a callback structure by the classification engine.
169 * Returns typeId of class on success -errno for failure.
/*
 * ckrm_register_engine - attach a classification engine's callback table
 * to the classtype named @typename.  Validates the callback combination,
 * rejects a second engine, copies the callbacks, and replays class_add for
 * every existing class so the engine sees current state.
 * Returns the classtype's typeID (elided error paths return -errno).
 */
171 int ckrm_register_engine(const char *typename, ckrm_eng_callback_t * ecbs)
173 struct ckrm_classtype *ctype;
175 ctype = ckrm_find_classtype_by_name(typename);
180 if (atomic_read(&ctype->ce_nr_users) != 1) {
181 // Some engine is active, deregister it first.
186 /* One of the following must be set:
187 classify, class_delete (due to object reference) or
188 notify (case where notification supported but not classification)
189 The function pointer must be set the moment the mask is non-null
192 if (!(((ecbs->classify) && (ecbs->class_delete)) || (ecbs->notify)) ||
193 (ecbs->c_interest && ecbs->classify == NULL) ||
194 (ecbs->n_interest && ecbs->notify == NULL)) {
199 /* Is any other engine registered for this classtype ? */
200 if (ctype->ce_regd) {
206 ctype->ce_callbacks = *ecbs;
207 set_callbacks_active(ctype);
/* replay class_add for classes created before the engine registered */
209 if (ctype->ce_callbacks.class_add) {
210 struct ckrm_core_class *core;
212 read_lock(&ckrm_class_lock);
214 list_for_each_entry(core, &ctype->classes, clslist) {
215 (*ctype->ce_callbacks.class_add) (core->name, core,
218 read_unlock(&ckrm_class_lock);
220 return ctype->typeID;
224 * Unregistering a callback structure by the classification engine.
226 * Returns 0 on success -errno for failure.
/*
 * ckrm_unregister_engine - detach the classification engine from the
 * classtype named @typename.  Disables callbacks first, then tries to drop
 * the engine's user count to zero; if other users remain the decrement is
 * undone and the call fails (elided path).  On success the callback table
 * is wiped.
 */
228 int ckrm_unregister_engine(const char *typename)
230 struct ckrm_classtype *ctype;
232 ctype = ckrm_find_classtype_by_name(typename);
236 ctype->ce_cb_active = 0;
/*
 * atomic_dec_and_test returns 1 only when the count reaches zero; a
 * result != 1 here means users remain, so restore the count and bail.
 */
238 if (atomic_dec_and_test(&ctype->ce_nr_users) != 1) {
239 // Somebody is currently using the engine, cannot deregister.
240 atomic_inc(&ctype->ce_nr_users);
245 memset(&ctype->ce_callbacks, 0, sizeof(ckrm_eng_callback_t));
249 /****************************************************************************
250 * Interfaces to manipulate class (core or resource) hierarchies
251 ****************************************************************************/
/*
 * ckrm_add_child - link @child into the hierarchy under @parent.
 * Initializes the child's hnode lists, then inserts it into the parent's
 * children list under the parent's hnode_rwlock (write side) and records
 * the back-pointer.  Validity of both cores is checked with printk on
 * failure (elided error returns).
 */
256 ckrm_add_child(struct ckrm_core_class *parent, struct ckrm_core_class *child)
258 struct ckrm_hnode *cnode = &child->hnode;
260 if (!ckrm_is_core_valid(child)) {
261 printk(KERN_ERR "Invalid child %p given in ckrm_add_child\n",
267 INIT_LIST_HEAD(&cnode->children);
268 INIT_LIST_HEAD(&cnode->siblings);
271 struct ckrm_hnode *pnode;
273 if (!ckrm_is_core_valid(parent)) {
275 "Invalid parent %p given in ckrm_add_child\n",
279 pnode = &parent->hnode;
280 write_lock(&parent->hnode_rwlock);
281 list_add(&cnode->siblings, &pnode->children);
282 write_unlock(&parent->hnode_rwlock);
285 cnode->parent = parent;
/*
 * ckrm_remove_child - unlink @child from its parent's children list.
 * Refuses if the child itself still has children (a class with descendants
 * cannot be removed).  The sibling unlink happens under the parent's
 * hnode_rwlock (write side); the parent back-pointer is then cleared.
 */
292 static int ckrm_remove_child(struct ckrm_core_class *child)
294 struct ckrm_hnode *cnode, *pnode;
295 struct ckrm_core_class *parent;
297 if (!ckrm_is_core_valid(child)) {
298 printk(KERN_ERR "Invalid child %p given"
299 " in ckrm_remove_child\n",
304 cnode = &child->hnode;
305 parent = cnode->parent;
306 if (!ckrm_is_core_valid(parent)) {
307 printk(KERN_ERR "Invalid parent %p in ckrm_remove_child\n",
312 pnode = &parent->hnode;
315 /* ensure that the node does not have children */
316 if (!list_empty(&cnode->children)) {
320 write_lock(&parent->hnode_rwlock);
321 list_del(&cnode->siblings);
322 write_unlock(&parent->hnode_rwlock);
323 cnode->parent = NULL;
/* Take the read side of @parent's hierarchy lock (no-op for invalid core). */
328 void ckrm_lock_hier(struct ckrm_core_class *parent)
330 if (ckrm_is_core_valid(parent)) {
331 read_lock(&parent->hnode_rwlock);
/* Release the read side taken by ckrm_lock_hier (no-op for invalid core). */
335 void ckrm_unlock_hier(struct ckrm_core_class *parent)
337 if (ckrm_is_core_valid(parent)) {
338 read_unlock(&parent->hnode_rwlock);
343 * hnode_rwlock of the parent core class must held in read mode.
344 * external callers should 've called ckrm_lock_hier before calling this
/* Map an hnode pointer back to its enclosing core class (NULL-safe). */
347 #define hnode_2_core(ptr) \
348 ((ptr)? container_of(ptr, struct ckrm_core_class, hnode) : NULL)
/*
 * ckrm_get_next_child - iterator over @parent's children.
 * Pass child == NULL (elided branch) to get the first child, or the
 * previous child to get its next sibling; returns NULL when the walk is
 * back at the list anchor.  Caller must hold the hierarchy read lock
 * (ckrm_lock_hier), see comment at line 343/344 above.
 */
350 struct ckrm_core_class *ckrm_get_next_child(struct ckrm_core_class *parent,
351 struct ckrm_core_class *child)
353 struct list_head *cnode;
354 struct ckrm_hnode *next_cnode;
355 struct ckrm_core_class *next_childcore;
357 if (!ckrm_is_core_valid(parent)) {
358 printk(KERN_ERR "Invalid parent %p in ckrm_get_next_child\n",
362 if (list_empty(&parent->hnode.children)) {
367 if (!ckrm_is_core_valid(child)) {
369 "Invalid child %p in ckrm_get_next_child\n",
373 cnode = child->hnode.siblings.next;
375 cnode = parent->hnode.children.next;
378 if (cnode == &parent->hnode.children) { // back at the anchor
382 next_cnode = container_of(cnode, struct ckrm_hnode, siblings);
383 next_childcore = hnode_2_core(next_cnode);
385 if (!ckrm_is_core_valid(next_childcore)) {
387 "Invalid next child %p in ckrm_get_next_child\n",
391 return next_childcore;
394 EXPORT_SYMBOL(ckrm_lock_hier);
395 EXPORT_SYMBOL(ckrm_unlock_hier);
396 EXPORT_SYMBOL(ckrm_get_next_child);
/*
 * ckrm_alloc_res_class - create the per-resource class object for slot
 * @resid of @core, if that resource controller is registered and provides
 * a res_alloc hook.  nr_resusers is bumped around the controller call;
 * note the decrement at line 429 runs only on the elided else-path
 * (res_alloc compensates it on success, see matching logic in
 * ckrm_free_res_class).
 */
399 ckrm_alloc_res_class(struct ckrm_core_class *core,
400 struct ckrm_core_class *parent, int resid)
403 struct ckrm_classtype *clstype;
406 * Allocate a resource class only if the resource controller has
407 * registered with core and the engine requests for the class.
410 if (!ckrm_is_core_valid(core))
413 clstype = core->classtype;
414 core->res_class[resid] = NULL;
416 if (test_bit(resid, &clstype->bit_res_ctlrs)) {
417 ckrm_res_ctlr_t *rcbs;
419 atomic_inc(&clstype->nr_resusers[resid]);
420 rcbs = clstype->res_ctlrs[resid];
422 if (rcbs && rcbs->res_alloc) {
423 core->res_class[resid] =
424 (*rcbs->res_alloc) (core, parent);
425 if (core->res_class[resid])
427 printk(KERN_ERR "Error creating res class\n");
429 atomic_dec(&clstype->nr_resusers[resid]);
434 * Initialize a core class
/* Debug trace helper; the printk is compiled out. */
438 #define CLS_DEBUG(fmt, args...) \
439 do { /* printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0)
/*
 * ckrm_init_core_class - initialize core class @dcore of @clstype under
 * @parent: allocate the res_class pointer array, set magic/locks/refcount,
 * link it into the classtype's class list (under ckrm_class_lock, which
 * also updates num_classes and callback activation), attach it to the
 * hierarchy, and allocate each registered resource's class object.
 * Grabs references on both @parent and @dcore (race fix, see line 494).
 * Only the default class may have an invalid parent.
 */
442 ckrm_init_core_class(struct ckrm_classtype *clstype,
443 struct ckrm_core_class *dcore,
444 struct ckrm_core_class *parent, const char *name)
446 // Hubertus ... should replace name with dentry or add dentry ?
449 // Hubertus .. how is this used in initialization
451 CLS_DEBUG("name %s => %p\n", name ? name : "default", dcore);
453 if ((dcore != clstype->default_class) && (!ckrm_is_core_valid(parent))){
454 printk("error not a valid parent %p\n", parent);
458 // Hubertus .. dynamic allocation still breaks when RCs registers.
459 // See def in ckrm_rc.h
460 dcore->res_class = NULL;
461 if (clstype->max_resid > 0) {
/* NOTE(review): kmalloc flags argument is on an elided line — verify GFP_KERNEL */
463 (void **)kmalloc(clstype->max_resid * sizeof(void *),
465 if (dcore->res_class == NULL) {
466 printk("error no mem\n");
472 dcore->classtype = clstype;
473 dcore->magic = CKRM_CORE_MAGIC;
475 dcore->class_lock = SPIN_LOCK_UNLOCKED;
476 dcore->hnode_rwlock = RW_LOCK_UNLOCKED;
479 atomic_set(&dcore->refcnt, 0);
480 write_lock(&ckrm_class_lock);
482 INIT_LIST_HEAD(&dcore->objlist);
483 list_add_tail(&dcore->clslist, &clstype->classes);
485 clstype->num_classes++;
486 set_callbacks_active(clstype);
488 write_unlock(&ckrm_class_lock);
489 ckrm_add_child(parent, dcore);
491 for (i = 0; i < clstype->max_resid; i++)
492 ckrm_alloc_res_class(dcore, parent, i);
494 // fix for race condition seen in stress with numtasks
496 ckrm_core_grab(parent);
498 ckrm_core_grab(dcore);
/*
 * ckrm_free_res_class - release the resource-class object in slot @resid
 * of @core via the controller's res_free hook, balancing the nr_resusers
 * increment taken in ckrm_alloc_res_class (one decrement here plus one
 * compensating decrement when res_free is called).
 */
502 static void ckrm_free_res_class(struct ckrm_core_class *core, int resid)
505 * Free a resource class only if the resource controller has
506 * registered with core
508 if (core->res_class[resid]) {
509 ckrm_res_ctlr_t *rcbs;
510 struct ckrm_classtype *clstype = core->classtype;
512 atomic_inc(&clstype->nr_resusers[resid]);
513 rcbs = clstype->res_ctlrs[resid];
515 if (rcbs->res_free) {
516 (*rcbs->res_free) (core->res_class[resid]);
517 // compensate inc in alloc
518 atomic_dec(&clstype->nr_resusers[resid]);
520 atomic_dec(&clstype->nr_resusers[resid]);
522 core->res_class[resid] = NULL;
527 * requires that all tasks were previously reassigned to another class
529 * Returns 0 on success -errno on failure.
/*
 * ckrm_free_core_class - final destruction of @core after its refcount
 * dropped (see ckrm_release_core_class): detach from the hierarchy, free
 * each resource class, then unlink from the classtype's class list under
 * ckrm_class_lock (updating num_classes / callback activation) and drop
 * the parent reference taken at init.
 * Precondition (line 527 above): all tasks were already reassigned.
 */
532 void ckrm_free_core_class(struct ckrm_core_class *core)
535 struct ckrm_classtype *clstype = core->classtype;
536 struct ckrm_core_class *parent = core->hnode.parent;
538 CLS_DEBUG("core=%p:%s parent=%p:%s\n", core, core->name, parent,
541 /* this core was marked as late */
542 printk("class <%s> finally deleted %lu\n", core->name, jiffies);
/* NOTE(review): "Chilren" typo below is in a runtime string, left untouched */
544 if (ckrm_remove_child(core) == 0) {
545 printk("Core class removal failed. Chilren present\n");
548 for (i = 0; i < clstype->max_resid; i++) {
549 ckrm_free_res_class(core, i);
552 write_lock(&ckrm_class_lock);
554 // Clear the magic, so we would know if this core is reused.
556 #if 0 // Dynamic not yet enabled
557 core->res_class = NULL;
559 // Remove this core class from its linked list.
560 list_del(&core->clslist);
561 clstype->num_classes--;
562 set_callbacks_active(clstype);
563 write_unlock(&ckrm_class_lock);
565 // fix for race condition seen in stress with numtasks
567 ckrm_core_drop(parent);
/*
 * ckrm_release_core_class - request deletion of @core.  The default class
 * is never deleted.  If other references are still outstanding
 * (refcnt != 1) deletion is deferred by setting core->delayed; the final
 * ckrm_core_drop releases this caller's reference either way.
 */
572 int ckrm_release_core_class(struct ckrm_core_class *core)
574 if (!ckrm_is_core_valid(core)) {
579 if (core == core->classtype->default_class)
582 /* need to make sure that the class really got dropped */
583 if (atomic_read(&core->refcnt) != 1) {
584 CLS_DEBUG("class <%s> deletion delayed refcnt=%d jif=%ld\n",
585 core->name, atomic_read(&core->refcnt), jiffies);
586 core->delayed = 1; /* just so we have a ref point */
588 ckrm_core_drop(core);
592 /****************************************************************************
593 * Interfaces for the resource controller *
594 ****************************************************************************/
596 * Registering a callback structure by the resource controller.
598 * Returns the resource id(0 or +ve) on success, -errno for failure.
/*
 * ckrm_register_res_ctlr_intern - place @rcbs into a controller slot of
 * @clstype under res_ctlrs_lock.  A controller with a preassigned resid
 * uses that slot if free; otherwise (second loop) the first free
 * non-reserved slot is taken.  max_resid is raised to cover the new slot.
 * Returns the resource id (elided failure path returns -errno).
 * NOTE(review): the printk at line 613/614 dereferences
 * clstype->res_ctlrs[resid], which this view shows before the NULL check —
 * the elided lines presumably guard it; verify against the full source.
 */
601 ckrm_register_res_ctlr_intern(struct ckrm_classtype *clstype,
602 ckrm_res_ctlr_t * rcbs)
611 spin_lock(&clstype->res_ctlrs_lock);
613 printk(KERN_WARNING "resid is %d name is %s %s\n",
614 resid, rcbs->res_name, clstype->res_ctlrs[resid]->res_name);
617 if ((resid < CKRM_MAX_RES_CTLRS)
618 && (clstype->res_ctlrs[resid] == NULL)) {
619 clstype->res_ctlrs[resid] = rcbs;
620 atomic_set(&clstype->nr_resusers[resid], 0);
621 set_bit(resid, &clstype->bit_res_ctlrs);
623 if (resid >= clstype->max_resid) {
624 clstype->max_resid = resid + 1;
629 spin_unlock(&clstype->res_ctlrs_lock);
/* no preassigned slot: find the first free dynamic slot */
633 for (i = clstype->resid_reserved; i < clstype->max_res_ctlrs; i++) {
634 if (clstype->res_ctlrs[i] == NULL) {
635 clstype->res_ctlrs[i] = rcbs;
637 atomic_set(&clstype->nr_resusers[i], 0);
638 set_bit(i, &clstype->bit_res_ctlrs);
639 if (i >= clstype->max_resid) {
640 clstype->max_resid = i + 1;
642 spin_unlock(&clstype->res_ctlrs_lock);
647 spin_unlock(&clstype->res_ctlrs_lock);
/*
 * ckrm_register_res_ctlr - public registration entry point: reserve a
 * slot via ckrm_register_res_ctlr_intern, then walk every existing class
 * of @clstype (under ckrm_class_lock) creating its per-class resource
 * object and invoking the classtype's add_resctrl hook.
 * Returns the resource id from the intern helper.
 */
652 ckrm_register_res_ctlr(struct ckrm_classtype *clstype, ckrm_res_ctlr_t * rcbs)
654 struct ckrm_core_class *core;
657 resid = ckrm_register_res_ctlr_intern(clstype, rcbs);
660 /* run through all classes and create the resource class
661 * object and if necessary "initialize" class in context
664 read_lock(&ckrm_class_lock);
665 list_for_each_entry(core, &clstype->classes, clslist) {
666 printk("CKRM .. create res clsobj for resouce <%s>"
667 "class <%s> par=%p\n", rcbs->res_name,
668 core->name, core->hnode.parent);
669 ckrm_alloc_res_class(core, core->hnode.parent, resid);
671 if (clstype->add_resctrl) {
672 // FIXME: this should be mandatory
673 (*clstype->add_resctrl) (core, resid);
676 read_unlock(&ckrm_class_lock);
682 * Unregistering a callback structure by the resource controller.
684 * Returns 0 on success -errno for failure.
/*
 * ckrm_unregister_res_ctlr - tear down controller @rcbs: free its
 * resource class in every class of the classtype, refuse if users remain
 * (nr_resusers non-zero), then clear the slot, the registration bit and
 * recompute max_resid from the highest remaining bit.
 */
686 int ckrm_unregister_res_ctlr(struct ckrm_res_ctlr *rcbs)
688 struct ckrm_classtype *clstype = rcbs->classtype;
689 struct ckrm_core_class *core = NULL;
690 int resid = rcbs->resid;
692 if ((clstype == NULL) || (resid < 0)) {
695 // FIXME: probably need to also call deregistration function
697 read_lock(&ckrm_class_lock);
698 // free up this resource from all the classes
699 list_for_each_entry(core, &clstype->classes, clslist) {
700 ckrm_free_res_class(core, resid);
702 read_unlock(&ckrm_class_lock);
704 if (atomic_read(&clstype->nr_resusers[resid])) {
708 spin_lock(&clstype->res_ctlrs_lock);
709 clstype->res_ctlrs[resid] = NULL;
710 clear_bit(resid, &clstype->bit_res_ctlrs);
/* fls() of the bitmask = index one past the highest set bit */
711 clstype->max_resid = fls(clstype->bit_res_ctlrs);
713 spin_unlock(&clstype->res_ctlrs_lock);
718 /*******************************************************************
719 * Class Type Registration
720 *******************************************************************/
722 /* Hubertus ... we got to do some locking here */
725 struct ckrm_classtype *ckrm_classtypes[CKRM_MAX_CLASSTYPES];
726 // really should build a better interface for this
727 EXPORT_SYMBOL(ckrm_classtypes);
/*
 * ckrm_register_classtype - record @clstype in the global
 * ckrm_classtypes[] table.  A preassigned typeID claims its slot if free;
 * otherwise the loop assigns the first free non-reserved slot.  Notifies
 * the RCFS client via rcfs_fn.register_classtype if one is registered.
 * NOTE(review): the bound check uses "tid > CKRM_MAX_CLASSTYPES" —
 * looks like it should be >=; confirm against full source and array size.
 */
729 int ckrm_register_classtype(struct ckrm_classtype *clstype)
731 int tid = clstype->typeID;
734 if ((tid < 0) || (tid > CKRM_MAX_CLASSTYPES)
735 || (ckrm_classtypes[tid]))
739 for (i = CKRM_RESV_CLASSTYPES; i < CKRM_MAX_CLASSTYPES; i++) {
740 if (ckrm_classtypes[i] == NULL) {
748 clstype->typeID = tid;
749 ckrm_classtypes[tid] = clstype;
751 /* Hubertus .. we need to call the callbacks of the RCFS client */
752 if (rcfs_fn.register_classtype) {
753 (*rcfs_fn.register_classtype) (clstype);
754 // No error return for now ;
/*
 * ckrm_unregister_classtype - remove @clstype from ckrm_classtypes[]
 * (only if the slot actually holds it), notifying the RCFS client first.
 * typeID is reset to -1 so the classtype reads as unregistered.
 */
760 int ckrm_unregister_classtype(struct ckrm_classtype *clstype)
762 int tid = clstype->typeID;
764 if ((tid < 0) || (tid > CKRM_MAX_CLASSTYPES)
765 || (ckrm_classtypes[tid] != clstype))
768 if (rcfs_fn.deregister_classtype) {
769 (*rcfs_fn.deregister_classtype) (clstype);
770 // No error return for now
773 ckrm_classtypes[tid] = NULL;
774 clstype->typeID = -1;
/*
 * Linear scan of ckrm_classtypes[] for a classtype whose name matches
 * @name (bounded compare, CKRM_MAX_TYPENAME_LEN).
 */
778 struct ckrm_classtype *ckrm_find_classtype_by_name(const char *name)
781 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
782 struct ckrm_classtype *ctype = ckrm_classtypes[i];
783 if (ctype && !strncmp(ctype->name, name, CKRM_MAX_TYPENAME_LEN))
789 /*******************************************************************
790 * Event callback invocation
791 *******************************************************************/
793 struct ckrm_hook_cb *ckrm_event_callbacks[CKRM_NONLATCHABLE_EVENTS];
795 /* Registration / Deregistration / Invocation functions */
/*
 * ckrm_register_event_cb - append @cb to the singly-linked callback chain
 * for event @ev (only non-latchable events are accepted); walks to the
 * chain tail before linking.
 */
797 int ckrm_register_event_cb(enum ckrm_event ev, struct ckrm_hook_cb *cb)
799 struct ckrm_hook_cb **cbptr;
801 if ((ev < CKRM_LATCHABLE_EVENTS) || (ev >= CKRM_NONLATCHABLE_EVENTS))
803 cbptr = &ckrm_event_callbacks[ev];
805 while (*cbptr != NULL)
805 cbptr = &((*cbptr)->next);
/*
 * ckrm_unregister_event_cb - unlink @cb from the callback chain of event
 * @ev.  Searches for the matching node; if found, splices it out.  The
 * return expression reports whether the search fell off the end (cb not
 * found) — NOTE(review): returning after (*cbptr)->next was reassigned is
 * subtle; interior lines are elided, verify the found/not-found paths.
 */
810 int ckrm_unregister_event_cb(enum ckrm_event ev, struct ckrm_hook_cb *cb)
812 struct ckrm_hook_cb **cbptr;
814 if ((ev < CKRM_LATCHABLE_EVENTS) || (ev >= CKRM_NONLATCHABLE_EVENTS))
816 cbptr = &ckrm_event_callbacks[ev];
817 while ((*cbptr != NULL) && (*cbptr != cb))
818 cbptr = &((*cbptr)->next);
820 (*cbptr)->next = cb->next;
821 return (*cbptr == NULL);
/*
 * Register every callback in the -1-terminated spec array @especs.
 */
824 int ckrm_register_event_set(struct ckrm_event_spec especs[])
826 struct ckrm_event_spec *espec = especs;
828 for (espec = especs; espec->ev != -1; espec++)
829 ckrm_register_event_cb(espec->ev, &espec->cb);
/*
 * Unregister every callback in the -1-terminated spec array @especs.
 */
833 int ckrm_unregister_event_set(struct ckrm_event_spec especs[])
835 struct ckrm_event_spec *espec = especs;
837 for (espec = especs; espec->ev != -1; espec++)
838 ckrm_unregister_event_cb(espec->ev, &espec->cb);
/* Debug trace; expands to nothing (printk is commented out). */
842 #define ECC_PRINTK(fmt, args...) \
843 // printk("%s: " fmt, __FUNCTION__ , ## args)
/*
 * Invoke every callback registered for event @ev with @arg.
 * NOTE(review): the ECC_PRINTK call below passes three args for two
 * conversions ("%d %x") — harmless only because the macro is disabled;
 * fix the format string before re-enabling it.
 */
845 void ckrm_invoke_event_cb_chain(enum ckrm_event ev, void *arg)
847 struct ckrm_hook_cb *cb, *anchor;
849 ECC_PRINTK("%d %x\n", current, ev, arg);
850 if ((anchor = ckrm_event_callbacks[ev]) != NULL) {
851 for (cb = anchor; cb; cb = cb->next)
856 /*******************************************************************
857 * Generic Functions that can be used as default functions
858 * in almost all classtypes
859 * (a) function iterator over all resource classes of a class
860 * (b) function invoker on a named resource
861 *******************************************************************/
/*
 * ckrm_class_show_shares - emit one "res=...,guarantee=..." line per
 * registered controller into @seq.  nr_resusers is bumped around each
 * controller call to keep the controller from unregistering mid-query.
 */
863 int ckrm_class_show_shares(struct ckrm_core_class *core, struct seq_file *seq)
866 struct ckrm_res_ctlr *rcbs;
867 struct ckrm_classtype *clstype = core->classtype;
868 struct ckrm_shares shares;
870 for (i = 0; i < clstype->max_resid; i++) {
871 atomic_inc(&clstype->nr_resusers[i]);
872 rcbs = clstype->res_ctlrs[i];
873 if (rcbs && rcbs->get_share_values) {
874 (*rcbs->get_share_values) (core->res_class[i], &shares);
875 seq_printf(seq,"res=%s,guarantee=%d,limit=%d,"
876 "total_guarantee=%d,max_limit=%d\n",
877 rcbs->res_name, shares.my_guarantee,
878 shares.my_limit, shares.total_guarantee,
881 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_show_stats - let each registered controller append its
 * statistics for @core to @seq (same nr_resusers pinning as show_shares).
 */
886 int ckrm_class_show_stats(struct ckrm_core_class *core, struct seq_file *seq)
889 struct ckrm_res_ctlr *rcbs;
890 struct ckrm_classtype *clstype = core->classtype;
892 for (i = 0; i < clstype->max_resid; i++) {
893 atomic_inc(&clstype->nr_resusers[i]);
894 rcbs = clstype->res_ctlrs[i];
895 if (rcbs && rcbs->get_stats)
896 (*rcbs->get_stats) (core->res_class[i], seq);
897 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_show_config - let each registered controller append its
 * configuration for @core to @seq (same nr_resusers pinning as above).
 */
902 int ckrm_class_show_config(struct ckrm_core_class *core, struct seq_file *seq)
905 struct ckrm_res_ctlr *rcbs;
906 struct ckrm_classtype *clstype = core->classtype;
908 for (i = 0; i < clstype->max_resid; i++) {
909 atomic_inc(&clstype->nr_resusers[i]);
910 rcbs = clstype->res_ctlrs[i];
911 if (rcbs && rcbs->show_config)
912 (*rcbs->show_config) (core->res_class[i], seq);
913 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_set_config - pass config string @cfgstr to the controller
 * named @resname for class @core; fails if the controller is missing or
 * has no set_config hook (elided error return).
 */
918 int ckrm_class_set_config(struct ckrm_core_class *core, const char *resname,
921 struct ckrm_classtype *clstype = core->classtype;
922 struct ckrm_res_ctlr *rcbs = ckrm_resctlr_lookup(clstype, resname);
925 if (rcbs == NULL || rcbs->set_config == NULL)
927 rc = (*rcbs->set_config) (core->res_class[rcbs->resid], cfgstr);
/*
 * A share value is legal if in range (elided first clause) or one of the
 * sentinels UNCHANGED / DONTCARE.
 */
931 #define legalshare(a) \
933 || ((a) == CKRM_SHARE_UNCHANGED) \
934 || ((a) == CKRM_SHARE_DONTCARE) )
/*
 * ckrm_class_set_shares - validate all four share fields of @shares and
 * hand them to the controller named @resname for class @core.
 */
936 int ckrm_class_set_shares(struct ckrm_core_class *core, const char *resname,
937 struct ckrm_shares *shares)
939 struct ckrm_classtype *clstype = core->classtype;
940 struct ckrm_res_ctlr *rcbs;
943 // Check for legal values
944 if (!legalshare(shares->my_guarantee) || !legalshare(shares->my_limit)
945 || !legalshare(shares->total_guarantee)
946 || !legalshare(shares->max_limit))
949 rcbs = ckrm_resctlr_lookup(clstype, resname);
950 if (rcbs == NULL || rcbs->set_share_values == NULL)
952 rc = (*rcbs->set_share_values) (core->res_class[rcbs->resid], shares);
/*
 * ckrm_class_reset_stats - ask the controller named @resname to reset its
 * statistics for class @core; fails if the controller is missing or has
 * no reset_stats hook (elided error return).
 */
956 int ckrm_class_reset_stats(struct ckrm_core_class *core, const char *resname,
959 struct ckrm_classtype *clstype = core->classtype;
960 struct ckrm_res_ctlr *rcbs = ckrm_resctlr_lookup(clstype, resname);
963 if (rcbs == NULL || rcbs->reset_stats == NULL)
965 rc = (*rcbs->reset_stats) (core->res_class[rcbs->resid]);
969 /*******************************************************************
971 *******************************************************************/
/*
 * Fork-path hook: initialize the new task's CKRM lock and fire the
 * NEWTASK event chain so engines can classify it.
 */
973 void ckrm_cb_newtask(struct task_struct *tsk)
976 spin_lock_init(&tsk->ckrm_tsklock);
977 ckrm_invoke_event_cb_chain(CKRM_EVENT_NEWTASK, tsk);
/* Exit-path hook: fire the EXIT event chain for the departing task. */
980 void ckrm_cb_exit(struct task_struct *tsk)
982 ckrm_invoke_event_cb_chain(CKRM_EVENT_EXIT, tsk);
/*
 * ckrm_init - boot-time initialization: register the built-in classtypes
 * (task class and/or socket class depending on config), then classify
 * init_task so every later task inherits a classification.
 */
986 void __init ckrm_init(void)
988 printk("CKRM Initialization\n");
990 // register/initialize the Metatypes
992 #ifdef CONFIG_CKRM_TYPE_TASKCLASS
994 extern void ckrm_meta_init_taskclass(void);
995 ckrm_meta_init_taskclass();
998 #ifdef CONFIG_CKRM_TYPE_SOCKETCLASS
1000 extern void ckrm_meta_init_sockclass(void);
1001 ckrm_meta_init_sockclass();
1004 // prepare init_task and then rely on inheritance of properties
1005 ckrm_cb_newtask(&init_task);
1006 printk("CKRM Initialization done\n");
1009 EXPORT_SYMBOL(ckrm_register_engine);
1010 EXPORT_SYMBOL(ckrm_unregister_engine);
1012 EXPORT_SYMBOL(ckrm_register_res_ctlr);
1013 EXPORT_SYMBOL(ckrm_unregister_res_ctlr);
1015 EXPORT_SYMBOL(ckrm_init_core_class);
1016 EXPORT_SYMBOL(ckrm_free_core_class);
1017 EXPORT_SYMBOL(ckrm_release_core_class);
1019 EXPORT_SYMBOL(ckrm_register_classtype);
1020 EXPORT_SYMBOL(ckrm_unregister_classtype);
1021 EXPORT_SYMBOL(ckrm_find_classtype_by_name);
1023 EXPORT_SYMBOL(ckrm_core_grab);
1024 EXPORT_SYMBOL(ckrm_core_drop);
1025 EXPORT_SYMBOL(ckrm_is_core_valid);
1026 EXPORT_SYMBOL(ckrm_validate_and_grab_core);
1028 EXPORT_SYMBOL(ckrm_register_event_set);
1029 EXPORT_SYMBOL(ckrm_unregister_event_set);
1030 EXPORT_SYMBOL(ckrm_register_event_cb);
1031 EXPORT_SYMBOL(ckrm_unregister_event_cb);
1033 EXPORT_SYMBOL(ckrm_class_show_stats);
1034 EXPORT_SYMBOL(ckrm_class_show_config);
1035 EXPORT_SYMBOL(ckrm_class_show_shares);
1037 EXPORT_SYMBOL(ckrm_class_set_config);
1038 EXPORT_SYMBOL(ckrm_class_set_shares);
1040 EXPORT_SYMBOL(ckrm_class_reset_stats);