1 /* ckrm.c - Class-based Kernel Resource Management (CKRM)
3 * Copyright (C) Hubertus Franke, IBM Corp. 2003, 2004
4 * (C) Shailabh Nagar, IBM Corp. 2003, 2004
5 * (C) Chandra Seetharaman, IBM Corp. 2003
6 * (C) Vivek Kashyap, IBM Corp. 2004
9 * Provides kernel API of CKRM for in-kernel, per-resource controllers
10 * (one each for cpu, memory, io, network) and callbacks for
11 * classification modules.
13 * Latest version, more details at http://ckrm.sf.net
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
27 * Made modifications to suit the new RBCE module.
29 * Fixed a bug in fork and exit callbacks. Added callbacks_active and
30 * surrounding logic. Added task parameter for all CE callbacks.
32 * moved to referenced counted class objects and correct locking
34 * Integrated ckrm hooks, classtypes, ...
38 #include <linux/config.h>
39 #include <linux/init.h>
40 #include <linux/linkage.h>
41 #include <linux/kernel.h>
42 #include <linux/errno.h>
43 #include <asm/uaccess.h>
45 #include <asm/errno.h>
46 #include <linux/string.h>
47 #include <linux/list.h>
48 #include <linux/spinlock.h>
49 #include <linux/module.h>
50 #include <linux/ckrm_rc.h>
51 #include <linux/rcfs.h>
55 rwlock_t ckrm_class_lock = RW_LOCK_UNLOCKED; // protect classlists
57 struct rcfs_functions rcfs_fn;
58 EXPORT_SYMBOL(rcfs_fn);
60 // rcfs state needed by another module
62 EXPORT_SYMBOL(rcfs_engine_regd);
65 EXPORT_SYMBOL(rcfs_mounted);
67 /**************************************************************************
69 **************************************************************************/
72 * Return TRUE if the given core class pointer is valid.
76 * Return TRUE if the given resource is registered.
/*
 * is_res_regd - nonzero iff @resid names a registered resource controller
 * on @clstype: id within [0, max_resid) and its bit set in bit_res_ctlrs.
 */
78 inline unsigned int is_res_regd(struct ckrm_classtype *clstype, int resid)
80 return ((resid >= 0) && (resid < clstype->max_resid) &&
81 test_bit(resid, &clstype->bit_res_ctlrs)
/*
 * ckrm_resctlr_lookup - find a resource controller registered on
 * @clstype by name.  Returns the controller, or NULL for bad arguments
 * or no match (return paths elided in this listing).
 */
86 struct ckrm_res_ctlr *ckrm_resctlr_lookup(struct ckrm_classtype *clstype,
91 if (!clstype || !resname) {
/* only ids with a registered controller are worth comparing */
94 for (resid = 0; resid < clstype->max_resid; resid++) {
95 if (test_bit(resid, &clstype->bit_res_ctlrs)) {
96 struct ckrm_res_ctlr *rctrl = clstype->res_ctlrs[resid];
97 if (!strncmp(resname, rctrl->res_name,
/*
 * ckrm_classobj - given a class name, return the matching core-class
 * handle and store its classtype id in *classTypeID.  Scans every
 * classtype's class list under ckrm_class_lock.
 */
106 void *ckrm_classobj(const char *classname, int *classTypeID)
111 if (!classname || !*classname) {
115 read_lock(&ckrm_class_lock);
116 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
117 struct ckrm_classtype *ctype = ckrm_classtypes[i];
118 struct ckrm_core_class *core;
122 list_for_each_entry(core, &ctype->classes, clslist) {
123 if (core->name && !strcmp(core->name, classname)) {
124 // FIXME: should grab a reference on the core before returning it
125 read_unlock(&ckrm_class_lock);
126 *classTypeID = ctype->typeID;
131 read_unlock(&ckrm_class_lock);
135 EXPORT_SYMBOL(is_res_regd);
136 EXPORT_SYMBOL(ckrm_classobj);
138 /**************************************************************************
139 * Internal Functions/macros *
140 **************************************************************************/
/*
 * set_callbacks_active - recompute ctype->ce_cb_active: classification
 * engine callbacks are live only while an engine is registered AND
 * either the engine asked to always be called (always_callback) or at
 * least one class beyond the default exists.
 */
142 static inline void set_callbacks_active(struct ckrm_classtype *ctype)
144 ctype->ce_cb_active = ((atomic_read(&ctype->ce_regd) > 0) &&
145 (ctype->ce_callbacks.always_callback
146 || (ctype->num_classes > 1)));
/*
 * ckrm_validate_and_grab_core - under ckrm_class_lock, verify @core is a
 * live core class and take a reference on it.
 * NOTE(review): presumably returns nonzero on success; the return
 * statements are not visible in this listing - confirm against callers.
 */
149 int ckrm_validate_and_grab_core(struct ckrm_core_class *core)
152 read_lock(&ckrm_class_lock);
153 if (likely(ckrm_is_core_valid(core))) {
154 ckrm_core_grab(core);
157 read_unlock(&ckrm_class_lock);
161 /****************************************************************************
162 * Interfaces for classification engine *
163 ****************************************************************************/
166 * Registering a callback structure by the classification engine.
168 * Returns typeId of class on success -errno for failure.
/*
 * ckrm_register_engine - register a classification engine's callback
 * vector for classtype @typename.  Only one engine may be registered per
 * classtype (guarded via the ce_regd counter).  On success the engine's
 * class_add callback is replayed for every existing class, and the
 * classtype's typeID is returned.
 */
170 int ckrm_register_engine(const char *typename, ckrm_eng_callback_t * ecbs)
172 struct ckrm_classtype *ctype;
174 ctype = ckrm_find_classtype_by_name(typename);
/* optimistic claim: bump first, back off if we were not the first */
178 atomic_inc(&ctype->ce_regd);
180 /* another engine registered or trying to register ? */
181 if (atomic_read(&ctype->ce_regd) != 1) {
182 atomic_dec(&ctype->ce_regd);
186 /* One of the following must be set:
187 classify, class_delete (due to object reference) or
188 notify (case where notification supported but not classification)
189 The function pointer must be set the moment the mask is non-null
192 if (!(((ecbs->classify) && (ecbs->class_delete)) || (ecbs->notify)) ||
193 (ecbs->c_interest && ecbs->classify == NULL) ||
194 (ecbs->n_interest && ecbs->notify == NULL)) {
195 atomic_dec(&ctype->ce_regd);
/* engine accepted: install callbacks and recompute active state */
199 ctype->ce_callbacks = *ecbs;
200 set_callbacks_active(ctype);
/* replay all existing classes to the newly registered engine */
202 if (ctype->ce_callbacks.class_add) {
203 struct ckrm_core_class *core;
205 read_lock(&ckrm_class_lock);
207 list_for_each_entry(core, &ctype->classes, clslist) {
208 (*ctype->ce_callbacks.class_add) (core->name, core,
211 read_unlock(&ckrm_class_lock);
213 return ctype->typeID;
217 * Unregistering a callback structure by the classification engine.
219 * Returns 0 on success -errno for failure.
/*
 * ckrm_unregister_engine - detach the classification engine from
 * classtype @typename.  Refuses while the engine still has users
 * (ce_nr_users > 1); otherwise clears registration state and zeroes the
 * callback vector.
 */
221 int ckrm_unregister_engine(const char *typename)
223 struct ckrm_classtype *ctype;
225 ctype = ckrm_find_classtype_by_name(typename);
/* stop callback delivery before tearing anything down */
229 ctype->ce_cb_active = 0;
231 if (atomic_read(&ctype->ce_nr_users) > 1) {
232 // Somebody is currently using the engine, cannot deregister.
236 atomic_set(&ctype->ce_regd, 0);
237 memset(&ctype->ce_callbacks, 0, sizeof(ckrm_eng_callback_t));
241 /****************************************************************************
242 * Interfaces to manipulate class (core or resource) hierarchies
243 ****************************************************************************/
/*
 * ckrm_add_child - link @child into @parent's hierarchy.  Initializes
 * the child's hnode lists and, when a parent is given, inserts the child
 * on the parent's children list under the parent's hnode_rwlock.
 */
248 ckrm_add_child(struct ckrm_core_class *parent, struct ckrm_core_class *child)
250 struct ckrm_hnode *cnode = &child->hnode;
252 if (!ckrm_is_core_valid(child)) {
253 printk(KERN_ERR "Invalid child %p given in ckrm_add_child\n",
/* fresh node: no children yet, not on any sibling list */
259 INIT_LIST_HEAD(&cnode->children);
260 INIT_LIST_HEAD(&cnode->siblings);
263 struct ckrm_hnode *pnode;
265 if (!ckrm_is_core_valid(parent)) {
267 "Invalid parent %p given in ckrm_add_child\n",
271 pnode = &parent->hnode;
272 write_lock(&parent->hnode_rwlock);
273 list_add(&cnode->siblings, &pnode->children);
274 write_unlock(&parent->hnode_rwlock);
/* NOTE(review): the parent backlink is set outside hnode_rwlock - verify */
277 cnode->parent = parent;
/*
 * ckrm_remove_child - unlink @child from its parent's hierarchy.
 * Refuses while the child still has children of its own; otherwise
 * removes the sibling link under the parent's hnode_rwlock and clears
 * the parent backlink.
 */
284 static int ckrm_remove_child(struct ckrm_core_class *child)
286 struct ckrm_hnode *cnode, *pnode;
287 struct ckrm_core_class *parent;
289 if (!ckrm_is_core_valid(child)) {
290 printk(KERN_ERR "Invalid child %p given"
291 " in ckrm_remove_child\n",
296 cnode = &child->hnode;
297 parent = cnode->parent;
298 if (!ckrm_is_core_valid(parent)) {
299 printk(KERN_ERR "Invalid parent %p in ckrm_remove_child\n",
304 pnode = &parent->hnode;
307 /* ensure that the node does not have children */
308 if (!list_empty(&cnode->children)) {
312 write_lock(&parent->hnode_rwlock);
313 list_del(&cnode->siblings);
314 write_unlock(&parent->hnode_rwlock);
315 cnode->parent = NULL;
/* Hold @parent's children list stable (read side) for hierarchy walks. */
320 void ckrm_lock_hier(struct ckrm_core_class *parent)
322 if (ckrm_is_core_valid(parent)) {
323 read_lock(&parent->hnode_rwlock);
/* Release the hierarchy read lock taken by ckrm_lock_hier(). */
327 void ckrm_unlock_hier(struct ckrm_core_class *parent)
329 if (ckrm_is_core_valid(parent)) {
330 read_unlock(&parent->hnode_rwlock);
335 * hnode_rwlock of the parent core class must held in read mode.
336 * external callers should 've called ckrm_lock_hier before calling this
/* NULL-safe: map an embedded ckrm_hnode back to its owning core class. */
#define hnode_2_core(hnptr) \
	((hnptr) == NULL ? NULL : container_of(hnptr, struct ckrm_core_class, hnode))
/*
 * ckrm_get_next_child - step an iteration over @parent's children:
 * continue from the previous @child's sibling link, or start at the head
 * of the children list; returns NULL once the list anchor is reached.
 * Caller must hold the hierarchy read lock (ckrm_lock_hier).
 */
342 struct ckrm_core_class *ckrm_get_next_child(struct ckrm_core_class *parent,
343 struct ckrm_core_class *child)
345 struct list_head *cnode;
346 struct ckrm_hnode *next_cnode;
347 struct ckrm_core_class *next_childcore;
349 if (!ckrm_is_core_valid(parent)) {
350 printk(KERN_ERR "Invalid parent %p in ckrm_get_next_child\n",
/* no children at all */
354 if (list_empty(&parent->hnode.children)) {
359 if (!ckrm_is_core_valid(child)) {
361 "Invalid child %p in ckrm_get_next_child\n",
/* continue after the previous child ... */
365 cnode = child->hnode.siblings.next;
/* ... or start at the head of the children list */
367 cnode = parent->hnode.children.next;
370 if (cnode == &parent->hnode.children) { // back at the anchor
374 next_cnode = container_of(cnode, struct ckrm_hnode, siblings);
375 next_childcore = hnode_2_core(next_cnode);
377 if (!ckrm_is_core_valid(next_childcore)) {
379 "Invalid next child %p in ckrm_get_next_child\n",
383 return next_childcore;
386 EXPORT_SYMBOL(ckrm_lock_hier);
387 EXPORT_SYMBOL(ckrm_unlock_hier);
388 EXPORT_SYMBOL(ckrm_get_next_child);
/*
 * ckrm_alloc_res_class - create the per-resource class object of
 * resource @resid for @core (child of @parent), but only when a
 * controller is registered for that resource and provides res_alloc.
 * nr_resusers pins the controller while its callback runs.
 */
391 ckrm_alloc_res_class(struct ckrm_core_class *core,
392 struct ckrm_core_class *parent, int resid)
395 struct ckrm_classtype *clstype;
398 * Allocate a resource class only if the resource controller has
399 * registered with core and the engine requests for the class.
402 if (!ckrm_is_core_valid(core))
405 clstype = core->classtype;
/* slot starts empty; filled only on successful res_alloc */
406 core->res_class[resid] = NULL;
408 if (test_bit(resid, &clstype->bit_res_ctlrs)) {
409 ckrm_res_ctlr_t *rcbs;
411 atomic_inc(&clstype->nr_resusers[resid]);
412 rcbs = clstype->res_ctlrs[resid];
414 if (rcbs && rcbs->res_alloc) {
415 core->res_class[resid] =
416 (*rcbs->res_alloc) (core, parent);
417 if (core->res_class[resid])
419 printk(KERN_ERR "Error creating res class\n");
/* balance the pin taken above */
421 atomic_dec(&clstype->nr_resusers[resid]);
426 * Initialize a core class
/* Class-lifecycle debug trace; compiled out (printk left in the comment). */
430 #define CLS_DEBUG(fmt, args...) \
431 do { /* printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0)
/*
 * ckrm_init_core_class - initialize core class @dcore of @clstype under
 * @parent: allocate the res_class pointer array, set magic and locks,
 * link the class into the classtype list under ckrm_class_lock, attach
 * it to the hierarchy, create its per-resource class objects, and take
 * references on parent and the new class.
 */
434 ckrm_init_core_class(struct ckrm_classtype *clstype,
435 struct ckrm_core_class *dcore,
436 struct ckrm_core_class *parent, const char *name)
438 // Hubertus ... should replace name with dentry or add dentry ?
441 // Hubertus .. how is this used in initialization
443 CLS_DEBUG("name %s => %p\n", name ? name : "default", dcore);
/* only the classtype's default class may be parentless */
445 if ((dcore != clstype->default_class) && (!ckrm_is_core_valid(parent))){
446 printk(KERN_DEBUG "error not a valid parent %p\n", parent);
450 // Hubertus .. dynamic allocation still breaks when RCs registers.
451 // See def in ckrm_rc.h
452 dcore->res_class = NULL;
453 if (clstype->max_resid > 0) {
455 (void **)kmalloc(clstype->max_resid * sizeof(void *),
457 if (dcore->res_class == NULL) {
458 printk(KERN_DEBUG "error no mem\n");
464 dcore->classtype = clstype;
465 dcore->magic = CKRM_CORE_MAGIC;
467 dcore->class_lock = SPIN_LOCK_UNLOCKED;
468 dcore->hnode_rwlock = RW_LOCK_UNLOCKED;
471 atomic_set(&dcore->refcnt, 0);
472 write_lock(&ckrm_class_lock);
474 INIT_LIST_HEAD(&dcore->objlist);
475 list_add_tail(&dcore->clslist, &clstype->classes);
/* class count changed: CE callback activation may change too */
477 clstype->num_classes++;
478 set_callbacks_active(clstype);
480 write_unlock(&ckrm_class_lock);
481 ckrm_add_child(parent, dcore);
483 for (i = 0; i < clstype->max_resid; i++)
484 ckrm_alloc_res_class(dcore, parent, i);
486 // fix for race condition seen in stress with numtasks
488 ckrm_core_grab(parent);
490 ckrm_core_grab(dcore);
/*
 * ckrm_free_res_class - release the resource-class object @core holds
 * for @resid via the controller's res_free callback, balancing the
 * nr_resusers pin taken at allocation time, then clear the slot.
 */
494 static void ckrm_free_res_class(struct ckrm_core_class *core, int resid)
497 * Free a resource class only if the resource controller has
498 * registered with core
500 if (core->res_class[resid]) {
501 ckrm_res_ctlr_t *rcbs;
502 struct ckrm_classtype *clstype = core->classtype;
/* pin the controller while we call into it */
504 atomic_inc(&clstype->nr_resusers[resid]);
505 rcbs = clstype->res_ctlrs[resid];
507 if (rcbs->res_free) {
508 (*rcbs->res_free) (core->res_class[resid]);
509 // compensate inc in alloc
510 atomic_dec(&clstype->nr_resusers[resid]);
/* drop the pin taken just above */
512 atomic_dec(&clstype->nr_resusers[resid]);
514 core->res_class[resid] = NULL;
519 * requires that all tasks were previously reassigned to another class
521 * Returns 0 on success -errno on failure.
/*
 * ckrm_free_core_class - final teardown of @core once its refcount
 * permits: detach it from the hierarchy, free all its resource classes,
 * unlink it from the classtype list under ckrm_class_lock, and drop the
 * reference held on the parent.
 */
524 void ckrm_free_core_class(struct ckrm_core_class *core)
527 struct ckrm_classtype *clstype = core->classtype;
528 struct ckrm_core_class *parent = core->hnode.parent;
530 CLS_DEBUG("core=%p:%s parent=%p:%s\n", core, core->name, parent,
533 /* this core was marked as late */
534 printk(KERN_DEBUG "class <%s> finally deleted %lu\n", core->name, jiffies);
/* NOTE(review): the message implies 0 means failure here - verify against
 * the (elided) return convention of ckrm_remove_child */
536 if (ckrm_remove_child(core) == 0) {
537 printk(KERN_DEBUG "Core class removal failed. Chilren present\n");
540 for (i = 0; i < clstype->max_resid; i++) {
541 ckrm_free_res_class(core, i);
544 write_lock(&ckrm_class_lock);
546 // Clear the magic, so we would know if this core is reused.
548 #if 0 // Dynamic not yet enabled
549 core->res_class = NULL;
551 // Remove this core class from its linked list.
552 list_del(&core->clslist);
553 clstype->num_classes--;
554 set_callbacks_active(clstype);
555 write_unlock(&ckrm_class_lock);
557 // fix for race condition seen in stress with numtasks
559 ckrm_core_drop(parent);
/*
 * ckrm_release_core_class - drop the caller's reference on @core.  When
 * other references remain, deletion is deferred (delayed flag) until the
 * final ckrm_core_drop.  The classtype's default class is never released.
 */
564 int ckrm_release_core_class(struct ckrm_core_class *core)
566 if (!ckrm_is_core_valid(core)) {
571 if (core == core->classtype->default_class)
574 /* need to make sure that the class got really dropped */
575 if (atomic_read(&core->refcnt) != 1) {
576 CLS_DEBUG("class <%s> deletion delayed refcnt=%d jif=%ld\n",
577 core->name, atomic_read(&core->refcnt), jiffies);
578 core->delayed = 1; /* just so we have a ref point */
580 ckrm_core_drop(core);
584 /****************************************************************************
585 * Interfaces for the resource controller *
586 ****************************************************************************/
588 * Registering a callback structure by the resource controller.
590 * Returns the resource id(0 or +ve) on success, -errno for failure.
/*
 * ckrm_register_res_ctlr_intern - install controller @rcbs in @clstype's
 * table under res_ctlrs_lock.  A requested fixed id is honored when its
 * slot is free; otherwise the first free slot above the reserved range
 * is taken.  max_resid is raised to cover the chosen id.  Returns the
 * resource id; failure paths are elided in this listing.
 */
593 ckrm_register_res_ctlr_intern(struct ckrm_classtype *clstype,
594 ckrm_res_ctlr_t * rcbs)
603 spin_lock(&clstype->res_ctlrs_lock);
605 printk(KERN_WARNING "resid is %d name is %s %s\n",
606 resid, rcbs->res_name, clstype->res_ctlrs[resid]->res_name);
/* fixed-id request: take the slot if it is free */
609 if ((resid < CKRM_MAX_RES_CTLRS)
610 && (clstype->res_ctlrs[resid] == NULL)) {
611 clstype->res_ctlrs[resid] = rcbs;
612 atomic_set(&clstype->nr_resusers[resid], 0);
613 set_bit(resid, &clstype->bit_res_ctlrs);
615 if (resid >= clstype->max_resid) {
616 clstype->max_resid = resid + 1;
621 spin_unlock(&clstype->res_ctlrs_lock);
/* otherwise assign the first free dynamic slot */
625 for (i = clstype->resid_reserved; i < clstype->max_res_ctlrs; i++) {
626 if (clstype->res_ctlrs[i] == NULL) {
627 clstype->res_ctlrs[i] = rcbs;
629 atomic_set(&clstype->nr_resusers[i], 0);
630 set_bit(i, &clstype->bit_res_ctlrs);
631 if (i >= clstype->max_resid) {
632 clstype->max_resid = i + 1;
634 spin_unlock(&clstype->res_ctlrs_lock);
639 spin_unlock(&clstype->res_ctlrs_lock);
/*
 * ckrm_register_res_ctlr - public entry: allocate an id for @rcbs via
 * ckrm_register_res_ctlr_intern, then walk every existing class of
 * @clstype, instantiate the controller's resource class for it, and let
 * RCFS add the control files via add_resctrl.
 */
644 ckrm_register_res_ctlr(struct ckrm_classtype *clstype, ckrm_res_ctlr_t * rcbs)
646 struct ckrm_core_class *core;
649 resid = ckrm_register_res_ctlr_intern(clstype, rcbs);
652 /* run through all classes and create the resource class
653 * object and if necessary "initialize" class in context
656 read_lock(&ckrm_class_lock);
657 list_for_each_entry(core, &clstype->classes, clslist) {
658 printk(KERN_INFO "CKRM .. create res clsobj for resouce <%s>"
659 "class <%s> par=%p\n", rcbs->res_name,
660 core->name, core->hnode.parent);
661 ckrm_alloc_res_class(core, core->hnode.parent, resid);
663 if (clstype->add_resctrl) {
664 // FIXME: this should be mandatory
665 (*clstype->add_resctrl) (core, resid);
668 read_unlock(&ckrm_class_lock);
674 * Unregistering a callback structure by the resource controller.
676 * Returns 0 on success -errno for failure.
/*
 * ckrm_unregister_res_ctlr - tear down controller @rcbs: free its
 * resource-class objects in every class, then (provided no users remain
 * pinned via nr_resusers) clear its table slot and bitmap bit and
 * recompute max_resid from the bitmap.
 */
678 int ckrm_unregister_res_ctlr(struct ckrm_res_ctlr *rcbs)
680 struct ckrm_classtype *clstype = rcbs->classtype;
681 struct ckrm_core_class *core = NULL;
682 int resid = rcbs->resid;
684 if ((clstype == NULL) || (resid < 0)) {
687 // FIXME: probably need to also call deregistration function
689 read_lock(&ckrm_class_lock);
690 // free up this resource from all the classes
691 list_for_each_entry(core, &clstype->classes, clslist) {
692 ckrm_free_res_class(core, resid);
694 read_unlock(&ckrm_class_lock);
/* controller still pinned by a concurrent user: refuse */
696 if (atomic_read(&clstype->nr_resusers[resid])) {
700 spin_lock(&clstype->res_ctlrs_lock);
701 clstype->res_ctlrs[resid] = NULL;
702 clear_bit(resid, &clstype->bit_res_ctlrs);
/* highest remaining set bit + 1 becomes the new max_resid */
703 clstype->max_resid = fls(clstype->bit_res_ctlrs);
705 spin_unlock(&clstype->res_ctlrs_lock);
710 /*******************************************************************
711 * Class Type Registration
712 *******************************************************************/
714 /* Hubertus ... we got to do some locking here */
717 struct ckrm_classtype *ckrm_classtypes[CKRM_MAX_CLASSTYPES];
718 // really should build a better interface for this
719 EXPORT_SYMBOL(ckrm_classtypes);
/*
 * ckrm_register_classtype - publish @clstype in the global table at its
 * requested typeID, or at a free slot above the reserved range, then
 * notify the RCFS client.
 * NOTE(review): the bounds check uses (tid > CKRM_MAX_CLASSTYPES); for a
 * table sized CKRM_MAX_CLASSTYPES this looks like it should be >=, else
 * tid == CKRM_MAX_CLASSTYPES would index past the array - verify.
 */
721 int ckrm_register_classtype(struct ckrm_classtype *clstype)
723 int tid = clstype->typeID;
726 if ((tid < 0) || (tid > CKRM_MAX_CLASSTYPES)
727 || (ckrm_classtypes[tid]))
/* dynamic assignment: first free slot above the reserved range */
731 for (i = CKRM_RESV_CLASSTYPES; i < CKRM_MAX_CLASSTYPES; i++) {
732 if (ckrm_classtypes[i] == NULL) {
740 clstype->typeID = tid;
741 ckrm_classtypes[tid] = clstype;
743 /* Hubertus .. we need to call the callbacks of the RCFS client */
744 if (rcfs_fn.register_classtype) {
745 (*rcfs_fn.register_classtype) (clstype);
746 // No error return for now ;
/*
 * ckrm_unregister_classtype - remove @clstype from the global table and
 * notify the RCFS client.
 * NOTE(review): same (tid > CKRM_MAX_CLASSTYPES) boundary as in
 * ckrm_register_classtype; >= looks intended - verify.
 */
752 int ckrm_unregister_classtype(struct ckrm_classtype *clstype)
754 int tid = clstype->typeID;
756 if ((tid < 0) || (tid > CKRM_MAX_CLASSTYPES)
757 || (ckrm_classtypes[tid] != clstype))
760 if (rcfs_fn.deregister_classtype) {
761 (*rcfs_fn.deregister_classtype) (clstype);
762 // No error return for now
765 ckrm_classtypes[tid] = NULL;
766 clstype->typeID = -1;
/* Linear search of the global classtype table by (bounded) name compare. */
770 struct ckrm_classtype *ckrm_find_classtype_by_name(const char *name)
773 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
774 struct ckrm_classtype *ctype = ckrm_classtypes[i];
775 if (ctype && !strncmp(ctype->name, name, CKRM_MAX_TYPENAME_LEN))
781 /*******************************************************************
782 * Event callback invocation
783 *******************************************************************/
785 struct ckrm_hook_cb *ckrm_event_callbacks[CKRM_NONLATCHABLE_EVENTS];
787 /* Registration / Deregistration / Invocation functions */
/*
 * ckrm_register_event_cb - append @cb to the singly linked callback
 * chain of event @ev (latchable events are rejected).  Walks next
 * pointers to the tail; the append itself is elided in this listing.
 */
789 int ckrm_register_event_cb(enum ckrm_event ev, struct ckrm_hook_cb *cb)
791 struct ckrm_hook_cb **cbptr;
793 if ((ev < CKRM_LATCHABLE_EVENTS) || (ev >= CKRM_NONLATCHABLE_EVENTS))
795 cbptr = &ckrm_event_callbacks[ev];
796 while (*cbptr != NULL)
797 cbptr = &((*cbptr)->next);
/*
 * ckrm_unregister_event_cb - unlink @cb from event @ev's callback chain.
 * NOTE(review): the unlink writes (*cbptr)->next = cb->next, which is a
 * self-assignment when *cbptr == cb; *cbptr = cb->next looks intended.
 * The return value (*cbptr == NULL) is truthy only when @cb was NOT
 * found - verify both against the elided line(s) and callers.
 */
802 int ckrm_unregister_event_cb(enum ckrm_event ev, struct ckrm_hook_cb *cb)
804 struct ckrm_hook_cb **cbptr;
806 if ((ev < CKRM_LATCHABLE_EVENTS) || (ev >= CKRM_NONLATCHABLE_EVENTS))
808 cbptr = &ckrm_event_callbacks[ev];
809 while ((*cbptr != NULL) && (*cbptr != cb))
810 cbptr = &((*cbptr)->next);
812 (*cbptr)->next = cb->next;
813 return (*cbptr == NULL);
/* Register every callback in a (-1)-terminated ckrm_event_spec array. */
816 int ckrm_register_event_set(struct ckrm_event_spec especs[])
818 struct ckrm_event_spec *espec = especs;
820 for (espec = especs; espec->ev != -1; espec++)
821 ckrm_register_event_cb(espec->ev, &espec->cb);
825 int ckrm_unregister_event_set(struct ckrm_event_spec especs[])
827 struct ckrm_event_spec *espec = especs;
829 for (espec = especs; espec->ev != -1; espec++)
830 ckrm_unregister_event_cb(espec->ev, &espec->cb);
/*
 * ECC_PRINTK - debug trace for the event-callback chain, compiled out.
 * Rewritten with the do { } while (0) idiom so a call site remains one
 * full statement (safe in an unbraced if/else), instead of the previous
 * form where a line-spliced // comment made the macro expand to nothing
 * and depended on the preprocessor's splice-before-comment ordering.
 * Re-enable tracing by restoring the printk inside the body.
 */
#define ECC_PRINTK(fmt, args...) \
	do { /* printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args); */ } while (0)
/*
 * ckrm_invoke_event_cb_chain - deliver event @ev with argument @arg to
 * every callback registered on its chain.
 * NOTE(review): the disabled ECC_PRINTK passes three arguments for two
 * conversions ("%d %x") - fix the format before re-enabling the trace.
 */
837 void ckrm_invoke_event_cb_chain(enum ckrm_event ev, void *arg)
839 struct ckrm_hook_cb *cb, *anchor;
841 ECC_PRINTK("%d %x\n", current, ev, arg);
842 if ((anchor = ckrm_event_callbacks[ev]) != NULL) {
843 for (cb = anchor; cb; cb = cb->next)
848 /*******************************************************************
849 * Generic Functions that can be used as default functions
850 * in almost all classtypes
851 * (a) function iterator over all resource classes of a class
852 * (b) function invoker on a named resource
853 *******************************************************************/
/*
 * ckrm_class_show_shares - seq_file dump of the share values of every
 * registered controller of @core's classtype.  nr_resusers pins each
 * controller around its get_share_values callback.
 */
855 int ckrm_class_show_shares(struct ckrm_core_class *core, struct seq_file *seq)
858 struct ckrm_res_ctlr *rcbs;
859 struct ckrm_classtype *clstype = core->classtype;
860 struct ckrm_shares shares;
862 for (i = 0; i < clstype->max_resid; i++) {
863 atomic_inc(&clstype->nr_resusers[i]);
864 rcbs = clstype->res_ctlrs[i];
865 if (rcbs && rcbs->get_share_values) {
866 int rc = (*rcbs->get_share_values)(core->res_class[i],
870 seq_printf(seq,"res=%s,guarantee=%d,limit=%d,"
871 "total_guarantee=%d,max_limit=%d\n",
872 rcbs->res_name, shares.my_guarantee,
873 shares.my_limit, shares.total_guarantee,
876 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_show_stats - seq_file dump of statistics from every
 * registered controller of @core's classtype (get_stats callback),
 * with the controller pinned via nr_resusers around each call.
 */
881 int ckrm_class_show_stats(struct ckrm_core_class *core, struct seq_file *seq)
884 struct ckrm_res_ctlr *rcbs;
885 struct ckrm_classtype *clstype = core->classtype;
887 for (i = 0; i < clstype->max_resid; i++) {
888 atomic_inc(&clstype->nr_resusers[i]);
889 rcbs = clstype->res_ctlrs[i];
890 if (rcbs && rcbs->get_stats)
891 (*rcbs->get_stats) (core->res_class[i], seq);
892 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_show_config - seq_file dump of configuration from every
 * registered controller of @core's classtype (show_config callback),
 * with the controller pinned via nr_resusers around each call.
 */
897 int ckrm_class_show_config(struct ckrm_core_class *core, struct seq_file *seq)
900 struct ckrm_res_ctlr *rcbs;
901 struct ckrm_classtype *clstype = core->classtype;
903 for (i = 0; i < clstype->max_resid; i++) {
904 atomic_inc(&clstype->nr_resusers[i]);
905 rcbs = clstype->res_ctlrs[i];
906 if (rcbs && rcbs->show_config)
907 (*rcbs->show_config) (core->res_class[i], seq);
908 atomic_dec(&clstype->nr_resusers[i]);
/*
 * ckrm_class_set_config - route a configuration string to the named
 * resource controller's set_config callback for @core's resource class.
 */
913 int ckrm_class_set_config(struct ckrm_core_class *core, const char *resname,
916 struct ckrm_classtype *clstype = core->classtype;
917 struct ckrm_res_ctlr *rcbs = ckrm_resctlr_lookup(clstype, resname);
920 if (rcbs == NULL || rcbs->set_config == NULL)
922 rc = (*rcbs->set_config) (core->res_class[rcbs->resid], cfgstr);
/*
 * legalshare(a) - nonzero when @a is an acceptable share value: an
 * in-range number (first clause elided in this listing) or one of the
 * CKRM_SHARE_UNCHANGED / CKRM_SHARE_DONTCARE sentinels.
 */
926 #define legalshare(a) \
928 	|| ((a) == CKRM_SHARE_UNCHANGED) \
929 	|| ((a) == CKRM_SHARE_DONTCARE) )
/*
 * ckrm_class_set_shares - validate all four share fields with
 * legalshare() and forward the request to the named controller's
 * set_share_values callback for @core's resource class.
 */
931 int ckrm_class_set_shares(struct ckrm_core_class *core, const char *resname,
932 struct ckrm_shares *shares)
934 struct ckrm_classtype *clstype = core->classtype;
935 struct ckrm_res_ctlr *rcbs;
938 // Check for legal values
939 if (!legalshare(shares->my_guarantee) || !legalshare(shares->my_limit)
940 || !legalshare(shares->total_guarantee)
941 || !legalshare(shares->max_limit))
944 rcbs = ckrm_resctlr_lookup(clstype, resname);
945 if (rcbs == NULL || rcbs->set_share_values == NULL)
947 rc = (*rcbs->set_share_values) (core->res_class[rcbs->resid], shares);
/* Forward a statistics reset to the named controller's reset_stats. */
951 int ckrm_class_reset_stats(struct ckrm_core_class *core, const char *resname,
954 struct ckrm_classtype *clstype = core->classtype;
955 struct ckrm_res_ctlr *rcbs = ckrm_resctlr_lookup(clstype, resname);
958 if (rcbs == NULL || rcbs->reset_stats == NULL)
960 rc = (*rcbs->reset_stats) (core->res_class[rcbs->resid]);
964 /*******************************************************************
966 *******************************************************************/
/* Fork hook: init the task's ckrm lock and fire the NEWTASK event chain. */
968 void ckrm_cb_newtask(struct task_struct *tsk)
971 spin_lock_init(&tsk->ckrm_tsklock);
972 ckrm_invoke_event_cb_chain(CKRM_EVENT_NEWTASK, tsk);
/* Exit hook: fire the EXIT event chain for @tsk. */
975 void ckrm_cb_exit(struct task_struct *tsk)
977 ckrm_invoke_event_cb_chain(CKRM_EVENT_EXIT, tsk);
/*
 * ckrm_init - boot-time initialization: register the configured
 * classtypes (task and/or socket) and classify init_task, so every
 * later task inherits CKRM state through the fork callback.
 */
981 void __init ckrm_init(void)
983 printk(KERN_DEBUG "CKRM Initialization\n");
985 // register/initialize the Metatypes
987 #ifdef CONFIG_CKRM_TYPE_TASKCLASS
989 extern void ckrm_meta_init_taskclass(void);
990 ckrm_meta_init_taskclass();
993 #ifdef CONFIG_CKRM_TYPE_SOCKETCLASS
995 extern void ckrm_meta_init_sockclass(void);
996 ckrm_meta_init_sockclass();
999 // prepare init_task and then rely on inheritance of properties
1000 ckrm_cb_newtask(&init_task);
1001 printk(KERN_DEBUG "CKRM Initialization done\n");
1004 EXPORT_SYMBOL(ckrm_register_engine);
1005 EXPORT_SYMBOL(ckrm_unregister_engine);
1007 EXPORT_SYMBOL(ckrm_register_res_ctlr);
1008 EXPORT_SYMBOL(ckrm_unregister_res_ctlr);
1010 EXPORT_SYMBOL(ckrm_init_core_class);
1011 EXPORT_SYMBOL(ckrm_free_core_class);
1012 EXPORT_SYMBOL(ckrm_release_core_class);
1014 EXPORT_SYMBOL(ckrm_register_classtype);
1015 EXPORT_SYMBOL(ckrm_unregister_classtype);
1016 EXPORT_SYMBOL(ckrm_find_classtype_by_name);
1018 EXPORT_SYMBOL(ckrm_core_grab);
1019 EXPORT_SYMBOL(ckrm_core_drop);
1020 EXPORT_SYMBOL(ckrm_is_core_valid);
1021 EXPORT_SYMBOL(ckrm_validate_and_grab_core);
1023 EXPORT_SYMBOL(ckrm_register_event_set);
1024 EXPORT_SYMBOL(ckrm_unregister_event_set);
1025 EXPORT_SYMBOL(ckrm_register_event_cb);
1026 EXPORT_SYMBOL(ckrm_unregister_event_cb);
1028 EXPORT_SYMBOL(ckrm_class_show_stats);
1029 EXPORT_SYMBOL(ckrm_class_show_config);
1030 EXPORT_SYMBOL(ckrm_class_show_shares);
1032 EXPORT_SYMBOL(ckrm_class_set_config);
1033 EXPORT_SYMBOL(ckrm_class_set_shares);
1035 EXPORT_SYMBOL(ckrm_class_reset_stats);