/* ckrm_tc.c - Class-based Kernel Resource Management (CKRM)
 *
 * Copyright (C) Hubertus Franke, IBM Corp. 2003,2004
 *           (C) Shailabh Nagar,  IBM Corp. 2003
 *           (C) Chandra Seetharaman,  IBM Corp. 2003
 *           (C) Vivek Kashyap, IBM Corp. 2004
 *
 * Provides the kernel API of CKRM for in-kernel, per-resource controllers
 * (one each for cpu, memory, io, network) and callbacks for
 * classification modules.
 *
 * Latest version, more details at http://ckrm.sf.net
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

/* Changes
 *
 * 28 Aug 2003
 *        Created.
 * 06 Nov 2003
 *        Made modifications to suit the new RBCE module.
 * 10 Nov 2003
 *        Fixed a bug in fork and exit callbacks. Added callbacks_active and
 *        surrounding logic. Added task parameter for all CE callbacks.
 * 23 Mar 2004
 *        Moved to reference-counted class objects and corrected locking.
 * 12 Apr 2004
 *        Adapted to the emerging classtype interface.
 */
36
37 #include <linux/config.h>
38 #include <linux/init.h>
39 #include <linux/linkage.h>
40 #include <linux/kernel.h>
41 #include <linux/errno.h>
42 #include <asm/uaccess.h>
43 #include <linux/mm.h>
44 #include <asm/errno.h>
45 #include <linux/string.h>
46 #include <linux/list.h>
47 #include <linux/spinlock.h>
48 #include <linux/module.h>
49 #include <linux/ckrm_rc.h>
50
51 #include <linux/ckrm_tc.h>
52
53 #define TC_DEBUG(fmt, args...) do { \
54 /* printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0)
55
56 static struct ckrm_task_class taskclass_dflt_class = {
57 };
58
59 const char *dflt_taskclass_name = TASK_CLASS_TYPE_NAME;

static struct ckrm_core_class *ckrm_alloc_task_class(struct ckrm_core_class
                                                     *parent, const char *name);
static int ckrm_free_task_class(struct ckrm_core_class *core);

static int tc_forced_reclassify(ckrm_core_class_t * target,
                                const char *resname);
static int tc_show_members(struct ckrm_core_class *core, struct seq_file *seq);
static void tc_add_resctrl(struct ckrm_core_class *core, int resid);

struct ckrm_classtype CT_taskclass = {
        .mfidx = TC_MF_IDX,
        .name = TASK_CLASS_TYPE_NAME,
        .typeID = CKRM_CLASSTYPE_TASK_CLASS,
        .maxdepth = 3,          // Hubertus .. just to start
        .resid_reserved = 4,    // Hubertus .. reservation
        .max_res_ctlrs = CKRM_MAX_RES_CTLRS,
        .max_resid = 0,
        .bit_res_ctlrs = 0L,
        .res_ctlrs_lock = SPIN_LOCK_UNLOCKED,
        .classes = LIST_HEAD_INIT(CT_taskclass.classes),

        .default_class = &taskclass_dflt_class.core,

        // private version of functions
        .alloc = &ckrm_alloc_task_class,
        .free = &ckrm_free_task_class,
        .show_members = &tc_show_members,
        .forced_reclassify = &tc_forced_reclassify,

        // use of default functions
        .show_shares = &ckrm_class_show_shares,
        .show_stats = &ckrm_class_show_stats,
        .show_config = &ckrm_class_show_config,
        .set_config = &ckrm_class_set_config,
        .set_shares = &ckrm_class_set_shares,
        .reset_stats = &ckrm_class_reset_stats,

        // mandatory private version .. no dflt available
        .add_resctrl = &tc_add_resctrl,
};

/**************************************************************************
 *                   Helper Functions                                     *
 **************************************************************************/

static inline void ckrm_init_task_lock(struct task_struct *tsk)
{
        tsk->ckrm_tsklock = SPIN_LOCK_UNLOCKED;
}

// Hubertus .. following functions should move to ckrm_rc.h

static inline void ckrm_task_lock(struct task_struct *tsk)
{
        spin_lock(&tsk->ckrm_tsklock);
}

static inline void ckrm_task_unlock(struct task_struct *tsk)
{
        spin_unlock(&tsk->ckrm_tsklock);
}

/*
 * Change the task class of the given task.
 *
 * Change the task's task class to "newcls" if the task's current
 * class (task->taskclass) is the same as the given "oldcls", when "oldcls"
 * is non-NULL.
 *
 * The caller is responsible for making sure the task structure stays put
 * throughout this function.
 *
 * This function should be called with the following locks NOT held:
 *      - tsk->ckrm_tsklock
 *      - core->ckrm_lock, if core is NULL then ckrm_dflt_class.ckrm_lock
 *      - tsk->taskclass->ckrm_lock
 *
 * The function is also called with a ckrm_core_grab already taken on the
 * new core; that reference needs to be dropped if no assignment takes place.
 */
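/*
 * Note: a taskclass pointer of (void *)-1 marks a task that has been
 * permanently disassociated from CKRM (set at exit time); such tasks
 * are left alone here.
 */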
static void
ckrm_set_taskclass(struct task_struct *tsk, ckrm_task_class_t * newcls,
                   ckrm_task_class_t * oldcls, enum ckrm_event event)
{
        int i;
        ckrm_classtype_t *clstype;
        ckrm_res_ctlr_t *rcbs;
        ckrm_task_class_t *curcls;
        void *old_res_class, *new_res_class;
        int drop_old_cls;

        ckrm_task_lock(tsk);
        curcls = tsk->taskclass;

        if ((void *)-1 == curcls) {
                // task is disassociated from ckrm... don't bother it.
                ckrm_task_unlock(tsk);
                ckrm_core_drop(class_core(newcls));
                return;
        }

        if ((curcls == NULL) && (newcls == (void *)-1)) {
                // task needs to be disassociated from ckrm and has no curcls;
                // just disassociate and return.
                tsk->taskclass = newcls;
                ckrm_task_unlock(tsk);
                return;
        }
        // check whether the compare-and-exchange precondition (curcls == oldcls) holds
        if (oldcls && (oldcls != curcls)) {
                ckrm_task_unlock(tsk);
                if (newcls) {
                        /* compensate for previous grab */
                        TC_DEBUG("(%s:%d): Race-condition caught <%s> %d\n",
                                 tsk->comm, tsk->pid, class_core(newcls)->name,
                                 event);
                        ckrm_core_drop(class_core(newcls));
                }
                return;
        }
        // make sure we have a real destination core
        if (!newcls) {
                newcls = &taskclass_dflt_class;
                ckrm_core_grab(class_core(newcls));
        }
        // take out of old class;
        // remember that we need to drop the old core
        if ((drop_old_cls = (curcls != NULL))) {
                class_lock(class_core(curcls));
                if (newcls == curcls) {
                        // we are already in the destination class.
                        // we still need to drop the old core
                        class_unlock(class_core(curcls));
                        ckrm_task_unlock(tsk);
                        goto out;
                }
                list_del(&tsk->taskclass_link);
                INIT_LIST_HEAD(&tsk->taskclass_link);
                tsk->taskclass = NULL;
                class_unlock(class_core(curcls));
                if (newcls == (void *)-1) {
                        tsk->taskclass = newcls;
                        ckrm_task_unlock(tsk);
                        // still need to get out of the old class
                        newcls = NULL;
                        goto rc_handling;
                }
        }
        // put into new class
        class_lock(class_core(newcls));
        tsk->taskclass = newcls;
        list_add(&tsk->taskclass_link, &class_core(newcls)->objlist);
        class_unlock(class_core(newcls));

        if (newcls == curcls) {
                ckrm_task_unlock(tsk);
                goto out;
        }

        CE_NOTIFY(&CT_taskclass, event, newcls, tsk);

        ckrm_task_unlock(tsk);

      rc_handling:
        clstype = &CT_taskclass;
        if (clstype->bit_res_ctlrs) {
                // avoid running through the entire list if none is registered
                for (i = 0; i < clstype->max_resid; i++) {
                        if (clstype->res_ctlrs[i] == NULL)
                                continue;
                        atomic_inc(&clstype->nr_resusers[i]);
                        old_res_class =
                            curcls ? class_core(curcls)->res_class[i] : NULL;
                        new_res_class =
                            newcls ? class_core(newcls)->res_class[i] : NULL;
                        rcbs = clstype->res_ctlrs[i];
                        if (rcbs && rcbs->change_resclass
                            && (old_res_class != new_res_class))
                                (*rcbs->change_resclass) (tsk, old_res_class,
                                                          new_res_class);
                        atomic_dec(&clstype->nr_resusers[i]);
                }
        }

      out:
        if (drop_old_cls)
                ckrm_core_drop(class_core(curcls));
        return;
}

// HF SUGGEST: we could macro-tize this for other types
// DEF_FUNC_ADD_RESCTRL(funcname,link)
//          would DEF_FUNC_ADD_RESCTRL(tc_add_resctrl,taskclass_link)

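/*
 * Attach the resource controller @resid to every task that is already a
 * member of @core: each member is switched from the "unassigned" marker
 * (void *)-1 to this class's per-resource class object.
 */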
static void tc_add_resctrl(struct ckrm_core_class *core, int resid)
{
        struct task_struct *tsk;
        struct ckrm_res_ctlr *rcbs;

        if ((resid < 0) || (resid >= CKRM_MAX_RES_CTLRS)
            || ((rcbs = core->classtype->res_ctlrs[resid]) == NULL))
                return;

        class_lock(core);
        list_for_each_entry(tsk, &core->objlist, taskclass_link) {
                if (rcbs->change_resclass)
                        (*rcbs->change_resclass) (tsk, (void *)-1,
                                                  core->res_class[resid]);
        }
        class_unlock(core);
}

/**************************************************************************
 *                   Functions called from classification points          *
 **************************************************************************/

#define ECB_PRINTK(fmt, args...) do { \
/* if (CT_taskclass.ce_regd) \
       printk("%s: " fmt, __FUNCTION__ , ## args); */ } while (0)

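/*
 * Ask the classification engine (if one is registered) for a new class
 * for @tsk and, if it returns one, move the task there.  The task's
 * current class is passed as @oldcls so that ckrm_set_taskclass() can
 * detect and back out of a concurrent reclassification.
 */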
#define CE_CLASSIFY_TASK(event, tsk)                                    \
do {                                                                    \
        struct ckrm_task_class *newcls = NULL;                          \
        struct ckrm_task_class *oldcls = tsk->taskclass;                \
                                                                        \
        CE_CLASSIFY_RET(newcls,&CT_taskclass,event,tsk);                \
        if (newcls) {                                                   \
                /* called synchronously. no need to get task struct */  \
                ckrm_set_taskclass(tsk, newcls, oldcls, event);         \
        }                                                               \
} while (0)


#define CE_CLASSIFY_TASK_PROTECT(event, tsk)    \
do {                                            \
        ce_protect(&CT_taskclass);              \
        CE_CLASSIFY_TASK(event,tsk);            \
        ce_release(&CT_taskclass);              \
} while (0)

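/*
 * NEWTASK runs when a task structure is first set up: only the per-task
 * CKRM state is initialized here; the actual classification happens in
 * the FORK callback below.
 */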
static void cb_taskclass_newtask(struct task_struct *tsk)
{
        tsk->taskclass = NULL;
        INIT_LIST_HEAD(&tsk->taskclass_link);
}

static void cb_taskclass_fork(struct task_struct *tsk)
{
        struct ckrm_task_class *cls = NULL;

        ECB_PRINTK("%p:%d:%s\n", tsk, tsk->pid, tsk->comm);

        ce_protect(&CT_taskclass);
        CE_CLASSIFY_RET(cls, &CT_taskclass, CKRM_EVENT_FORK, tsk);
        if (cls == NULL) {
                ckrm_task_lock(tsk->parent);
                cls = tsk->parent->taskclass;
                ckrm_core_grab(class_core(cls));
                ckrm_task_unlock(tsk->parent);
        }
        if (!list_empty(&tsk->taskclass_link))
                printk("BUG in cb_fork.. tsk <%s:%d> already linked\n",
                       tsk->comm, tsk->pid);

        ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_FORK);
        ce_release(&CT_taskclass);
}

static void cb_taskclass_exit(struct task_struct *tsk)
{
        CE_CLASSIFY_NORET(&CT_taskclass, CKRM_EVENT_EXIT, tsk);
        ckrm_set_taskclass(tsk, (void *)-1, NULL, CKRM_EVENT_EXIT);
}

static void cb_taskclass_exec(const char *filename)
{
        ECB_PRINTK("%p:%d:%s <%s>\n", current, current->pid, current->comm,
                   filename);
        CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_EXEC, current);
}

static void cb_taskclass_uid(void)
{
        ECB_PRINTK("%p:%d:%s\n", current, current->pid, current->comm);
        CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_UID, current);
}

static void cb_taskclass_gid(void)
{
        ECB_PRINTK("%p:%d:%s\n", current, current->pid, current->comm);
        CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_GID, current);
}

static void cb_taskclass_xid(struct task_struct *tsk)
{
        ECB_PRINTK("%p:%d:%s\n", current, current->pid, current->comm);
        CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_XID, tsk);
}

static struct ckrm_event_spec taskclass_events_callbacks[] = {
        CKRM_EVENT_SPEC(NEWTASK, cb_taskclass_newtask),
        CKRM_EVENT_SPEC(EXEC, cb_taskclass_exec),
        CKRM_EVENT_SPEC(FORK, cb_taskclass_fork),
        CKRM_EVENT_SPEC(EXIT, cb_taskclass_exit),
        CKRM_EVENT_SPEC(UID, cb_taskclass_uid),
        CKRM_EVENT_SPEC(GID, cb_taskclass_gid),
        CKRM_EVENT_SPEC(XID, cb_taskclass_xid),
        {-1}
};

/***********************************************************************
 *
 * Asynchronous callback functions   (driven by RCFS)
 *
 *    Async functions force a setting of the task structure.
 *    Synchronous callbacks are protected against race conditions
 *    by using a cmpxchg on the core before setting it.
 *    Async calls need to be serialized to ensure they can't
 *    race against each other.
 *
 ***********************************************************************/

DECLARE_MUTEX(async_serializer);        // serialize all async functions

/*
 * Go through the task list and reclassify all tasks according to the current
 * classification rules.
 *
 * We have the problem that we cannot hold any lock (including the
 * tasklist_lock) while classifying. Two methods are possible:
 *
 * (a) go through the entire pid range (0..pid_max) and, if a task exists at
 *     that pid, reclassify it
 * (b) go through the task list several times and build a bitmap for a
 *     particular subrange of pids; otherwise the memory requirements might
 *     be too high.
 *
 * We use a hybrid, chosen by comparing the ratio pid_max / nr_threads.
 */

static void ckrm_reclassify_all_tasks(void)
{
        extern int pid_max;

        struct task_struct *proc, *thread;
        int i;
        int curpidmax = pid_max;
        int ratio;
        int use_bitmap;

        ratio = curpidmax / nr_threads;
        if (curpidmax <= PID_MAX_DEFAULT) {
                use_bitmap = 1;
        } else {
                use_bitmap = (ratio >= 2);
        }

        ce_protect(&CT_taskclass);

      retry:
        if (use_bitmap == 0) {
                // go through it in one walk
                read_lock(&tasklist_lock);
                for (i = 0; i < curpidmax; i++) {
                        if ((thread = find_task_by_pid(i)) == NULL)
                                continue;
                        get_task_struct(thread);
                        read_unlock(&tasklist_lock);
                        CE_CLASSIFY_TASK(CKRM_EVENT_RECLASSIFY, thread);
                        put_task_struct(thread);
                        read_lock(&tasklist_lock);
                }
                read_unlock(&tasklist_lock);
        } else {
                unsigned long *bitmap;
                int bitmapsize;
                int order = 0;
                int num_loops;
                int pid, do_next;

                bitmap = (unsigned long *)__get_free_pages(GFP_KERNEL, order);
                if (bitmap == NULL) {
                        use_bitmap = 0;
                        goto retry;
                }

                bitmapsize = 8 * (1 << (order + PAGE_SHIFT));
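                /* one page (order 0) gives PAGE_SIZE * 8 bits, i.e. 32768
                 * pids on 4K-page systems -- enough to cover PID_MAX_DEFAULT
                 * in a single pass */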
                num_loops = (curpidmax + bitmapsize - 1) / bitmapsize;

                do_next = 1;
                for (i = 0; i < num_loops && do_next; i++) {
                        int pid_start = i * bitmapsize;
                        int pid_end = pid_start + bitmapsize;
                        int num_found = 0;
                        int pos;

                        memset(bitmap, 0, bitmapsize / 8);      // start afresh
                        do_next = 0;

                        read_lock(&tasklist_lock);
                        do_each_thread(proc, thread) {
                                pid = thread->pid;
                                if ((pid < pid_start) || (pid >= pid_end)) {
                                        if (pid >= pid_end) {
                                                do_next = 1;
                                        }
                                        continue;
                                }
                                pid -= pid_start;
                                set_bit(pid, bitmap);
                                num_found++;
                        }
                        while_each_thread(proc, thread);
                        read_unlock(&tasklist_lock);

                        if (num_found == 0)
                                continue;

                        pos = 0;
                        for (; num_found--;) {
                                pos = find_next_bit(bitmap, bitmapsize, pos);
                                pid = pos + pid_start;
                                pos++;  // advance past the bit just handled

                                read_lock(&tasklist_lock);
                                if ((thread = find_task_by_pid(pid)) != NULL) {
                                        get_task_struct(thread);
                                        read_unlock(&tasklist_lock);
                                        CE_CLASSIFY_TASK(CKRM_EVENT_RECLASSIFY,
                                                         thread);
                                        put_task_struct(thread);
                                } else {
                                        read_unlock(&tasklist_lock);
                                }
                        }
                }
                free_pages((unsigned long)bitmap, order);
        }
        ce_release(&CT_taskclass);
}

int ckrm_reclassify(int pid)
{
        struct task_struct *tsk;
        int rc = 0;

        down(&async_serializer);        // protect against race conditions
        if (pid < 0) {
                // do we want to treat this as a process group? ToDo: probably yes
                rc = -EINVAL;
        } else if (pid == 0) {
                // reclassify all tasks in the system
                ckrm_reclassify_all_tasks();
        } else {
                // reclassify a particular pid
                read_lock(&tasklist_lock);
                if ((tsk = find_task_by_pid(pid)) != NULL) {
                        get_task_struct(tsk);
                        read_unlock(&tasklist_lock);
                        CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_RECLASSIFY, tsk);
                        put_task_struct(tsk);
                } else {
                        read_unlock(&tasklist_lock);
                        rc = -EINVAL;
                }
        }
        up(&async_serializer);
        return rc;
}

/*
 * Reclassify all tasks in the given core class.
 */

static void ckrm_reclassify_class_tasks(struct ckrm_task_class *cls)
{
        int ce_regd;
        struct ckrm_hnode *cnode;
        struct ckrm_task_class *parcls;
        int num = 0;

        if (!ckrm_validate_and_grab_core(&cls->core))
                return;

        down(&async_serializer);        // protect against race conditions
        TC_DEBUG("start %p:%s:%d:%d\n", cls, cls->core.name,
                 atomic_read(&cls->core.refcnt),
                 atomic_read(&cls->core.hnode.parent->refcnt));
        // If no CE is registered for this classtype, the following will be
        // needed repeatedly, so cache it here.
        ce_regd = class_core(cls)->classtype->ce_regd;
        cnode = &(class_core(cls)->hnode);
        parcls = class_type(ckrm_task_class_t, cnode->parent);

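        /*
         * Pop tasks off the class's object list one at a time; the class
         * lock is dropped for each task because ckrm_set_taskclass() must
         * be called with no class locks held.  Each task moves to whatever
         * class the CE chooses, or to the parent class otherwise.
         */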
      next_task:
        class_lock(class_core(cls));
        if (!list_empty(&class_core(cls)->objlist)) {
                struct ckrm_task_class *newcls = NULL;
                struct task_struct *tsk =
                    list_entry(class_core(cls)->objlist.next,
                               struct task_struct, taskclass_link);

                get_task_struct(tsk);
                class_unlock(class_core(cls));

                if (ce_regd) {
                        CE_CLASSIFY_RET(newcls, &CT_taskclass,
                                        CKRM_EVENT_RECLASSIFY, tsk);
                        if (cls == newcls) {
                                // don't allow reclassifying to the same class
                                // as we are in the process of cleaning up
                                // this class

                                // compensate CE's grab
                                ckrm_core_drop(class_core(newcls));
                                newcls = NULL;
                        }
                }
                if (newcls == NULL) {
                        newcls = parcls;
                        ckrm_core_grab(class_core(newcls));
                }
                ckrm_set_taskclass(tsk, newcls, cls, CKRM_EVENT_RECLASSIFY);
                put_task_struct(tsk);
                num++;
                goto next_task;
        }
        TC_DEBUG("stop  %p:%s:%d:%d   %d\n", cls, cls->core.name,
                 atomic_read(&cls->core.refcnt),
                 atomic_read(&cls->core.hnode.parent->refcnt), num);
        class_unlock(class_core(cls));
        ckrm_core_drop(class_core(cls));

        up(&async_serializer);

        return;
}

/*
 * Change the core class of the given task.
 */

int ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls)
{
        struct task_struct *tsk;

        if (!ckrm_validate_and_grab_core(class_core(cls)))
                return -EINVAL;

        read_lock(&tasklist_lock);
        if ((tsk = find_task_by_pid(pid)) == NULL) {
                read_unlock(&tasklist_lock);
                ckrm_core_drop(class_core(cls));
                return -EINVAL;
        }
        get_task_struct(tsk);
        read_unlock(&tasklist_lock);

        /* Check permissions */
        if ((!capable(CAP_SYS_NICE)) &&
            (!capable(CAP_SYS_RESOURCE)) && (current->user != tsk->user)) {
                ckrm_core_drop(class_core(cls));
                put_task_struct(tsk);
                return -EPERM;
        }

        down(&async_serializer);        // protect against race conditions

        ce_protect(&CT_taskclass);
        ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL);
        ce_release(&CT_taskclass);
        put_task_struct(tsk);

        up(&async_serializer);
        return 0;
}

static struct ckrm_core_class *ckrm_alloc_task_class(struct ckrm_core_class
                                                     *parent, const char *name)
{
        struct ckrm_task_class *taskcls;
        taskcls = kmalloc(sizeof(struct ckrm_task_class), GFP_KERNEL);
        if (taskcls == NULL)
                return NULL;
        memset(taskcls, 0, sizeof(struct ckrm_task_class));

        ckrm_init_core_class(&CT_taskclass, class_core(taskcls), parent, name);

        ce_protect(&CT_taskclass);
        if (CT_taskclass.ce_cb_active && CT_taskclass.ce_callbacks.class_add)
                (*CT_taskclass.ce_callbacks.class_add) (name, taskcls,
                                                        CT_taskclass.typeID);
        ce_release(&CT_taskclass);

        return class_core(taskcls);
}

static int ckrm_free_task_class(struct ckrm_core_class *core)
{
        struct ckrm_task_class *taskcls;

        if (!ckrm_is_core_valid(core)) {
                // Invalid core
                return (-EINVAL);
        }
        if (core == core->classtype->default_class) {
                // reset the name tag
                core->name = dflt_taskclass_name;
                return 0;
        }

        TC_DEBUG("%p:%s:%d\n", core, core->name, atomic_read(&core->refcnt));

        taskcls = class_type(struct ckrm_task_class, core);

        ce_protect(&CT_taskclass);

        if (CT_taskclass.ce_cb_active && CT_taskclass.ce_callbacks.class_delete)
                (*CT_taskclass.ce_callbacks.class_delete) (core->name, taskcls,
                                                           CT_taskclass.typeID);
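        /* move any remaining member tasks out of this class (to the parent,
         * or wherever the CE sends them) before the core is released */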
        ckrm_reclassify_class_tasks(taskcls);

        ce_release(&CT_taskclass);

        ckrm_release_core_class(core);
        // Hubertus .... could just drop the class .. error message
        return 0;
}

void __init ckrm_meta_init_taskclass(void)
{
        printk("...... Initializing ClassType<%s> ........\n",
               CT_taskclass.name);
        // initialize the default class
        ckrm_init_core_class(&CT_taskclass, class_core(&taskclass_dflt_class),
                             NULL, dflt_taskclass_name);

        // register classtype and initialize default task class
        ckrm_register_classtype(&CT_taskclass);
        ckrm_register_event_set(taskclass_events_callbacks);

        // note: registration of all resource controllers will be done
        // later, dynamically, as these are provided as modules
}

static int tc_show_members(struct ckrm_core_class *core, struct seq_file *seq)
{
        struct list_head *lh;
        struct task_struct *tsk;

        class_lock(core);
        list_for_each(lh, &core->objlist) {
                tsk = container_of(lh, struct task_struct, taskclass_link);
                seq_printf(seq, "%ld\n", (long)tsk->pid);
        }
        class_unlock(core);

        return 0;
}

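/*
 * Forced-reclassification entry point driven through RCFS: @obj is
 * expected to hold a decimal pid, which is moved into @target.
 */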
static int tc_forced_reclassify(struct ckrm_core_class *target, const char *obj)
{
        pid_t pid;
        int rc = -EINVAL;

        pid = (pid_t) simple_strtoul(obj, NULL, 10);
        if (pid > 0) {
                rc = ckrm_forced_reclassify_pid(pid,
                                                class_type(ckrm_task_class_t,
                                                           target));
        }
        return rc;
}

#if 1

/******************************************************************************
 * Debugging Task Classes:  Utility functions
 ******************************************************************************/

void check_tasklist_sanity(struct ckrm_task_class *cls)
{
        struct ckrm_core_class *core = class_core(cls);
        struct list_head *lh1, *lh2;
        int count = 0;

        if (core) {
                class_lock(core);
                if (list_empty(&core->objlist)) {
                        class_unlock(core);
                        printk("check_tasklist_sanity: class %s empty list\n",
                               core->name);
                        return;
                }
                list_for_each_safe(lh1, lh2, &core->objlist) {
                        struct task_struct *tsk =
                            container_of(lh1, struct task_struct,
                                         taskclass_link);
                        if (count++ > 20000) {
                                printk("list is CORRUPTED\n");
                                break;
                        }
                        if (tsk->taskclass != cls) {
                                const char *tclsname;
                                tclsname = (tsk->taskclass) ?
                                        class_core(tsk->taskclass)->name : "NULL";
                                printk("sanity: task %s:%d has ckrm_core "
                                       "|%s| but in list |%s|\n", tsk->comm,
                                       tsk->pid, tclsname, core->name);
                        }
                }
                class_unlock(core);
        }
}


void ckrm_debug_free_task_class(struct ckrm_task_class *tskcls)
{
        struct task_struct *proc, *thread;
        int count = 0;

        printk("Analyze Error <%s> %d\n",
               class_core(tskcls)->name,
               atomic_read(&(class_core(tskcls)->refcnt)));

        read_lock(&tasklist_lock);
        class_lock(class_core(tskcls));
        do_each_thread(proc, thread) {
                count += (tskcls == thread->taskclass);
                if ((thread->taskclass == tskcls) || (tskcls == NULL)) {
                        const char *tclsname;
                        tclsname = (thread->taskclass) ?
                                class_core(thread->taskclass)->name : "NULL";
                        printk("%d thread=<%s:%d>  -> <%s> <%lx>\n", count,
                               thread->comm, thread->pid, tclsname,
                               thread->flags & PF_EXITING);
                }
        } while_each_thread(proc, thread);
        class_unlock(class_core(tskcls));
        read_unlock(&tasklist_lock);

        printk("End Analyze Error <%s> %d\n",
               class_core(tskcls)->name,
               atomic_read(&(class_core(tskcls)->refcnt)));
}

#endif