- PL2545 WAR: disable CKRM numtasks controller
[linux-2.6.git] / kernel / ckrm / ckrm_cpu_class.c
1 /* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
2  *
3  * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
4  *           (C) Hubertus Franke, IBM Corp. 2004
5  * 
6  * Latest version, more details at http://ckrm.sf.net
7  * 
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  */
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <asm/errno.h>
18 #include <linux/sched.h>
19 #include <linux/ckrm_events.h>
20 #include <linux/ckrm_rc.h>
21 #include <linux/ckrm_tc.h>
22 #include <linux/ckrm_sched.h>
23 #include <linux/ckrm_classqueue.h>
24 #include <linux/seq_file.h>
25
/* cpu controller descriptor -- fully initialized near the bottom of this file */
struct ckrm_res_ctlr cpu_rcbs;
27
/**
 * insert_cpu_class - insert a class to active_cpu_class list
 *
 * NOTE(review): an earlier comment claimed insertion "in decreasing order
 * of class weight", but list_add() simply prepends @cls at the head of
 * active_cpu_classes -- the list is NOT kept sorted.  Callers must not
 * rely on any ordering.  Caller must hold class_list_lock for writing.
 */
static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
{
        list_add(&cls->links,&active_cpu_classes);
}
37
38 /*
39  *  initialize a class object and its local queues
40  */
41 void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares) 
42 {
43         int i,j,k;      
44         prio_array_t *array;    
45         ckrm_lrq_t* queue;
46
47         cls->shares = *shares;
48         cls->cnt_lock = SPIN_LOCK_UNLOCKED;
49         ckrm_cpu_stat_init(&cls->stat);
50         ckrm_usage_init(&cls->usage);
51         cls->magic = CKRM_CPU_CLASS_MAGIC;
52
53         for (i = 0 ; i < NR_CPUS ; i++) {
54                 queue = &cls->local_queues[i];
55         queue->active   = queue->arrays;
56         queue->expired  = queue->arrays+1;
57         
58         for (j = 0; j < 2; j++) {
59                         array = queue->arrays + j;
60                 for (k = 0; k < MAX_PRIO; k++) {
61                         INIT_LIST_HEAD(array->queue + k);
62                         __clear_bit(k, array->bitmap);
63                 }
64                 // delimiter for bitsearch
65                 __set_bit(MAX_PRIO, array->bitmap);
66                 array->nr_active = 0;
67         }
68         
69         queue->expired_timestamp = 0;
70         
71         queue->cpu_class = cls;
72                 queue->classqueue = get_cpu_classqueue(i);
73         queue->top_priority = MAX_PRIO;
74         cq_node_init(&queue->classqueue_linkobj);
75                 queue->local_cvt = 0;
76         queue->lrq_load = 0;
77         queue->local_weight = cpu_class_weight(cls);
78         queue->uncounted_ns = 0;
79         queue->savings = 0;
80                 queue->magic = 0x43FF43D7;
81         }
82
83         // add to class list
84         write_lock(&class_list_lock);
85         insert_cpu_class(cls);
86         write_unlock(&class_list_lock);
87 }
88
89 static inline void set_default_share(ckrm_shares_t *shares)
90 {
91         shares->my_guarantee     = 0;
92         shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
93         shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
94         shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
95         shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
96         shares->cur_max_limit    = 0;
97 }
98
99 struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
100 {
101         struct ckrm_cpu_class * cls;
102         cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
103         if (valid_cpu_class(cls))
104                 return cls;
105         else
106                 return NULL;
107 }
108
109
110 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent) 
111 {               
112         struct ckrm_cpu_class *cls;
113
114         if (! parent) /*root class*/
115                 cls =  get_default_cpu_class();
116         else
117                 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
118
119         if (cls) {
120                 ckrm_shares_t shares;           
121                 if ((! parent) && (core)) { 
122                         /*
123                          * the default class is already initialized
124                          * so only update the core structure
125                          */
126                         cls->core = core;                       
127                 } else {
128                         set_default_share(&shares);
129                         init_cpu_class(cls,&shares);
130                         cls->core = core;
131                         cls->parent = parent;                   
132                 }
133         } else
134                 printk(KERN_ERR"alloc_cpu_class failed\n");
135
136         return cls;
137 }               
138
139 /*
140  * hzheng: this is not a stable implementation
141  *         need to check race condition issue here
142  */             
143 static void ckrm_free_cpu_class(void *my_res) 
144 {                       
145         struct ckrm_cpu_class *cls = my_res, *parres, *childres;
146         ckrm_core_class_t *child = NULL;
147         int maxlimit;
148         ckrm_lrq_t* queue;
149         int i;
150
151         if (!cls) 
152                 return;
153
154         /*the default class can't be freed*/
155         if (cls == get_default_cpu_class()) 
156                 return;
157 #if 1
158 #warning "ACB: Remove freed class from any classqueues [PL #4233]"
159         for (i = 0 ; i < NR_CPUS ; i++) {
160           queue = &cls->local_queues[i];
161           if (cls_in_classqueue(&queue->classqueue_linkobj))
162             classqueue_dequeue(queue->classqueue,
163                                &queue->classqueue_linkobj);
164         }
165 #endif
166
167         // Assuming there will be no children when this function is called
168         parres = ckrm_get_cpu_class(cls->parent);
169
170         // return child's limit/guarantee to parent node
171         spin_lock(&parres->cnt_lock);
172         child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
173         // run thru parent's children and get the new max_limit of the parent
174         ckrm_lock_hier(parres->core);
175         maxlimit = 0;
176         while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
177                 childres = ckrm_get_cpu_class(child);
178                 if (maxlimit < childres->shares.my_limit) {
179                         maxlimit = childres->shares.my_limit;
180                 }
181         }
182         ckrm_unlock_hier(parres->core);
183         if (parres->shares.cur_max_limit < maxlimit) {
184                 parres->shares.cur_max_limit = maxlimit;
185         }
186
187         spin_unlock(&parres->cnt_lock);
188
189         write_lock(&class_list_lock);
190         list_del(&cls->links);
191         write_unlock(&class_list_lock);
192
193         kfree(cls);
194
195         //call ckrm_cpu_monitor after class removed
196         ckrm_cpu_monitor(0);
197 }                               
198
/*
 * set_share_values callback: apply @new_share to @my_res.
 * the system will adjust to the new share automatically
 *
 * Locking: when the class has a parent, the parent's cnt_lock is taken
 * before the child's (parent-then-child order).
 *
 * NOTE(review): ckrm_get_cpu_class() may return NULL if the parent's
 * cpu class fails validation, and parres is dereferenced below without
 * a NULL check -- TODO confirm the parent class is always valid here.
 */
int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share) 
{       
        struct ckrm_cpu_class *parres, *cls = my_res;
        struct ckrm_shares *cur = &cls->shares, *par;
        int rc = -EINVAL;

        if (!cls)
                return rc;

        if (cls->parent) {
                parres = ckrm_get_cpu_class(cls->parent);
                spin_lock(&parres->cnt_lock);
                spin_lock(&cls->cnt_lock);
                par = &parres->shares;
        } else {
                spin_lock(&cls->cnt_lock);
                par = NULL;
                parres = NULL;
        }

        /*
         * hzheng: CKRM_SHARE_DONTCARE should be handled
         */
        if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
                new_share->my_guarantee = 0;

        rc = set_shares(new_share, cur, par);
        /* a don't-care limit collapses to the current max limit */
        if (cur->my_limit == CKRM_SHARE_DONTCARE)
                cur->my_limit = cur->max_limit;


        spin_unlock(&cls->cnt_lock);
        if (cls->parent) {
                spin_unlock(&parres->cnt_lock);
        }

        //call ckrm_cpu_monitor after the shares are changed
        ckrm_cpu_monitor(0);

        return rc;
}
243                         
244 static int ckrm_cpu_get_share(void *my_res,
245                               struct ckrm_shares *shares)
246 {                       
247         struct ckrm_cpu_class *cls = my_res;
248
249         if (!cls)
250                 return -EINVAL;
251         *shares = cls->shares;
252         return 0;
253 }                               
254
255 int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
256 {
257         struct ckrm_cpu_class *cls = my_res;
258         struct ckrm_cpu_class_stat* stat = &cls->stat;
259         ckrm_lrq_t* lrq;
260         int i;
261
262         if (!cls) 
263                 return -EINVAL;
264
265         seq_printf(sfile, "-------- CPU Class Status Start---------\n");
266         seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
267                    cls->shares.my_guarantee,
268                    cls->shares.my_limit,
269                    cls->shares.total_guarantee,
270                    cls->shares.max_limit);
271         seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
272                    cls->shares.unused_guarantee,
273                    cls->shares.cur_max_limit);
274
275         seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
276         seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
277         seq_printf(sfile, "\tehl= %d\n",stat->ehl);
278         seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
279         seq_printf(sfile, "\teshare= %d\n",stat->eshare);
280         seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
281         seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
282         seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
283         seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
284                    get_ckrm_usage(cls,2*HZ),
285                    get_ckrm_usage(cls,10*HZ),
286                    get_ckrm_usage(cls,60*HZ)
287                    );
288         for_each_online_cpu(i) {
289                 lrq = get_ckrm_lrq(cls,i);              
290                 seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
291         }
292
293         seq_printf(sfile, "-------- CPU Class Status END ---------\n");
294
295         return 0;
296 }
297
298 /*
299  * task will remain in the same cpu but on a different local runqueue
300  */
301 void ckrm_cpu_change_class(void *task, void *old, void *new)
302 {               
303         struct task_struct *tsk = task;                    
304         struct ckrm_cpu_class *newcls = new;
305
306         /*sanity checking*/
307         if (!task || ! old || !new)
308                 return; 
309
310         _ckrm_cpu_change_class(tsk,newcls);
311 }                                                       
312
313 /*dummy function, not used*/
314 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
315 {
316         struct ckrm_cpu_class *cls = my_res;
317
318         if (!cls) 
319                 return -EINVAL;
320
321         seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
322         return 0;
323 }
324
325 /*dummy function, not used*/
326 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
327 {
328         struct ckrm_cpu_class *cls = my_res;
329
330         if (!cls) 
331                 return -EINVAL;
332         printk(KERN_DEBUG "ckrm_cpu config='%s'\n",cfgstr);
333         return 0;
334 }
335         
/*
 * CKRM resource controller descriptor for the cpu controller.
 * Registered with the "taskclass" classtype in init_ckrm_sched_res();
 * resid stays -1 until registration succeeds.
 */
struct ckrm_res_ctlr cpu_rcbs = {
        .res_name          = "cpu",
        .res_hdepth        = 1,
        .resid             = -1,
        .res_alloc         = ckrm_alloc_cpu_class,
        .res_free          = ckrm_free_cpu_class,
        .set_share_values  = ckrm_cpu_set_share,
        .get_share_values  = ckrm_cpu_get_share,
        .get_stats         = ckrm_cpu_get_stats,
        .show_config       = ckrm_cpu_show_config,
        .set_config        = ckrm_cpu_set_config,
        .change_resclass   = ckrm_cpu_change_class,
};
349
350 int __init init_ckrm_sched_res(void)
351 {
352         struct ckrm_classtype *clstype;
353         int resid = cpu_rcbs.resid;
354
355         clstype = ckrm_find_classtype_by_name("taskclass");
356         if (clstype == NULL) {
357                 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
358                 return -ENOENT;
359         }
360
361         if (resid == -1) { /*not registered */
362                 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
363                 printk(KERN_DEBUG "........init_ckrm_sched_res , resid= %d\n",resid);
364         }
365         return 0;
366 }
367
368 /*
369  * initialize the class structure
370  * add the default class: class 0
371  */
372 void init_cpu_classes(void) 
373 {
374         int i;
375
376         //init classqueues for each processor
377         for (i=0; i < NR_CPUS; i++)
378                 classqueue_init(get_cpu_classqueue(i)); 
379
380         /*
381          * hzheng: initialize the default cpu class
382          *  required for E14/E15 since ckrm_init is called after sched_init
383          */
384         ckrm_alloc_cpu_class(NULL,NULL);
385         }
386
387
388 EXPORT_SYMBOL(ckrm_get_cpu_class);