/*
 * various fixes to ckrm core and the cpu controller
 * (from linux-2.6.git, kernel/ckrm/ckrm_cpu_class.c)
 */
1 /* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
2  *
3  * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
4  *           (C) Hubertus Franke, IBM Corp. 2004
5  * 
6  * Latest version, more details at http://ckrm.sf.net
7  * 
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  */
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <asm/errno.h>
18 #include <linux/sched.h>
19 #include <linux/ckrm.h>
20 #include <linux/ckrm_rc.h>
21 #include <linux/ckrm_tc.h>
22 #include <linux/ckrm_sched.h>
23 #include <linux/ckrm_classqueue.h>
24 #include <linux/seq_file.h>
25
26 struct ckrm_res_ctlr cpu_rcbs;
27
/**
 * insert_cpu_class - add a class to the active_cpu_classes list
 *
 * NOTE(review): the original comment claimed insertion "in decreasing
 * order of class weight", but list_add() simply inserts at the list
 * head, so the list is unordered.  Confirm whether any consumer of
 * active_cpu_classes relies on weight ordering.
 *
 * Caller must hold class_list_lock for writing (see init_cpu_class()).
 */
static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
{
	list_add(&cls->links,&active_cpu_classes);
}
37
/*
 *  initialize a class object and its local queues
 *
 *  Copies the caller-supplied initial shares, initializes the class
 *  statistics and spinlock, sets up one local runqueue per possible
 *  CPU (each with two priority arrays, active and expired, in the
 *  style of the O(1) scheduler), and finally links the class onto the
 *  global active_cpu_classes list under class_list_lock.
 */
void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares) 
{
	int i,j,k;	
	prio_array_t *array; 	
	ckrm_lrq_t* queue;

	/* take a copy of the caller-supplied initial shares */
	cls->shares = *shares;
	cls->cnt_lock = SPIN_LOCK_UNLOCKED;
	ckrm_cpu_stat_init(&cls->stat);
	cls->magic = CKRM_CPU_CLASS_MAGIC;	/* checked by valid_cpu_class() */

	/* set up one local runqueue per possible CPU */
	for (i = 0 ; i < NR_CPUS ; i++) {
		queue = &cls->local_queues[i];
		queue->active  = queue->arrays;
		queue->expired = queue->arrays+1;
		
		/* empty both priority arrays (active and expired) */
		for (j = 0; j < 2; j++) {
			array = queue->arrays + j;
			for (k = 0; k < MAX_PRIO; k++) {
				INIT_LIST_HEAD(array->queue + k);
				__clear_bit(k, array->bitmap);
			}
			// delimiter for bitsearch
			__set_bit(MAX_PRIO, array->bitmap);
			array->nr_active = 0;
		}

		queue->expired_timestamp = 0;
		
		queue->cpu_class = cls;
		queue->classqueue = get_cpu_classqueue(i);
		queue->top_priority = MAX_PRIO;		/* MAX_PRIO == empty queue */
		cq_node_init(&queue->classqueue_linkobj);
		/* start local CVT with a bonus — presumably so a fresh class
		 * isn't penalized against long-running ones; TODO confirm */
		queue->local_cvt = CVT_INTERACTIVE_BONUS;
		queue->lrq_load = 0;
		queue->local_weight = cpu_class_weight(cls);
		queue->uncounted_ns = 0;
		queue->magic = 0x43FF43D7;	/* lrq sanity-check cookie */
	}

	// add to class list
	write_lock(&class_list_lock);
	insert_cpu_class(cls);
	write_unlock(&class_list_lock);
}
86
87 static inline void set_default_share(ckrm_shares_t *shares)
88 {
89         shares->my_guarantee     = 0;
90         shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
91         shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
92         shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
93         shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
94         shares->cur_max_limit    = 0;
95 }
96
97 struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
98 {
99         struct ckrm_cpu_class * cls;
100         cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
101         if (valid_cpu_class(cls))
102                 return cls;
103         else
104                 return NULL;
105 }
106
107
108 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent) 
109 {               
110         struct ckrm_cpu_class *cls;
111
112         if (! parent) /*root class*/
113                 cls =  get_default_cpu_class();
114         else
115                 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
116
117         if (cls) {
118                 ckrm_shares_t shares;           
119                 if ((! parent) && (core)) { 
120                         /*
121                          * the default class is already initialized
122                          * so only update the core structure
123                          */
124                         cls->core = core;                       
125                 } else {
126                         set_default_share(&shares);
127                         init_cpu_class(cls,&shares);
128                         cls->core = core;
129                         cls->parent = parent;
130                 }
131         } else
132                 printk(KERN_ERR"alloc_cpu_class failed\n");
133
134         return cls;
135 }               
136
137 /*
138  * hzheng: this is not a stable implementation
139  *         need to check race condition issue here
140  */             
141 static void ckrm_free_cpu_class(void *my_res) 
142 {                       
143         struct ckrm_cpu_class *cls = my_res, *parres, *childres;
144         ckrm_core_class_t *child = NULL;
145         int maxlimit;
146
147         if (!cls) 
148                 return;
149
150         /*the default class can't be freed*/
151         if (cls == get_default_cpu_class()) 
152                 return;
153
154         // Assuming there will be no children when this function is called
155         parres = ckrm_get_cpu_class(cls->parent);
156
157         // return child's limit/guarantee to parent node
158         spin_lock(&parres->cnt_lock);
159         child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
160         // run thru parent's children and get the new max_limit of the parent
161         ckrm_lock_hier(parres->core);
162         maxlimit = 0;
163         while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
164                 childres = ckrm_get_cpu_class(child);
165                 if (maxlimit < childres->shares.my_limit) {
166                         maxlimit = childres->shares.my_limit;
167                 }
168         }
169         ckrm_unlock_hier(parres->core);
170         if (parres->shares.cur_max_limit < maxlimit) {
171                 parres->shares.cur_max_limit = maxlimit;
172         }
173
174         spin_unlock(&parres->cnt_lock);
175
176         write_lock(&class_list_lock);
177         list_del(&cls->links);
178         write_unlock(&class_list_lock);
179
180         kfree(cls);
181 }                               
182
183 /*
184  *  the system will adjust to the new share automatically  
185  */                     
186 int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share) 
187 {       
188         struct ckrm_cpu_class *parres, *cls = my_res;
189         struct ckrm_shares *cur = &cls->shares, *par;
190         int rc = -EINVAL;
191
192         if (!cls) 
193                 return rc;
194
195         if (cls->parent) {
196                 parres = ckrm_get_cpu_class(cls->parent);
197                 spin_lock(&parres->cnt_lock);
198                 spin_lock(&cls->cnt_lock);
199                 par = &parres->shares;
200         } else {
201                 spin_lock(&cls->cnt_lock);
202                 par = NULL;
203                 parres = NULL;
204         }
205
206         /*
207          * hzheng: CKRM_SHARE_DONTCARE should be handled
208          */
209         if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
210                 new_share->my_guarantee = 0;
211
212         rc = set_shares(new_share, cur, par);
213         if (cur->my_limit == CKRM_SHARE_DONTCARE)
214                 cur->my_limit = cur->max_limit;
215
216
217         spin_unlock(&cls->cnt_lock);
218         if (cls->parent) {
219                 spin_unlock(&parres->cnt_lock);
220         }
221         return rc;
222 }                                                       
223                         
224 static int ckrm_cpu_get_share(void *my_res,
225                               struct ckrm_shares *shares)
226 {                       
227         struct ckrm_cpu_class *cls = my_res;
228
229         if (!cls) 
230                 return -EINVAL;
231         *shares = cls->shares;
232         return 0;
233 }                               
234
235 int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
236 {
237         struct ckrm_cpu_class *cls = my_res;
238         struct ckrm_cpu_class_stat* stat = &cls->stat;
239         ckrm_lrq_t* lrq;
240         int i;
241
242         if (!cls) 
243                 return -EINVAL;
244
245         seq_printf(sfile, "-------- CPU Class Status Start---------\n");
246         seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
247                    cls->shares.my_guarantee,
248                    cls->shares.my_limit,
249                    cls->shares.total_guarantee,
250                    cls->shares.max_limit);
251         seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
252                    cls->shares.unused_guarantee,
253                    cls->shares.cur_max_limit);
254
255         seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
256         seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
257         seq_printf(sfile, "\tehl= %d\n",stat->ehl);
258         seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
259         seq_printf(sfile, "\teshare= %d\n",stat->eshare);
260         seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
261         seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
262         seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
263         for_each_online_cpu(i) {
264                 lrq = get_ckrm_lrq(cls,i);              
265                 seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt);
266         }
267
268         seq_printf(sfile, "-------- CPU Class Status END ---------\n");
269
270         return 0;
271 }
272
273 /*
274  * task will remain in the same cpu but on a different local runqueue
275  */
276 void ckrm_cpu_change_class(void *task, void *old, void *new)
277 {               
278         struct task_struct *tsk = task;                    
279         struct ckrm_cpu_class *newcls = new;
280
281         /*sanity checking*/
282         if (!task || ! old || !new)
283                 return; 
284
285         _ckrm_cpu_change_class(tsk,newcls);
286 }                                                       
287
288 /*dummy function, not used*/
289 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
290 {
291         struct ckrm_cpu_class *cls = my_res;
292
293         if (!cls) 
294                 return -EINVAL;
295
296         seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
297         return 0;
298 }
299
300 /*dummy function, not used*/
301 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
302 {
303         struct ckrm_cpu_class *cls = my_res;
304
305         if (!cls) 
306                 return -EINVAL;
307         printk("ckrm_cpu config='%s'\n",cfgstr);
308         return 0;
309 }
310         
/*
 * CPU resource controller callback table, registered with the ckrm
 * core under the "taskclass" classtype (see init_ckrm_sched_res()).
 */
struct ckrm_res_ctlr cpu_rcbs = {
	.res_name          = "cpu",
	.res_hdepth        = 1,			/* hierarchy depth supported */
	.resid             = -1,		/* -1 until registration assigns an id */
	.res_alloc         = ckrm_alloc_cpu_class,
	.res_free          = ckrm_free_cpu_class,
	.set_share_values  = ckrm_cpu_set_share,
	.get_share_values  = ckrm_cpu_get_share,
	.get_stats         = ckrm_cpu_get_stats,
	.show_config       = ckrm_cpu_show_config,	/* dummy, not used */
	.set_config        = ckrm_cpu_set_config,	/* dummy, not used */
	.change_resclass   = ckrm_cpu_change_class,
};
324
325 int __init init_ckrm_sched_res(void)
326 {
327         struct ckrm_classtype *clstype;
328         int resid = cpu_rcbs.resid;
329
330         clstype = ckrm_find_classtype_by_name("taskclass");
331         if (clstype == NULL) {
332                 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
333                 return -ENOENT;
334         }
335
336         if (resid == -1) { /*not registered */
337                 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
338                 printk("........init_ckrm_sched_res , resid= %d\n",resid);
339         }
340         return 0;
341 }
342
343 /*
344  * initialize the class structure
345  * add the default class: class 0
346  */
347 void init_cpu_classes(void) 
348 {
349         int i;
350
351         //init classqueues for each processor
352         for (i=0; i < NR_CPUS; i++)
353                 classqueue_init(get_cpu_classqueue(i)); 
354
355         /*
356          * hzheng: initialize the default cpu class
357          *  required for E14/E15 since ckrm_init is called after sched_init
358          */
359         ckrm_alloc_cpu_class(NULL,NULL);
360 }
361
362
363 EXPORT_SYMBOL(ckrm_get_cpu_class);