Merge in the version 7 cpu controller from CKRM.
[linux-2.6.git] / kernel / ckrm / ckrm_cpu_class.c
1 /* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
2  *
3  * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
4  *           (C) Hubertus Franke, IBM Corp. 2004
5  * 
6  * Latest version, more details at http://ckrm.sf.net
7  * 
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  */
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <asm/errno.h>
18 #include <linux/sched.h>
19 #include <linux/ckrm.h>
20 #include <linux/ckrm_rc.h>
21 #include <linux/ckrm_tc.h>
22 #include <linux/ckrm_sched.h>
23 #include <linux/ckrm_classqueue.h>
24 #include <linux/seq_file.h>
25
26 struct ckrm_res_ctlr cpu_rcbs;
27
/**
 * insert_cpu_class - insert a class to active_cpu_class list
 *
 * NOTE(review): the historical comment claimed the class is inserted in
 * decreasing order of class weight, but the code simply adds at the list
 * head -- the list is NOT kept sorted.  Confirm no traversal of
 * active_cpu_classes relies on weight ordering.
 * Caller must hold class_list_lock for writing.
 */
static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
{
	list_add(&cls->links,&active_cpu_classes);
}
37
38 /*
39  *  initialize a class object and its local queues
40  */
41 void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares) 
42 {
43         int i,j,k;      
44         prio_array_t *array;    
45         ckrm_lrq_t* queue;
46
47         cls->shares = *shares;
48         cls->cnt_lock = SPIN_LOCK_UNLOCKED;
49         ckrm_cpu_stat_init(&cls->stat);
50         ckrm_usage_init(&cls->usage);
51         cls->magic = CKRM_CPU_CLASS_MAGIC;
52
53         for (i = 0 ; i < NR_CPUS ; i++) {
54                 queue = &cls->local_queues[i];
55                 queue->active  = queue->arrays;
56                 queue->expired = queue->arrays+1;
57                 
58                 for (j = 0; j < 2; j++) {
59                         array = queue->arrays + j;
60                         for (k = 0; k < MAX_PRIO; k++) {
61                                 INIT_LIST_HEAD(array->queue + k);
62                                 __clear_bit(k, array->bitmap);
63                         }
64                         // delimiter for bitsearch
65                         __set_bit(MAX_PRIO, array->bitmap);
66                         array->nr_active = 0;
67                 }
68
69                 queue->expired_timestamp = 0;
70                 
71                 queue->cpu_class = cls;
72                 queue->classqueue = get_cpu_classqueue(i);
73                 queue->top_priority = MAX_PRIO;
74                 cq_node_init(&queue->classqueue_linkobj);
75                 queue->local_cvt = 0;
76                 queue->lrq_load = 0;
77                 queue->local_weight = cpu_class_weight(cls);
78                 queue->uncounted_ns = 0;
79                 queue->savings = 0;
80                 queue->magic = 0x43FF43D7;
81         }
82
83         // add to class list
84         write_lock(&class_list_lock);
85         insert_cpu_class(cls);
86         write_unlock(&class_list_lock);
87 }
88
89 static inline void set_default_share(ckrm_shares_t *shares)
90 {
91         shares->my_guarantee     = 0;
92         shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
93         shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
94         shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
95         shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
96         shares->cur_max_limit    = 0;
97 }
98
99 struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
100 {
101         struct ckrm_cpu_class * cls;
102         cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
103         if (valid_cpu_class(cls))
104                 return cls;
105         else
106                 return NULL;
107 }
108
109
110 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent) 
111 {               
112         struct ckrm_cpu_class *cls;
113
114         if (! parent) /*root class*/
115                 cls =  get_default_cpu_class();
116         else
117                 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
118
119         if (cls) {
120                 ckrm_shares_t shares;           
121                 if ((! parent) && (core)) { 
122                         /*
123                          * the default class is already initialized
124                          * so only update the core structure
125                          */
126                         cls->core = core;                       
127                 } else {
128                         set_default_share(&shares);
129                         init_cpu_class(cls,&shares);
130                         cls->core = core;
131                         cls->parent = parent;
132                 }
133         } else
134                 printk(KERN_ERR"alloc_cpu_class failed\n");
135
136         return cls;
137 }               
138
139 /*
140  * hzheng: this is not a stable implementation
141  *         need to check race condition issue here
142  */             
143 static void ckrm_free_cpu_class(void *my_res) 
144 {                       
145         struct ckrm_cpu_class *cls = my_res, *parres, *childres;
146         ckrm_core_class_t *child = NULL;
147         int maxlimit;
148
149         if (!cls) 
150                 return;
151
152         /*the default class can't be freed*/
153         if (cls == get_default_cpu_class()) 
154                 return;
155
156         // Assuming there will be no children when this function is called
157         parres = ckrm_get_cpu_class(cls->parent);
158
159         // return child's limit/guarantee to parent node
160         spin_lock(&parres->cnt_lock);
161         child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
162         // run thru parent's children and get the new max_limit of the parent
163         ckrm_lock_hier(parres->core);
164         maxlimit = 0;
165         while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
166                 childres = ckrm_get_cpu_class(child);
167                 if (maxlimit < childres->shares.my_limit) {
168                         maxlimit = childres->shares.my_limit;
169                 }
170         }
171         ckrm_unlock_hier(parres->core);
172         if (parres->shares.cur_max_limit < maxlimit) {
173                 parres->shares.cur_max_limit = maxlimit;
174         }
175
176         spin_unlock(&parres->cnt_lock);
177
178         write_lock(&class_list_lock);
179         list_del(&cls->links);
180         write_unlock(&class_list_lock);
181
182         kfree(cls);
183 }                               
184
185 /*
186  *  the system will adjust to the new share automatically  
187  */                     
188 int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share) 
189 {       
190         struct ckrm_cpu_class *parres, *cls = my_res;
191         struct ckrm_shares *cur = &cls->shares, *par;
192         int rc = -EINVAL;
193
194         if (!cls) 
195                 return rc;
196
197         if (cls->parent) {
198                 parres = ckrm_get_cpu_class(cls->parent);
199                 spin_lock(&parres->cnt_lock);
200                 spin_lock(&cls->cnt_lock);
201                 par = &parres->shares;
202         } else {
203                 spin_lock(&cls->cnt_lock);
204                 par = NULL;
205                 parres = NULL;
206         }
207
208         /*
209          * hzheng: CKRM_SHARE_DONTCARE should be handled
210          */
211         if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
212                 new_share->my_guarantee = 0;
213
214         rc = set_shares(new_share, cur, par);
215         if (cur->my_limit == CKRM_SHARE_DONTCARE)
216                 cur->my_limit = cur->max_limit;
217
218
219         spin_unlock(&cls->cnt_lock);
220         if (cls->parent) {
221                 spin_unlock(&parres->cnt_lock);
222         }
223         return rc;
224 }                                                       
225                         
226 static int ckrm_cpu_get_share(void *my_res,
227                               struct ckrm_shares *shares)
228 {                       
229         struct ckrm_cpu_class *cls = my_res;
230
231         if (!cls) 
232                 return -EINVAL;
233         *shares = cls->shares;
234         return 0;
235 }                               
236
237 int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
238 {
239         struct ckrm_cpu_class *cls = my_res;
240         struct ckrm_cpu_class_stat* stat = &cls->stat;
241         ckrm_lrq_t* lrq;
242         int i;
243
244         if (!cls) 
245                 return -EINVAL;
246
247         seq_printf(sfile, "-------- CPU Class Status Start---------\n");
248         seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
249                    cls->shares.my_guarantee,
250                    cls->shares.my_limit,
251                    cls->shares.total_guarantee,
252                    cls->shares.max_limit);
253         seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
254                    cls->shares.unused_guarantee,
255                    cls->shares.cur_max_limit);
256
257         seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
258         seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
259         seq_printf(sfile, "\tehl= %d\n",stat->ehl);
260         seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
261         seq_printf(sfile, "\teshare= %d\n",stat->eshare);
262         seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
263         seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
264         seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
265         seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
266                    get_ckrm_usage(cls,2*HZ),
267                    get_ckrm_usage(cls,10*HZ),
268                    get_ckrm_usage(cls,60*HZ)
269                    );
270         for_each_online_cpu(i) {
271                 lrq = get_ckrm_lrq(cls,i);              
272                 seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav=%lu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
273         }
274
275         seq_printf(sfile, "-------- CPU Class Status END ---------\n");
276
277         return 0;
278 }
279
280 /*
281  * task will remain in the same cpu but on a different local runqueue
282  */
283 void ckrm_cpu_change_class(void *task, void *old, void *new)
284 {               
285         struct task_struct *tsk = task;                    
286         struct ckrm_cpu_class *newcls = new;
287
288         /*sanity checking*/
289         if (!task || ! old || !new)
290                 return; 
291
292         _ckrm_cpu_change_class(tsk,newcls);
293 }                                                       
294
295 /*dummy function, not used*/
296 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
297 {
298         struct ckrm_cpu_class *cls = my_res;
299
300         if (!cls) 
301                 return -EINVAL;
302
303         seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
304         return 0;
305 }
306
307 /*dummy function, not used*/
308 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
309 {
310         struct ckrm_cpu_class *cls = my_res;
311
312         if (!cls) 
313                 return -EINVAL;
314         printk("ckrm_cpu config='%s'\n",cfgstr);
315         return 0;
316 }
317         
/* resource controller callback table registered with the CKRM core;
 * resid is filled in by ckrm_register_res_ctlr() at init time */
struct ckrm_res_ctlr cpu_rcbs = {
	.res_name          = "cpu",
	.res_hdepth        = 1,			/* hierarchy depth supported */
	.resid             = -1,		/* -1 = not yet registered */
	.res_alloc         = ckrm_alloc_cpu_class,
	.res_free          = ckrm_free_cpu_class,
	.set_share_values  = ckrm_cpu_set_share,
	.get_share_values  = ckrm_cpu_get_share,
	.get_stats         = ckrm_cpu_get_stats,
	.show_config       = ckrm_cpu_show_config,	/* dummy */
	.set_config        = ckrm_cpu_set_config,	/* dummy */
	.change_resclass   = ckrm_cpu_change_class,
};
331
332 int __init init_ckrm_sched_res(void)
333 {
334         struct ckrm_classtype *clstype;
335         int resid = cpu_rcbs.resid;
336
337         clstype = ckrm_find_classtype_by_name("taskclass");
338         if (clstype == NULL) {
339                 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
340                 return -ENOENT;
341         }
342
343         if (resid == -1) { /*not registered */
344                 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
345                 printk("........init_ckrm_sched_res , resid= %d\n",resid);
346         }
347         return 0;
348 }
349
350 /*
351  * initialize the class structure
352  * add the default class: class 0
353  */
354 void init_cpu_classes(void) 
355 {
356         int i;
357
358         //init classqueues for each processor
359         for (i=0; i < NR_CPUS; i++)
360                 classqueue_init(get_cpu_classqueue(i)); 
361
362         /*
363          * hzheng: initialize the default cpu class
364          *  required for E14/E15 since ckrm_init is called after sched_init
365          */
366         ckrm_alloc_cpu_class(NULL,NULL);
367 }
368
369
370 EXPORT_SYMBOL(ckrm_get_cpu_class);