/* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
 *
 * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
 *           (C) Hubertus Franke, IBM Corp. 2004
 *
 * Latest version, more details at http://ckrm.sf.net
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/errno.h>
#include <linux/sched.h>
#include <linux/ckrm.h>
#include <linux/ckrm_rc.h>
#include <linux/ckrm_tc.h>
#include <linux/ckrm_sched.h>
#include <linux/ckrm_classqueue.h>
#include <linux/seq_file.h>

struct ckrm_res_ctlr cpu_rcbs;

/**
 * insert_cpu_class - insert a class into the active_cpu_classes list
 *
 * The class is currently added at the head of the list; ordering by
 * class weight is not enforced here.
 */
static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
{
        list_add(&cls->links, &active_cpu_classes);
}
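
/*
 * active_cpu_classes and class_list_lock are defined elsewhere in the CKRM
 * scheduler code; all list updates in this file take class_list_lock for
 * writing.
 */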

/*
 * initialize a class object and its per-cpu local runqueues
 */
void init_cpu_class(struct ckrm_cpu_class *cls, ckrm_shares_t *shares)
{
        int i, j, k;
        prio_array_t *array;
        ckrm_lrq_t *queue;

        cls->shares = *shares;
        cls->cnt_lock = SPIN_LOCK_UNLOCKED;
        ckrm_cpu_stat_init(&cls->stat);
        ckrm_usage_init(&cls->usage);
        cls->magic = CKRM_CPU_CLASS_MAGIC;

        for (i = 0; i < NR_CPUS; i++) {
                queue = &cls->local_queues[i];
                queue->active  = queue->arrays;
                queue->expired = queue->arrays + 1;

                /* both priority arrays start out empty */
                for (j = 0; j < 2; j++) {
                        array = queue->arrays + j;
                        for (k = 0; k < MAX_PRIO; k++) {
                                INIT_LIST_HEAD(array->queue + k);
                                __clear_bit(k, array->bitmap);
                        }
                        /* delimiter for bitsearch */
                        __set_bit(MAX_PRIO, array->bitmap);
                        array->nr_active = 0;
                }

                queue->expired_timestamp = 0;

                queue->cpu_class = cls;
                queue->classqueue = get_cpu_classqueue(i);
                queue->top_priority = MAX_PRIO;
                cq_node_init(&queue->classqueue_linkobj);
                queue->local_cvt = 0;
                queue->lrq_load = 0;
                queue->local_weight = cpu_class_weight(cls);
                queue->uncounted_ns = 0;
                queue->savings = 0;
                queue->magic = 0x43FF43D7;
        }

        /* add to the global class list */
        write_lock(&class_list_lock);
        insert_cpu_class(cls);
        write_unlock(&class_list_lock);
}

static inline void set_default_share(ckrm_shares_t *shares)
{
        shares->my_guarantee     = 0;
        shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
        shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
        shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
        shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
        shares->cur_max_limit    = 0;
}

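/*
 * Return the cpu class attached to @core, or NULL if it fails the
 * valid_cpu_class() check.
 */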
struct ckrm_cpu_class *ckrm_get_cpu_class(struct ckrm_core_class *core)
{
        struct ckrm_cpu_class *cls;

        cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
        if (valid_cpu_class(cls))
                return cls;
        else
                return NULL;
}

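/*
 * Allocate the cpu class for @core.  The root class (no parent) reuses the
 * statically allocated default class, which is already initialized; child
 * classes are allocated with kmalloc(GFP_ATOMIC) and start out with the
 * default shares.
 */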
void *ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent)
{
        struct ckrm_cpu_class *cls;

        if (!parent) /* root class */
                cls = get_default_cpu_class();
        else
                cls = kmalloc(sizeof(struct ckrm_cpu_class), GFP_ATOMIC);

        if (cls) {
                ckrm_shares_t shares;

                if (!parent && core) {
                        /*
                         * the default class is already initialized,
                         * so only update the core structure
                         */
                        cls->core = core;
                } else {
                        set_default_share(&shares);
                        init_cpu_class(cls, &shares);
                        cls->core = core;
                        cls->parent = parent;
                }
        } else
                printk(KERN_ERR "alloc_cpu_class failed\n");

        return cls;
}

/*
 * hzheng: this is not a stable implementation;
 *         the race conditions here still need to be checked
 */
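/*
 * Teardown: return the class's guarantee to the parent, walk the parent's
 * remaining children to find the largest my_limit and raise the parent's
 * cur_max_limit if it is below that, unlink the class from
 * active_cpu_classes, free it, and call ckrm_cpu_monitor() afterwards.
 */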
static void ckrm_free_cpu_class(void *my_res)
{
        struct ckrm_cpu_class *cls = my_res, *parres, *childres;
        ckrm_core_class_t *child = NULL;
        int maxlimit;

        if (!cls)
                return;

        /* the default class can't be freed */
        if (cls == get_default_cpu_class())
                return;

        /* assuming there will be no children when this function is called */
        parres = ckrm_get_cpu_class(cls->parent);

        /* return the child's limit/guarantee to the parent node */
        spin_lock(&parres->cnt_lock);
        child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);

        /* run through the parent's children and get the parent's new max_limit */
        ckrm_lock_hier(parres->core);
        maxlimit = 0;
        while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
                childres = ckrm_get_cpu_class(child);
                if (maxlimit < childres->shares.my_limit)
                        maxlimit = childres->shares.my_limit;
        }
        ckrm_unlock_hier(parres->core);
        if (parres->shares.cur_max_limit < maxlimit)
                parres->shares.cur_max_limit = maxlimit;

        spin_unlock(&parres->cnt_lock);

        write_lock(&class_list_lock);
        list_del(&cls->links);
        write_unlock(&class_list_lock);

        kfree(cls);

        /* call ckrm_cpu_monitor after the class has been removed */
        ckrm_cpu_monitor(0);
}

/*
 * the system will adjust to the new share automatically
 */
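/*
 * Lock ordering: when the class has a parent, the parent's cnt_lock is
 * taken before the child's and released after it.
 */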
int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share)
{
        struct ckrm_cpu_class *parres, *cls = my_res;
        struct ckrm_shares *cur = &cls->shares, *par;
        int rc = -EINVAL;

        if (!cls) {
                printk("ckrm_cpu_set_share: cls == NULL\n");
                return rc;
        }

        if (cls->parent) {
                parres = ckrm_get_cpu_class(cls->parent);
                spin_lock(&parres->cnt_lock);
                spin_lock(&cls->cnt_lock);
                par = &parres->shares;
        } else {
                spin_lock(&cls->cnt_lock);
                par = NULL;
                parres = NULL;
        }

        /*
         * hzheng: CKRM_SHARE_DONTCARE should be handled
         */
        if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
                new_share->my_guarantee = 0;

        rc = set_shares(new_share, cur, par);
        if (cur->my_limit == CKRM_SHARE_DONTCARE)
                cur->my_limit = cur->max_limit;

        spin_unlock(&cls->cnt_lock);
        if (cls->parent)
                spin_unlock(&parres->cnt_lock);

        /* call ckrm_cpu_monitor after the shares have been changed */
        ckrm_cpu_monitor(0);

        return rc;
}

static int ckrm_cpu_get_share(void *my_res,
                              struct ckrm_shares *shares)
{
        struct ckrm_cpu_class *cls = my_res;

        if (!cls)
                return -EINVAL;
        *shares = cls->shares;
        return 0;
}

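/*
 * Dump the class's share settings, effective share statistics and per-cpu
 * local runqueue state to the given seq_file.
 */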
int ckrm_cpu_get_stats(void *my_res, struct seq_file *sfile)
{
        struct ckrm_cpu_class *cls = my_res;
        struct ckrm_cpu_class_stat *stat;
        ckrm_lrq_t *lrq;
        int i;

        if (!cls)
                return -EINVAL;
        stat = &cls->stat;

        seq_printf(sfile, "-------- CPU Class Status Start---------\n");
        seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
                   cls->shares.my_guarantee,
                   cls->shares.my_limit,
                   cls->shares.total_guarantee,
                   cls->shares.max_limit);
        seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
                   cls->shares.unused_guarantee,
                   cls->shares.cur_max_limit);

        seq_printf(sfile, "Effective:\n\tegrt= %d\n", stat->egrt);
        seq_printf(sfile, "\tmegrt= %d\n", stat->megrt);
        seq_printf(sfile, "\tehl= %d\n", stat->ehl);
        seq_printf(sfile, "\tmehl= %d\n", stat->mehl);
        seq_printf(sfile, "\teshare= %d\n", stat->eshare);
        seq_printf(sfile, "\tmeshare= %d\n", cpu_class_weight(cls));
        seq_printf(sfile, "\tmax_demand= %lu\n", stat->max_demand);
        seq_printf(sfile, "\ttotal_ns= %llu\n", stat->total_ns);
        seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
                   get_ckrm_usage(cls, 2*HZ),
                   get_ckrm_usage(cls, 10*HZ),
                   get_ckrm_usage(cls, 60*HZ));
        for_each_online_cpu(i) {
                lrq = get_ckrm_lrq(cls, i);
                seq_printf(sfile,
                           "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",
                           i, stat->local_stats[i].cpu_demand,
                           local_class_weight(lrq), lrq->lrq_load,
                           lrq->local_cvt, lrq->savings);
        }

        seq_printf(sfile, "-------- CPU Class Status END ---------\n");

        return 0;
}

/*
 * the task remains on the same cpu but moves to a different local runqueue
 */
void ckrm_cpu_change_class(void *task, void *old, void *new)
{
        struct task_struct *tsk = task;
        struct ckrm_cpu_class *newcls = new;

        /* sanity checking */
        if (!task || !old || !new)
                return;

        _ckrm_cpu_change_class(tsk, newcls);
}

/* dummy function, not used */
static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
{
        struct ckrm_cpu_class *cls = my_res;

        if (!cls)
                return -EINVAL;

        seq_printf(sfile, "cls=%s,parameter=somevalue\n", "ckrm_cpu class");
        return 0;
}

/* dummy function, not used */
static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
{
        struct ckrm_cpu_class *cls = my_res;

        if (!cls)
                return -EINVAL;
        printk(KERN_DEBUG "ckrm_cpu config='%s'\n", cfgstr);
        return 0;
}

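/*
 * Callback table registered with the CKRM core for the "cpu" resource
 * controller.  resid stays -1 until the controller is registered in
 * init_ckrm_sched_res() below.
 */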
struct ckrm_res_ctlr cpu_rcbs = {
        .res_name          = "cpu",
        .res_hdepth        = 1,
        .resid             = -1,
        .res_alloc         = ckrm_alloc_cpu_class,
        .res_free          = ckrm_free_cpu_class,
        .set_share_values  = ckrm_cpu_set_share,
        .get_share_values  = ckrm_cpu_get_share,
        .get_stats         = ckrm_cpu_get_stats,
        .show_config       = ckrm_cpu_show_config,
        .set_config        = ckrm_cpu_set_config,
        .change_resclass   = ckrm_cpu_change_class,
};

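/*
 * Register the cpu controller with the "taskclass" classtype; nothing is
 * done if the controller has already been registered (resid != -1).
 */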
int __init init_ckrm_sched_res(void)
{
        struct ckrm_classtype *clstype;
        int resid = cpu_rcbs.resid;

        clstype = ckrm_find_classtype_by_name("taskclass");
        if (clstype == NULL) {
                printk(KERN_INFO "Unknown ckrm classtype <taskclass>\n");
                return -ENOENT;
        }

        if (resid == -1) { /* not registered */
                resid = ckrm_register_res_ctlr(clstype, &cpu_rcbs);
                printk(KERN_DEBUG "init_ckrm_sched_res: resid= %d\n", resid);
        }
        return 0;
}


/*
 * initialize the class structure and
 * add the default class: class 0
 */
void init_cpu_classes(void)
{
        int i;

        /* init the classqueue for each processor */
        for (i = 0; i < NR_CPUS; i++)
                classqueue_init(get_cpu_classqueue(i));

        /*
         * hzheng: initialize the default cpu class
         * required for E14/E15 since ckrm_init is called after sched_init
         */
        ckrm_alloc_cpu_class(NULL, NULL);
}

EXPORT_SYMBOL(ckrm_get_cpu_class);