This commit was manufactured by cvs2svn to create tag
[linux-2.6.git] / kernel / ckrm / ckrm_cpu_class.c
1 /* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
2  *
3  * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
4  *           (C) Hubertus Franke, IBM Corp. 2004
5  * 
6  * Latest version, more details at http://ckrm.sf.net
7  * 
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  */
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <asm/errno.h>
18 #include <linux/sched.h>
19 #include <linux/ckrm.h>
20 #include <linux/ckrm_rc.h>
21 #include <linux/ckrm_tc.h>
22 #include <linux/ckrm_sched.h>
23 #include <linux/ckrm_classqueue.h>
24 #include <linux/seq_file.h>
25
/* tentative definition; the full initializer appears at the bottom of this file */
struct ckrm_res_ctlr cpu_rcbs;
27
/**
 * insert_cpu_class - insert a class to active_cpu_class list
 *
 * NOTE(review): the original comment claims insertion "in decreasing order
 * of class weight", but list_add() is a plain head insert — the list is
 * NOT kept sorted here. Confirm whether any consumer relies on ordering.
 * Caller must hold class_list_lock for writing (see init_cpu_class()).
 */
static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
{
	list_add(&cls->links,&active_cpu_classes);
}
37
38 /*
39  *  initialize a class object and its local queues
40  */
41 void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares) 
42 {
43         int i,j,k;      
44         prio_array_t *array;    
45         ckrm_lrq_t* queue;
46
47         cls->shares = *shares;
48         cls->cnt_lock = SPIN_LOCK_UNLOCKED;
49         ckrm_cpu_stat_init(&cls->stat);
50         ckrm_usage_init(&cls->usage);
51         cls->magic = CKRM_CPU_CLASS_MAGIC;
52
53         for (i = 0 ; i < NR_CPUS ; i++) {
54                 queue = &cls->local_queues[i];
55         queue->active   = queue->arrays;
56         queue->expired  = queue->arrays+1;
57         
58         for (j = 0; j < 2; j++) {
59                         array = queue->arrays + j;
60                 for (k = 0; k < MAX_PRIO; k++) {
61                         INIT_LIST_HEAD(array->queue + k);
62                         __clear_bit(k, array->bitmap);
63                 }
64                 // delimiter for bitsearch
65                 __set_bit(MAX_PRIO, array->bitmap);
66                 array->nr_active = 0;
67         }
68         
69         queue->expired_timestamp = 0;
70         
71         queue->cpu_class = cls;
72                 queue->classqueue = get_cpu_classqueue(i);
73         queue->top_priority = MAX_PRIO;
74         cq_node_init(&queue->classqueue_linkobj);
75                 queue->local_cvt = 0;
76         queue->lrq_load = 0;
77         queue->local_weight = cpu_class_weight(cls);
78         queue->uncounted_ns = 0;
79         queue->savings = 0;
80                 queue->magic = 0x43FF43D7;
81         }
82
83         // add to class list
84         write_lock(&class_list_lock);
85         insert_cpu_class(cls);
86         write_unlock(&class_list_lock);
87 }
88
89 static inline void set_default_share(ckrm_shares_t *shares)
90 {
91         shares->my_guarantee     = 0;
92         shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
93         shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
94         shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
95         shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
96         shares->cur_max_limit    = 0;
97 }
98
99 struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
100 {
101         struct ckrm_cpu_class * cls;
102         cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
103         if (valid_cpu_class(cls))
104                 return cls;
105         else
106                 return NULL;
107 }
108
109
110 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent) 
111 {               
112         struct ckrm_cpu_class *cls;
113
114         if (! parent) /*root class*/
115                 cls =  get_default_cpu_class();
116         else
117                 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
118
119         if (cls) {
120                 ckrm_shares_t shares;           
121                 if ((! parent) && (core)) { 
122                         /*
123                          * the default class is already initialized
124                          * so only update the core structure
125                          */
126                         cls->core = core;                       
127                 } else {
128                         set_default_share(&shares);
129                         init_cpu_class(cls,&shares);
130                         cls->core = core;
131                         cls->parent = parent;                   
132                 }
133         } else
134                 printk(KERN_ERR"alloc_cpu_class failed\n");
135
136         return cls;
137 }               
138
139 /*
140  * hzheng: this is not a stable implementation
141  *         need to check race condition issue here
142  */             
143 static void ckrm_free_cpu_class(void *my_res) 
144 {                       
145         struct ckrm_cpu_class *cls = my_res, *parres, *childres;
146         ckrm_core_class_t *child = NULL;
147         int maxlimit;
148
149         if (!cls) 
150                 return;
151
152         /*the default class can't be freed*/
153         if (cls == get_default_cpu_class()) 
154                 return;
155
156         // Assuming there will be no children when this function is called
157         parres = ckrm_get_cpu_class(cls->parent);
158
159         // return child's limit/guarantee to parent node
160         spin_lock(&parres->cnt_lock);
161         child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
162         // run thru parent's children and get the new max_limit of the parent
163         ckrm_lock_hier(parres->core);
164         maxlimit = 0;
165         while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
166                 childres = ckrm_get_cpu_class(child);
167                 if (maxlimit < childres->shares.my_limit) {
168                         maxlimit = childres->shares.my_limit;
169                 }
170         }
171         ckrm_unlock_hier(parres->core);
172         if (parres->shares.cur_max_limit < maxlimit) {
173                 parres->shares.cur_max_limit = maxlimit;
174         }
175
176         spin_unlock(&parres->cnt_lock);
177
178         write_lock(&class_list_lock);
179         list_del(&cls->links);
180         write_unlock(&class_list_lock);
181
182         kfree(cls);
183
184         //call ckrm_cpu_monitor after class removed
185         ckrm_cpu_monitor(0);
186 }                               
187
188 /*
189  *  the system will adjust to the new share automatically  
190  */                     
191 int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share) 
192 {       
193         struct ckrm_cpu_class *parres, *cls = my_res;
194         struct ckrm_shares *cur = &cls->shares, *par;
195         int rc = -EINVAL;
196
197         if (!cls)
198                 return rc;
199
200         if (cls->parent) {
201                 parres = ckrm_get_cpu_class(cls->parent);
202                 spin_lock(&parres->cnt_lock);
203                 spin_lock(&cls->cnt_lock);
204                 par = &parres->shares;
205         } else {
206                 spin_lock(&cls->cnt_lock);
207                 par = NULL;
208                 parres = NULL;
209         }
210
211         /*
212          * hzheng: CKRM_SHARE_DONTCARE should be handled
213          */
214         if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
215                 new_share->my_guarantee = 0;
216
217         rc = set_shares(new_share, cur, par);
218         if (cur->my_limit == CKRM_SHARE_DONTCARE)
219                 cur->my_limit = cur->max_limit;
220
221
222         spin_unlock(&cls->cnt_lock);
223         if (cls->parent) {
224                 spin_unlock(&parres->cnt_lock);
225         }
226
227         //call ckrm_cpu_monitor after changes are changed
228         ckrm_cpu_monitor(0);
229
230         return rc;
231 }                                                       
232                         
233 static int ckrm_cpu_get_share(void *my_res,
234                               struct ckrm_shares *shares)
235 {                       
236         struct ckrm_cpu_class *cls = my_res;
237
238         if (!cls)
239                 return -EINVAL;
240         *shares = cls->shares;
241         return 0;
242 }                               
243
244 int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
245 {
246         struct ckrm_cpu_class *cls = my_res;
247         struct ckrm_cpu_class_stat* stat = &cls->stat;
248         ckrm_lrq_t* lrq;
249         int i;
250
251         if (!cls) 
252                 return -EINVAL;
253
254         seq_printf(sfile, "-------- CPU Class Status Start---------\n");
255         seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
256                    cls->shares.my_guarantee,
257                    cls->shares.my_limit,
258                    cls->shares.total_guarantee,
259                    cls->shares.max_limit);
260         seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
261                    cls->shares.unused_guarantee,
262                    cls->shares.cur_max_limit);
263
264         seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
265         seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
266         seq_printf(sfile, "\tehl= %d\n",stat->ehl);
267         seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
268         seq_printf(sfile, "\teshare= %d\n",stat->eshare);
269         seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
270         seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
271         seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
272         seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
273                    get_ckrm_usage(cls,2*HZ),
274                    get_ckrm_usage(cls,10*HZ),
275                    get_ckrm_usage(cls,60*HZ)
276                    );
277         for_each_online_cpu(i) {
278                 lrq = get_ckrm_lrq(cls,i);              
279                 seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
280         }
281
282         seq_printf(sfile, "-------- CPU Class Status END ---------\n");
283
284         return 0;
285 }
286
287 /*
288  * task will remain in the same cpu but on a different local runqueue
289  */
290 void ckrm_cpu_change_class(void *task, void *old, void *new)
291 {               
292         struct task_struct *tsk = task;                    
293         struct ckrm_cpu_class *newcls = new;
294
295         /*sanity checking*/
296         if (!task || ! old || !new)
297                 return; 
298
299         _ckrm_cpu_change_class(tsk,newcls);
300 }                                                       
301
302 /*dummy function, not used*/
303 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
304 {
305         struct ckrm_cpu_class *cls = my_res;
306
307         if (!cls) 
308                 return -EINVAL;
309
310         seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
311         return 0;
312 }
313
314 /*dummy function, not used*/
315 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
316 {
317         struct ckrm_cpu_class *cls = my_res;
318
319         if (!cls) 
320                 return -EINVAL;
321         printk(KERN_DEBUG "ckrm_cpu config='%s'\n",cfgstr);
322         return 0;
323 }
324         
/*
 * CPU resource controller descriptor registered with the CKRM core.
 * resid == -1 marks the controller as not yet registered; it is filled
 * in by ckrm_register_res_ctlr() (see init_ckrm_sched_res()).
 */
struct ckrm_res_ctlr cpu_rcbs = {
	.res_name          = "cpu",
	.res_hdepth        = 1,   /* maximum class hierarchy depth */
	.resid             = -1,  /* -1 = not registered yet */
	.res_alloc         = ckrm_alloc_cpu_class,
	.res_free          = ckrm_free_cpu_class,
	.set_share_values  = ckrm_cpu_set_share,
	.get_share_values  = ckrm_cpu_get_share,
	.get_stats         = ckrm_cpu_get_stats,
	.show_config       = ckrm_cpu_show_config,
	.set_config        = ckrm_cpu_set_config,
	.change_resclass   = ckrm_cpu_change_class,
};
338
339 int __init init_ckrm_sched_res(void)
340 {
341         struct ckrm_classtype *clstype;
342         int resid = cpu_rcbs.resid;
343
344         clstype = ckrm_find_classtype_by_name("taskclass");
345         if (clstype == NULL) {
346                 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
347                 return -ENOENT;
348         }
349
350         if (resid == -1) { /*not registered */
351                 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
352                 printk(KERN_DEBUG "........init_ckrm_sched_res , resid= %d\n",resid);
353         }
354         return 0;
355 }
356
357 /*
358  * initialize the class structure
359  * add the default class: class 0
360  */
361 void init_cpu_classes(void) 
362 {
363         int i;
364
365         //init classqueues for each processor
366         for (i=0; i < NR_CPUS; i++)
367                 classqueue_init(get_cpu_classqueue(i)); 
368
369         /*
370          * hzheng: initialize the default cpu class
371          *  required for E14/E15 since ckrm_init is called after sched_init
372          */
373         ckrm_alloc_cpu_class(NULL,NULL);
374         }
375
376
/* exported so other kernel modules can resolve CPU classes */
EXPORT_SYMBOL(ckrm_get_cpu_class);