This commit was manufactured by cvs2svn to create tag
[linux-2.6.git] / kernel / ckrm / ckrm_cpu_class.c
1 /* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
2  *
3  * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
4  *           (C) Hubertus Franke, IBM Corp. 2004
5  * 
6  * Latest version, more details at http://ckrm.sf.net
7  * 
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  */
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <asm/errno.h>
18 #include <linux/sched.h>
19 #include <linux/ckrm.h>
20 #include <linux/ckrm_rc.h>
21 #include <linux/ckrm_tc.h>
22 #include <linux/ckrm_sched.h>
23 #include <linux/ckrm_classqueue.h>
24 #include <linux/seq_file.h>
25
26
27 struct ckrm_res_ctlr cpu_rcbs;
28
29 /*
30  *  initialize a class object and its local queues
31  */
32  static void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares) 
33 {
34         int i,j,k;      
35         prio_array_t *array;    
36         struct ckrm_local_runqueue* queue;
37
38         for (i = 0 ; i < NR_CPUS ; i++) {
39                 queue = &cls->local_queues[i];
40                 queue->active  = queue->arrays;
41                 queue->expired = queue->arrays+1;
42                 
43                 for (j = 0; j < 2; j++) {
44                         array = queue->arrays + j;
45                         for (k = 0; k < MAX_PRIO; k++) {
46                                 INIT_LIST_HEAD(array->queue + k);
47                                 __clear_bit(k, array->bitmap);
48                         }
49                         // delimiter for bitsearch
50                         __set_bit(MAX_PRIO, array->bitmap);
51                         array->nr_active = 0;
52                 }
53
54                 queue->expired_timestamp = 0;
55                 
56                 queue->cpu_class = cls;
57                 queue->classqueue = get_cpu_classqueue(i);
58                 queue->top_priority = MAX_PRIO;
59                 cq_node_init(&queue->classqueue_linkobj);
60                 queue->local_cvt = 0;
61                 queue->uncounted_cvt = 0;
62                 queue->uncounted_ns = 0;
63                 queue->magic = 0x43FF43D7;
64         }
65
66         cls->shares = *shares;
67         cls->global_cvt = 0;
68         cls->cnt_lock = SPIN_LOCK_UNLOCKED;
69         ckrm_cpu_stat_init(&cls->stat);
70
71         // add to class list
72         write_lock(&class_list_lock);
73         list_add(&cls->links,&active_cpu_classes);
74         write_unlock(&class_list_lock);
75 }
76
77 static inline void set_default_share(ckrm_shares_t *shares)
78 {
79         shares->my_guarantee     = 0;
80         shares->my_limit         = CKRM_SHARE_DFLT_MAX_LIMIT;
81         shares->total_guarantee  = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
82         shares->max_limit        = CKRM_SHARE_DFLT_MAX_LIMIT;
83         shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
84         shares->cur_max_limit    = CKRM_SHARE_DFLT_MAX_LIMIT;
85 }
86
/* map a CKRM core class to this controller's per-class CPU state */
struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core) {
	return ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
}
90
91
92 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent) 
93 {               
94         struct ckrm_cpu_class *cls;
95
96         if (! parent) /*root class*/
97                 cls =  default_cpu_class;
98         else
99                 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
100
101         if (cls) {
102                 ckrm_shares_t shares;           
103                 if ((! parent) && (core)) { 
104                         /*
105                          * the default class is already initialized
106                          * so only update the core structure
107                          */
108                         cls->core = core;                       
109                 } else {
110                         set_default_share(&shares);
111                         init_cpu_class(cls,&shares);
112                         cls->core = core;
113                         cls->parent = parent;
114                 }
115         } else
116                 printk("alloc_cpu_class failed GFP_ATOMIC\n");
117
118         return cls;
119 }               
120
/*
 * res_free callback: tear down a CPU class and return its share
 * allocation to the parent.
 *
 * hzheng: this is not a stable implementation
 *         need to check race condition issue here
 */
static void ckrm_free_cpu_class(void *my_res) 
{                       
	struct ckrm_cpu_class *cls = my_res, *parres, *childres;
	ckrm_core_class_t *child = NULL;
	int maxlimit;

	if (!cls) 
		return;

	/*the default class can't be freed*/
	if (cls == default_cpu_class) 
		return;

	// Assuming there will be no children when this function is called
	parres = ckrm_get_cpu_class(cls->parent);

	// return child's limit/guarantee to parent node
	spin_lock(&parres->cnt_lock);
	child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
	// run thru parent's children and get the new max_limit of the parent
	ckrm_lock_hier(parres->core);
	maxlimit = 0;
	while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
		childres = ckrm_get_cpu_class(child);
		if (maxlimit < childres->shares.my_limit) {
			maxlimit = childres->shares.my_limit;
		}
	}
	ckrm_unlock_hier(parres->core);
	// NOTE(review): cur_max_limit is only ever raised here, never
	// lowered -- freeing the child that held the largest my_limit
	// leaves the stale maximum in place.  Verify this is intended.
	if (parres->shares.cur_max_limit < maxlimit) {
		parres->shares.cur_max_limit = maxlimit;
	}

	spin_unlock(&parres->cnt_lock);

	// unlink from the global active list before the memory goes away
	write_lock(&class_list_lock);
	list_del(&cls->links);
	write_unlock(&class_list_lock);

	kfree(cls);
}                               
166
167 /*
168  *  the system will adjust to the new share automatically  
169  */                     
170 int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share) 
171 {       
172         struct ckrm_cpu_class *parres, *cls = my_res;
173         struct ckrm_shares *cur = &cls->shares, *par;
174         int rc = -EINVAL;
175
176         if (!cls) 
177                 return rc;
178
179         if (cls->parent) {
180                 parres = ckrm_get_cpu_class(cls->parent);
181                 spin_lock(&parres->cnt_lock);
182                 spin_lock(&cls->cnt_lock);
183                 par = &parres->shares;
184         } else {
185                 spin_lock(&cls->cnt_lock);
186                 par = NULL;
187                 parres = NULL;
188         }
189
190         rc = set_shares(new_share, cur, par);
191
192         spin_unlock(&cls->cnt_lock);
193         if (cls->parent) {
194                 spin_unlock(&parres->cnt_lock);
195         }
196         return rc;
197 }                                                       
198                         
/*
 * get_share_values callback: copy the class's current share settings
 * into the caller-supplied buffer.
 */
static int ckrm_cpu_get_share(void *my_res,
			      struct ckrm_shares *shares)
{                       
	struct ckrm_cpu_class *cls = my_res;

	if (!cls) 
		return -EINVAL;
	*shares = cls->shares;
	return 0;
}                               
212
/*
 * get_stats callback: dump the class's share settings, weight, and
 * scheduling statistics through the seq_file interface.
 *
 * NOTE(review): the per-cpu fields (local_cvt, classqueue link prio/
 * index, run/total) are read from CPU 0's queue only -- presumably a
 * debugging shortcut; confirm whether all CPUs should be reported.
 */
int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
{
	struct ckrm_cpu_class *cls = my_res;

	if (!cls) 
		return -EINVAL;

	seq_printf(sfile, "-------- CPU Class Status Start---------\n");
	seq_printf(sfile, "  gua= %d limit= %d\n",
		   cls->shares.my_guarantee,
		   cls->shares.my_limit);
	seq_printf(sfile, "  total_gua= %d limit= %d\n",
		   cls->shares.total_guarantee,
		   cls->shares.max_limit);
	seq_printf(sfile, "  used_gua= %d cur_limit= %d\n",
		   cls->shares.unused_guarantee,
		   cls->shares.cur_max_limit);

	seq_printf(sfile, "  Share= %d\n",cpu_class_weight(cls));
	seq_printf(sfile, "  cvt= %llu\n",cls->local_queues[0].local_cvt);
	seq_printf(sfile, "  total_ns= %llu\n",cls->stat.total_ns);
	seq_printf(sfile, "  prio= %d\n",cls->local_queues[0].classqueue_linkobj.prio);
	seq_printf(sfile, "  index= %d\n",cls->local_queues[0].classqueue_linkobj.index);
	seq_printf(sfile, "  run= %llu\n",cls->stat.local_stats[0].run);
	seq_printf(sfile, "  total= %llu\n",cls->stat.local_stats[0].total);
	seq_printf(sfile, "  cpu_demand= %lu\n",cls->stat.cpu_demand);

	seq_printf(sfile, "  effective_guarantee= %d\n",cls->stat.effective_guarantee);
	seq_printf(sfile, "  effective_limit= %d\n",cls->stat.effective_limit);
	seq_printf(sfile, "  effective_share= %d\n",cls->stat.effective_share);
	seq_printf(sfile, "-------- CPU Class Status END ---------\n");


	return 0;
}
248
/*
 * change_resclass callback: move @task from class @old to class @new.
 * task will remain in the same cpu but on a different local runqueue
 */
static void ckrm_cpu_change_class(void *task, void *old, void *new)
{               
	struct task_struct *tsk = task;
	struct ckrm_cpu_class *newcls = new;
	unsigned long flags;
	struct runqueue *rq;
	prio_array_t *array;

	/*sanity checking*/
	if (!task || ! old || !new)
		return; 

	/* hold the runqueue lock so the move is atomic wrt the scheduler */
	rq = task_rq_lock(tsk,&flags); 
	array = tsk->array;
	if (array) {
		/* task is queued: remove it, retag it, then requeue it on
		 * the active array of the new class's local runqueue */
		dequeue_task(tsk,array);
		tsk->cpu_class = newcls;
		enqueue_task(tsk,rq_active(tsk,rq));
	} else {
		/* not on any runqueue (e.g. sleeping): just retag */
		tsk->cpu_class = newcls;
	}
	task_rq_unlock(rq,&flags);
}                                                       
275
276 /*dummy function, not used*/
277 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
278 {
279         struct ckrm_cpu_class *cls = my_res;
280
281         if (!cls) 
282                 return -EINVAL;
283
284         seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
285         return 0;
286 }
287
288 /*dummy function, not used*/
289 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
290 {
291         struct ckrm_cpu_class *cls = my_res;
292
293         if (!cls) 
294                 return -EINVAL;
295         printk("ckrm_cpu config='%s'\n",cfgstr);
296         return 0;
297 }
298         
/* controller descriptor registered with the CKRM core (see
 * init_ckrm_sched_res); resid is filled in at registration time */
struct ckrm_res_ctlr cpu_rcbs = {
	.res_name          = "CKRM CPU Class",
	.res_hdepth        = 1,		/* supported hierarchy depth */
	.resid             = -1,	/* -1 = not yet registered */
	.res_alloc         = ckrm_alloc_cpu_class,
	.res_free          = ckrm_free_cpu_class,
	.set_share_values  = ckrm_cpu_set_share,
	.get_share_values  = ckrm_cpu_get_share,
	.get_stats         = ckrm_cpu_get_stats,
	.show_config       = ckrm_cpu_show_config,	/* dummy */
	.set_config        = ckrm_cpu_set_config,	/* dummy */
	.change_resclass   = ckrm_cpu_change_class,
};
312
313 int __init init_ckrm_sched_res(void)
314 {
315         struct ckrm_classtype *clstype;
316         int resid = cpu_rcbs.resid;
317
318         clstype = ckrm_find_classtype_by_name("taskclass");
319         if (clstype == NULL) {
320                 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
321                 return -ENOENT;
322         }
323
324         if (resid == -1) { /*not registered */
325                 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
326                 printk("........init_ckrm_sched_res , resid= %d\n",resid);
327         }
328         return 0;
329 }
330
331 /*
332  * initialize the class structure
333  * add the default class: class 0
334  */
335 void init_cpu_classes(void) 
336 {
337         int i;
338
339         //init classqueues for each processor
340         for (i=0; i < NR_CPUS; i++)
341                 classqueue_init(get_cpu_classqueue(i)); 
342 /*
343  * hzheng: initialize the default cpu class
344  *         required for E14 since ckrm_init is called after sched_init
345  */
346         ckrm_alloc_cpu_class(NULL,NULL);
347 }
348
349
350 EXPORT_SYMBOL(ckrm_get_cpu_class);