/* kernel/ckrm/ckrm_cpu_class.c - CPU Class resource controller for CKRM
 *
 * Copyright (C) Haoqiang Zheng,  IBM Corp. 2004
 *           (C) Hubertus Franke, IBM Corp. 2004
 *
 * Latest version, more details at http://ckrm.sf.net
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/errno.h>
#include <linux/sched.h>
#include <linux/ckrm_events.h>
#include <linux/ckrm_rc.h>
#include <linux/ckrm_tc.h>
#include <linux/ckrm_sched.h>
#include <linux/ckrm_classqueue.h>
#include <linux/seq_file.h>

/* forward declaration; the initialized definition appears below */
struct ckrm_res_ctlr cpu_rcbs;
29 * insert_cpu_class - insert a class to active_cpu_class list
31 * insert the class in decreasing order of class weight
33 static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
35 list_add(&cls->links,&active_cpu_classes);
39 * initialize a class object and its local queues
41 void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares)
47 cls->shares = *shares;
48 cls->cnt_lock = SPIN_LOCK_UNLOCKED;
49 ckrm_cpu_stat_init(&cls->stat);
50 ckrm_usage_init(&cls->usage);
51 cls->magic = CKRM_CPU_CLASS_MAGIC;
53 for (i = 0 ; i < NR_CPUS ; i++) {
54 queue = &cls->local_queues[i];
55 queue->active = queue->arrays;
56 queue->expired = queue->arrays+1;
58 for (j = 0; j < 2; j++) {
59 array = queue->arrays + j;
60 for (k = 0; k < MAX_PRIO; k++) {
61 INIT_LIST_HEAD(array->queue + k);
62 __clear_bit(k, array->bitmap);
64 // delimiter for bitsearch
65 __set_bit(MAX_PRIO, array->bitmap);
69 queue->expired_timestamp = 0;
71 queue->cpu_class = cls;
72 queue->classqueue = get_cpu_classqueue(i);
73 queue->top_priority = MAX_PRIO;
74 cq_node_init(&queue->classqueue_linkobj);
77 queue->local_weight = cpu_class_weight(cls);
78 queue->uncounted_ns = 0;
80 queue->magic = 0x43FF43D7;
84 write_lock(&class_list_lock);
85 insert_cpu_class(cls);
86 write_unlock(&class_list_lock);
89 static inline void set_default_share(ckrm_shares_t *shares)
91 shares->my_guarantee = 0;
92 shares->total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
93 shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
94 shares->my_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
95 shares->max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
96 shares->cur_max_limit = 0;
99 struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
101 struct ckrm_cpu_class * cls;
102 cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
103 if (valid_cpu_class(cls))
110 void* ckrm_alloc_cpu_class(struct ckrm_core_class *core, struct ckrm_core_class *parent)
112 struct ckrm_cpu_class *cls;
114 if (! parent) /*root class*/
115 cls = get_default_cpu_class();
117 cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
120 ckrm_shares_t shares;
121 if ((! parent) && (core)) {
123 * the default class is already initialized
124 * so only update the core structure
128 set_default_share(&shares);
129 init_cpu_class(cls,&shares);
131 cls->parent = parent;
134 printk(KERN_ERR"alloc_cpu_class failed\n");
140 * hzheng: this is not a stable implementation
141 * need to check race condition issue here
143 static void ckrm_free_cpu_class(void *my_res)
145 struct ckrm_cpu_class *cls = my_res, *parres, *childres;
146 ckrm_core_class_t *child = NULL;
154 /*the default class can't be freed*/
155 if (cls == get_default_cpu_class())
158 #warning "ACB: Remove freed class from any classqueues [PL #4233]"
159 for (i = 0 ; i < NR_CPUS ; i++) {
160 queue = &cls->local_queues[i];
161 if (cls_in_classqueue(&queue->classqueue_linkobj))
162 classqueue_dequeue(queue->classqueue,
163 &queue->classqueue_linkobj);
167 // Assuming there will be no children when this function is called
168 parres = ckrm_get_cpu_class(cls->parent);
170 // return child's limit/guarantee to parent node
171 spin_lock(&parres->cnt_lock);
172 child_guarantee_changed(&parres->shares, cls->shares.my_guarantee, 0);
173 // run thru parent's children and get the new max_limit of the parent
174 ckrm_lock_hier(parres->core);
176 while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
177 childres = ckrm_get_cpu_class(child);
178 if (maxlimit < childres->shares.my_limit) {
179 maxlimit = childres->shares.my_limit;
182 ckrm_unlock_hier(parres->core);
183 if (parres->shares.cur_max_limit < maxlimit) {
184 parres->shares.cur_max_limit = maxlimit;
187 spin_unlock(&parres->cnt_lock);
189 write_lock(&class_list_lock);
190 list_del(&cls->links);
191 write_unlock(&class_list_lock);
195 //call ckrm_cpu_monitor after class removed
200 * the system will adjust to the new share automatically
202 int ckrm_cpu_set_share(void *my_res, struct ckrm_shares *new_share)
204 struct ckrm_cpu_class *parres, *cls = my_res;
205 struct ckrm_shares *cur = &cls->shares, *par;
212 parres = ckrm_get_cpu_class(cls->parent);
213 spin_lock(&parres->cnt_lock);
214 spin_lock(&cls->cnt_lock);
215 par = &parres->shares;
217 spin_lock(&cls->cnt_lock);
223 * hzheng: CKRM_SHARE_DONTCARE should be handled
225 if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
226 new_share->my_guarantee = 0;
228 rc = set_shares(new_share, cur, par);
229 if (cur->my_limit == CKRM_SHARE_DONTCARE)
230 cur->my_limit = cur->max_limit;
233 spin_unlock(&cls->cnt_lock);
235 spin_unlock(&parres->cnt_lock);
238 //call ckrm_cpu_monitor after changes are changed
244 static int ckrm_cpu_get_share(void *my_res,
245 struct ckrm_shares *shares)
247 struct ckrm_cpu_class *cls = my_res;
251 *shares = cls->shares;
255 int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
257 struct ckrm_cpu_class *cls = my_res;
258 struct ckrm_cpu_class_stat* stat = &cls->stat;
265 seq_printf(sfile, "-------- CPU Class Status Start---------\n");
266 seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
267 cls->shares.my_guarantee,
268 cls->shares.my_limit,
269 cls->shares.total_guarantee,
270 cls->shares.max_limit);
271 seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
272 cls->shares.unused_guarantee,
273 cls->shares.cur_max_limit);
275 seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
276 seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
277 seq_printf(sfile, "\tehl= %d\n",stat->ehl);
278 seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
279 seq_printf(sfile, "\teshare= %d\n",stat->eshare);
280 seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
281 seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
282 seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
283 seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
284 get_ckrm_usage(cls,2*HZ),
285 get_ckrm_usage(cls,10*HZ),
286 get_ckrm_usage(cls,60*HZ)
288 for_each_online_cpu(i) {
289 lrq = get_ckrm_lrq(cls,i);
290 seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
293 seq_printf(sfile, "-------- CPU Class Status END ---------\n");
/*
 * Move a task to a new cpu class.
 * task will remain in the same cpu but on a different local runqueue
 */
void ckrm_cpu_change_class(void *task, void *old, void *new)
{
	struct task_struct *tsk = task;
	struct ckrm_cpu_class *newcls = new;

	/*sanity checking*/
	if (!task || ! old || !new)
		return;

	_ckrm_cpu_change_class(tsk,newcls);
}
313 /*dummy function, not used*/
314 static int ckrm_cpu_show_config(void *my_res, struct seq_file *sfile)
316 struct ckrm_cpu_class *cls = my_res;
321 seq_printf(sfile, "cls=%s,parameter=somevalue\n","ckrm_cpu class");
325 /*dummy function, not used*/
326 static int ckrm_cpu_set_config(void *my_res, const char *cfgstr)
328 struct ckrm_cpu_class *cls = my_res;
332 printk(KERN_DEBUG "ckrm_cpu config='%s'\n",cfgstr);
336 struct ckrm_res_ctlr cpu_rcbs = {
340 .res_alloc = ckrm_alloc_cpu_class,
341 .res_free = ckrm_free_cpu_class,
342 .set_share_values = ckrm_cpu_set_share,
343 .get_share_values = ckrm_cpu_get_share,
344 .get_stats = ckrm_cpu_get_stats,
345 .show_config = ckrm_cpu_show_config,
346 .set_config = ckrm_cpu_set_config,
347 .change_resclass = ckrm_cpu_change_class,
350 int __init init_ckrm_sched_res(void)
352 struct ckrm_classtype *clstype;
353 int resid = cpu_rcbs.resid;
355 clstype = ckrm_find_classtype_by_name("taskclass");
356 if (clstype == NULL) {
357 printk(KERN_INFO" Unknown ckrm classtype<taskclass>");
361 if (resid == -1) { /*not registered */
362 resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
363 printk(KERN_DEBUG "........init_ckrm_sched_res , resid= %d\n",resid);
369 * initialize the class structure
370 * add the default class: class 0
372 void init_cpu_classes(void)
376 //init classqueues for each processor
377 for (i=0; i < NR_CPUS; i++)
378 classqueue_init(get_cpu_classqueue(i));
381 * hzheng: initialize the default cpu class
382 * required for E14/E15 since ckrm_init is called after sched_init
384 ckrm_alloc_cpu_class(NULL,NULL);
388 EXPORT_SYMBOL(ckrm_get_cpu_class);