1 /* kernel/ckrm_sched.c - Supporting functions for ckrm scheduling
3 * Copyright (C) Haoqiang Zheng, IBM Corp. 2004
4 * (C) Hubertus Franke, IBM Corp. 2004
6 * Latest version, more details at http://ckrm.sf.net
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
15 #include <linux/init.h>
16 #include <linux/ckrm_sched.h>
18 rwlock_t class_list_lock = RW_LOCK_UNLOCKED;
19 LIST_HEAD(active_cpu_classes); // list of active cpu classes; anchor
21 struct ckrm_cpu_class default_cpu_class_obj;
23 struct ckrm_cpu_class * get_default_cpu_class(void) {
24 return (&default_cpu_class_obj);
27 /*******************************************************/
29 /*******************************************************/
31 static inline void check_inactive_class(ckrm_lrq_t * lrq,CVT_t cur_cvt)
36 //just a safty measure
37 if (unlikely(! cur_cvt))
41 * Always leaving a small bonus for inactive classes
42 * allows them to compete for cycles immediately when the become
43 * active. This should improve interactive behavior
45 bonus = INTERACTIVE_BONUS(lrq);
46 //cvt can't be negative
48 min_cvt = cur_cvt - bonus;
52 if (lrq->local_cvt < min_cvt) {
55 lost_cvt = scale_cvt(min_cvt - lrq->local_cvt,lrq);
56 lrq->local_cvt = min_cvt;
58 /* add what the class lost to its savings*/
59 lrq->savings += lost_cvt;
60 if (lrq->savings > MAX_SAVINGS)
61 lrq->savings = MAX_SAVINGS;
62 } else if (lrq->savings) {
64 *if a class saving and falling behind
65 * then start to use it saving in a leaking bucket way
69 savings_used = scale_cvt((lrq->local_cvt - min_cvt),lrq);
70 if (savings_used > lrq->savings)
71 savings_used = lrq->savings;
73 if (savings_used > SAVINGS_LEAK_SPEED)
74 savings_used = SAVINGS_LEAK_SPEED;
76 BUG_ON(lrq->savings < savings_used);
77 lrq->savings -= savings_used;
78 unscale_cvt(savings_used,lrq);
79 BUG_ON(lrq->local_cvt < savings_used);
80 // lrq->local_cvt -= savings_used;
85 * return the max_cvt of all the classes
87 static inline CVT_t get_max_cvt(int this_cpu)
89 struct ckrm_cpu_class *clsptr;
95 /*update class time, at the same time get max_cvt */
96 list_for_each_entry(clsptr, &active_cpu_classes, links) {
97 lrq = get_ckrm_lrq(clsptr, this_cpu);
98 if (lrq->local_cvt > max_cvt)
99 max_cvt = lrq->local_cvt;
106 * update_class_cputime - updates cvt of inactive classes
107 * -- an inactive class shouldn't starve others when it comes back
108 * -- the cpu time it lost when it's inactive should be accumulated
109 * -- its accumulated saving should be compensated (in a leaky bucket fashion)
111 * class_list_lock must have been acquired
113 void update_class_cputime(int this_cpu)
115 struct ckrm_cpu_class *clsptr;
120 * a class's local_cvt must not be significantly smaller than min_cvt
121 * of active classes otherwise, it will starve other classes when it
124 * Hence we keep all local_cvt's within a range of the min_cvt off
125 * all active classes (approximated by the local_cvt of the currently
126 * running class) and account for how many cycles where thus taken
127 * from an inactive class building a savings (not to exceed a few seconds)
128 * for a class to gradually make up upon reactivation, without
129 * starvation of other classes.
132 cur_cvt = get_local_cur_cvt(this_cpu);
135 * cur_cvt == 0 means the system is now idle
136 * in this case, we use max_cvt as cur_cvt
137 * max_cvt roughly represents the cvt of the class
138 * that has just finished running
140 * fairness wouldn't be a problem since we account for whatever lost in savings
141 * if the system is not busy, the system responsiveness is not a problem.
142 * still fine if the sytem is busy, but happened to be idle at this certain point
143 * since bias toward interactive classes (class priority) is a more important way to improve system responsiveness
145 if (unlikely(! cur_cvt)) {
146 cur_cvt = get_max_cvt(this_cpu);
151 * - check the local cvt of all the classes
152 * - update total_ns received by the class
153 * - do a usage sampling for the whole class
155 list_for_each_entry(clsptr, &active_cpu_classes, links) {
156 lrq = get_ckrm_lrq(clsptr, this_cpu);
158 spin_lock(&clsptr->stat.stat_lock);
159 clsptr->stat.total_ns += lrq->uncounted_ns;
160 ckrm_sample_usage(clsptr);
161 spin_unlock(&clsptr->stat.stat_lock);
162 lrq->uncounted_ns = 0;
164 check_inactive_class(lrq,cur_cvt);
/*******************************************************/
/*            PID load balancing stuff                 */
/*******************************************************/
#define PID_SAMPLE_T 32		/* sample the pid load once every 32 jiffies */
177 * sample pid load periodically
/* NOTE(review): the listing below is truncated — local declarations
 * (presumably `long load; long err;`), braces, the early return, and the
 * trailing update of the pid->load_* state after computing `err` are
 * missing. Recover the elided lines from the original kernel/ckrm_sched.c
 * before attempting to compile; do not infer the PID update from here. */
179 void ckrm_load_sample(ckrm_load_t* pid,int cpu)
/* rate limit: only take a sample once every PID_SAMPLE_T jiffies */
184 if (jiffies % PID_SAMPLE_T)
187 adjust_local_weight();
/* current cpu load, and its deviation from the last proportional sample */
189 load = ckrm_cpu_load(cpu);
190 err = load - pid->load_p;
198 long pid_get_pressure(ckrm_load_t* ckrm_load, int local_group)
201 pressure = ckrm_load->load_p * PID_KP;
202 pressure += ckrm_load->load_i * PID_KI;
203 pressure += ckrm_load->load_d * PID_KD;