X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq_ondemand.c;h=f697449327c6fca54f856189801965861855f6a6;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=a9320ae41cb9f35d367e36db3ec6aba1683f752b;hpb=a2c21200f1c81b08cb55e417b68150bba439b646;p=linux-2.6.git

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a9320ae41..f69744932 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -10,25 +10,14 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
-#include <linux/sched.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
+#include <linux/cpu.h>
 #include <linux/jiffies.h>
-#include <linux/cpu.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -36,58 +25,150 @@
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
-#define MIN_FREQUENCY_UP_THRESHOLD		(0)
+#define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
-#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
-
-/*
- * The polling frequency of this governor depends on the capability of
+
+/*
+ * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
  * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
  * All times here are in uS.
  */
-static unsigned int def_sampling_rate;
-#define MIN_SAMPLING_RATE		(def_sampling_rate / 2)
+static unsigned int def_sampling_rate;
+#define MIN_SAMPLING_RATE_RATIO			(2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE \
+			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE \
+			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
-#define DEF_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
-#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
-	struct cpufreq_policy	*cur_policy;
-	unsigned int		prev_cpu_idle_up;
-	unsigned int		prev_cpu_idle_down;
-	unsigned int		enable;
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
+	struct cpufreq_policy *cur_policy;
+	struct delayed_work work;
+	enum dbs_sample sample_type;
+	unsigned int enable;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int freq_lo;
+	unsigned int freq_lo_jiffies;
+	unsigned int freq_hi_jiffies;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
-static DECLARE_MUTEX	(dbs_sem);
-static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
-struct dbs_tuners {
-	unsigned int		sampling_rate;
-	unsigned int		sampling_down_factor;
-	unsigned int		up_threshold;
-	unsigned int		down_threshold;
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
+static DEFINE_MUTEX(dbs_mutex);
+
+static struct workqueue_struct	*kondemand_wq;
+
+static struct dbs_tuners {
+	unsigned int sampling_rate;
+	unsigned int up_threshold;
+	unsigned int ignore_nice;
+	unsigned int powersave_bias;
+} dbs_tuners_ins = {
+	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.ignore_nice = 0,
+	.powersave_bias = 0,
 };
 
-struct dbs_tuners dbs_tuners_ins = {
-	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
-	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
-};
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+{
+	cputime64_t retval;
+
+	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+			kstat_cpu(cpu).cpustat.iowait);
+
+	if (dbs_tuners_ins.ignore_nice)
+		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+
+	return retval;
+}
+
+/*
+ * Find right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
+ * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ */
+static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+					  unsigned int freq_next,
+					  unsigned int relation)
+{
+	unsigned int freq_req, freq_reduc, freq_avg;
+	unsigned int freq_hi, freq_lo;
+	unsigned int index = 0;
+	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+
+	if (!dbs_info->freq_table) {
+		dbs_info->freq_lo = 0;
+		dbs_info->freq_lo_jiffies = 0;
+		return freq_next;
+	}
+
+	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+			relation, &index);
+	freq_req = dbs_info->freq_table[index].frequency;
+	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+	freq_avg = freq_req - freq_reduc;
+
+	/* Find freq bounds for freq_avg in freq_table */
+	index = 0;
+	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+			CPUFREQ_RELATION_H, &index);
+	freq_lo = dbs_info->freq_table[index].frequency;
+	index = 0;
+	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+			CPUFREQ_RELATION_L, &index);
+	freq_hi = dbs_info->freq_table[index].frequency;
+
+	/* Find out how long we have to be in hi and lo freqs */
+	if (freq_hi == freq_lo) {
+		dbs_info->freq_lo = 0;
+		dbs_info->freq_lo_jiffies = 0;
+		return freq_lo;
+	}
+	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+	jiffies_hi += ((freq_hi - freq_lo) / 2);
+	jiffies_hi /= (freq_hi - freq_lo);
+	jiffies_lo = jiffies_total - jiffies_hi;
+	dbs_info->freq_lo = freq_lo;
+	dbs_info->freq_lo_jiffies = jiffies_lo;
+	dbs_info->freq_hi_jiffies = jiffies_hi;
+	return freq_hi;
+}
+
+static void ondemand_powersave_bias_init(void)
+{
+	int i;
+	for_each_online_cpu(i) {
+		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+		dbs_info->freq_table = cpufreq_frequency_get_table(i);
+		dbs_info->freq_lo = 0;
+	}
+}
 
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
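The time-splitting arithmetic in powersave_bias_target() above is easier to follow with concrete numbers. The standalone C sketch below replays it in user space; every value in it is hypothetical (a 1000000 kHz request, a bias of 100, i.e. 10%, neighbouring table entries at 800000 and 1000000 kHz, a 10-jiffy sampling interval) and only illustrates the rounding behaviour, not the driver's actual tables.

/*
 * Standalone sketch of the powersave_bias time split from
 * powersave_bias_target().  All inputs are made-up illustrative values;
 * jiffies_total stands in for usecs_to_jiffies(sampling_rate).
 */
#include <stdio.h>

int main(void)
{
        unsigned int freq_req = 1000000;   /* kHz, requested target */
        unsigned int powersave_bias = 100; /* 10.0%, in units of 0.1% */
        unsigned int freq_lo = 800000;     /* kHz, table entry below avg */
        unsigned int freq_hi = 1000000;    /* kHz, table entry above avg */
        unsigned int jiffies_total = 10;   /* sampling interval, jiffies */

        unsigned int freq_reduc = freq_req * powersave_bias / 1000;
        unsigned int freq_avg = freq_req - freq_reduc;  /* 900000 kHz */

        /* Split the interval so the average frequency equals freq_avg,
         * rounding to the nearest jiffy exactly as the driver does. */
        unsigned int jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += (freq_hi - freq_lo) / 2;
        jiffies_hi /= (freq_hi - freq_lo);
        unsigned int jiffies_lo = jiffies_total - jiffies_hi;

        printf("run %u jiffies at %u kHz, %u jiffies at %u kHz\n",
               jiffies_hi, freq_hi, jiffies_lo, freq_lo);
        return 0;
}

With these inputs the window splits 5/5, so 5 jiffies at 1000000 kHz plus 5 jiffies at 800000 kHz average out to the biased target of 900000 kHz.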
@@ -100,11 +181,9 @@ static ssize_t
 show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
-#define define_one_ro(_name) \
-static struct freq_attr _name = { \
-	.attr = { .name = __stringify(_name), .mode = 0444 }, \
-	.show = show_##_name, \
-}
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
 define_one_ro(sampling_rate_min);
@@ -117,97 +196,121 @@ static ssize_t show_##file_name				\
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
 }
 show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
+show_one(ignore_nice_load, ignore_nice);
+show_one(powersave_bias, powersave_bias);
 
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
-	down(&dbs_sem);
-	if (ret != 1 )
-		goto out;
-
-	dbs_tuners_ins.sampling_down_factor = input;
-out:
-	up(&dbs_sem);
+	ret = sscanf(buf, "%u", &input);
+
+	mutex_lock(&dbs_mutex);
+	if (ret != 1 || input > MAX_SAMPLING_RATE
+		     || input < MIN_SAMPLING_RATE) {
+		mutex_unlock(&dbs_mutex);
+		return -EINVAL;
+	}
+
+	dbs_tuners_ins.sampling_rate = input;
+	mutex_unlock(&dbs_mutex);
+
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
-	down(&dbs_sem);
-	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE)
-		goto out;
+	ret = sscanf(buf, "%u", &input);
+
+	mutex_lock(&dbs_mutex);
+	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
+		mutex_unlock(&dbs_mutex);
+		return -EINVAL;
+	}
+
+	dbs_tuners_ins.up_threshold = input;
+	mutex_unlock(&dbs_mutex);
 
-	dbs_tuners_ins.sampling_rate = input;
-out:
-	up(&dbs_sem);
 	return count;
 }
 
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
-	down(&dbs_sem);
-	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD ||
-			input <= dbs_tuners_ins.down_threshold)
-		goto out;
-
-	dbs_tuners_ins.up_threshold = input;
-out:
-	up(&dbs_sem);
+
+	unsigned int j;
+
+	ret = sscanf(buf, "%u", &input);
+	if ( ret != 1 )
+		return -EINVAL;
+
+	if ( input > 1 )
+		input = 1;
+
+	mutex_lock(&dbs_mutex);
+	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+		mutex_unlock(&dbs_mutex);
+		return count;
+	}
+	dbs_tuners_ins.ignore_nice = input;
+
+	/* we need to re-evaluate prev_cpu_idle */
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+		dbs_info->prev_cpu_wall = get_jiffies_64();
+	}
+	mutex_unlock(&dbs_mutex);
+
 	return count;
 }
 
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
-	down(&dbs_sem);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
-			input >= dbs_tuners_ins.up_threshold)
-		goto out;
-
-	dbs_tuners_ins.down_threshold = input;
-out:
-	up(&dbs_sem);
+	ret = sscanf(buf, "%u", &input);
+
+	if (ret != 1)
+		return -EINVAL;
+
+	if (input > 1000)
+		input = 1000;
+
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.powersave_bias = input;
+	ondemand_powersave_bias_init();
+	mutex_unlock(&dbs_mutex);
+
	return count;
 }
 
-#define define_one_rw(_name) \
-static struct freq_attr _name = { \
-	.attr = { .name = __stringify(_name), .mode = 0644 }, \
-	.show = show_##_name, \
-	.store = store_##_name, \
-}
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
-define_one_rw(down_threshold);
+define_one_rw(ignore_nice_load);
+define_one_rw(powersave_bias);
 
 static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
-	&sampling_down_factor.attr,
 	&up_threshold.attr,
-	&down_threshold.attr,
+	&ignore_nice_load.attr,
+	&powersave_bias.attr,
 	NULL
 };
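All four store handlers above share one pattern: parse with sscanf(), range-check, update the tunable under dbs_mutex. From user space these tunables appear as per-policy sysfs files; the sketch below shows one such write, assuming CPU0 runs ondemand and the usual /sys/devices/system/cpu/.../cpufreq/ondemand/ layout (path may differ on other setups).

/*
 * User-space view of the tunables defined above.  Writes are routed
 * through the store_*() handlers; out-of-range values get EINVAL.
 */
#include <stdio.h>

int main(void)
{
        const char *path =
            "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* After this patch the accepted range is 11..100
         * (MIN_/MAX_FREQUENCY_UP_THRESHOLD); 80 is the default. */
        fprintf(f, "%u\n", 90);
        fclose(f);
        return 0;
}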
@@ -218,107 +321,159 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
-	unsigned int freq_down_step;
-	unsigned int freq_down_sampling_rate;
-	static int down_skip[NR_CPUS];
-	struct cpu_dbs_info_s *this_dbs_info;
+	unsigned int idle_ticks, total_ticks;
+	unsigned int load;
+	cputime64_t cur_jiffies;
+
+	struct cpufreq_policy *policy;
+	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
-	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	this_dbs_info->freq_lo = 0;
+	policy = this_dbs_info->cur_policy;
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
+			this_dbs_info->prev_cpu_wall);
+	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	if (!total_ticks)
+		return;
+	/*
+	 * Every sampling_rate, we check, if current idle time is less
+	 * than 20% (default), then we try to increase frequency
+	 * Every sampling_rate, we look for the lowest
+	 * frequency which can sustain the load while keeping idle time over
+	 * 30%. If such a frequency exists, we try to decrease to this frequency.
 	 *
-	 * Any frequency increase takes it to the maximum frequency.
-	 * Frequency reduction happens at minimum steps of
-	 * 5% of max_frequency
+	 * Any frequency increase takes it to the maximum frequency.
+	 * Frequency reduction happens at minimum steps of
+	 * 5% (default) of current frequency
 	 */
+
+	/* Get Idle Time */
+	idle_ticks = UINT_MAX;
+	for_each_cpu_mask(j, policy->cpus) {
+		cputime64_t total_idle_ticks;
+		unsigned int tmp_idle_ticks;
+		struct cpu_dbs_info_s *j_dbs_info;
+
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		total_idle_ticks = get_cpu_idle_time(j);
+		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+				j_dbs_info->prev_cpu_idle);
+		j_dbs_info->prev_cpu_idle = total_idle_ticks;
+
+		if (tmp_idle_ticks < idle_ticks)
+			idle_ticks = tmp_idle_ticks;
+	}
+	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
+
 	/* Check for frequency increase */
-	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-			sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
-
-	if (idle_ticks < up_idle_ticks) {
-		__cpufreq_driver_target(this_dbs_info->cur_policy,
-			this_dbs_info->cur_policy->max,
-			CPUFREQ_RELATION_H);
-		down_skip[cpu] = 0;
-		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+	if (load > dbs_tuners_ins.up_threshold) {
+		/* if we are already at full speed then break out early */
+		if (!dbs_tuners_ins.powersave_bias) {
+			if (policy->cur == policy->max)
+				return;
+
+			__cpufreq_driver_target(policy, policy->max,
+				CPUFREQ_RELATION_H);
+		} else {
+			int freq = powersave_bias_target(policy, policy->max,
+					CPUFREQ_RELATION_H);
+			__cpufreq_driver_target(policy, freq,
+				CPUFREQ_RELATION_L);
+		}
 		return;
 	}
 
 	/* Check for frequency decrease */
-	down_skip[cpu]++;
-	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+	/* if we cannot reduce the frequency anymore, break out early */
+	if (policy->cur == policy->min)
 		return;
 
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	down_skip[cpu] = 0;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	if (load < (dbs_tuners_ins.up_threshold - 10)) {
+		unsigned int freq_next, freq_cur;
+
+		freq_cur = cpufreq_driver_getavg(policy);
+		if (!freq_cur)
+			freq_cur = policy->cur;
+
+		freq_next = (freq_cur * load) /
+			(dbs_tuners_ins.up_threshold - 10);
+
+		if (!dbs_tuners_ins.powersave_bias) {
+			__cpufreq_driver_target(policy, freq_next,
+					CPUFREQ_RELATION_L);
+		} else {
+			int freq = powersave_bias_target(policy, freq_next,
+					CPUFREQ_RELATION_L);
+			__cpufreq_driver_target(policy, freq,
+					CPUFREQ_RELATION_L);
+		}
+	}
+}
 
-	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
-		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		sampling_rate_in_HZ(freq_down_sampling_rate);
+static void do_dbs_timer(struct work_struct *work)
+{
+	unsigned int cpu = smp_processor_id();
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	enum dbs_sample sample_type = dbs_info->sample_type;
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
-		freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
+	/* Permit rescheduling of this work item */
+	work_release(work);
 
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_down_step == 0))
-			freq_down_step = 5;
+	delay -= jiffies % delay;
 
-		__cpufreq_driver_target(this_dbs_info->cur_policy,
-			this_dbs_info->cur_policy->cur - freq_down_step,
-			CPUFREQ_RELATION_H);
+	if (!dbs_info->enable)
 		return;
+	/* Common NORMAL_SAMPLE setup */
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+	if (!dbs_tuners_ins.powersave_bias ||
+	    sample_type == DBS_NORMAL_SAMPLE) {
+		lock_cpu_hotplug();
+		dbs_check_cpu(dbs_info);
+		unlock_cpu_hotplug();
+		if (dbs_info->freq_lo) {
+			/* Setup timer for SUB_SAMPLE */
+			dbs_info->sample_type = DBS_SUB_SAMPLE;
+			delay = dbs_info->freq_hi_jiffies;
+		}
+	} else {
+		__cpufreq_driver_target(dbs_info->cur_policy,
+					dbs_info->freq_lo,
+					CPUFREQ_RELATION_H);
 	}
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
-static void do_dbs_timer(void *data)
-{
-	int i;
-	down(&dbs_sem);
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
-			sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
-}
-
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(unsigned int cpu)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	schedule_work(&dbs_work);
-	return;
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	delay -= jiffies % delay;
+
+	ondemand_powersave_bias_init();
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	cancel_delayed_work(&dbs_work);
-	return;
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
+	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
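The core of the new dbs_check_cpu() is two integer formulas: load as the busy fraction of the wall-clock window (taking the least-idle CPU of the policy), and, on the way down, the lowest frequency that would keep load 10 points under up_threshold. The sketch below reruns that arithmetic with made-up numbers (a 100-tick window, 50 idle ticks, the default threshold of 80, a current frequency of 2000000 kHz):

/*
 * Worked example of the load / freq_next math in dbs_check_cpu().
 * All inputs are hypothetical illustrative values.
 */
#include <stdio.h>

int main(void)
{
        unsigned int total_ticks = 100;   /* wall ticks since last sample */
        unsigned int idle_ticks = 50;     /* min idle over policy->cpus */
        unsigned int up_threshold = 80;   /* default after this patch */
        unsigned int freq_cur = 2000000;  /* kHz */

        unsigned int load = (100 * (total_ticks - idle_ticks)) / total_ticks;

        if (load > up_threshold) {
                printf("load %u%% > %u%%: jump to policy->max\n",
                       load, up_threshold);
        } else if (load < up_threshold - 10) {
                /* lowest frequency that keeps load 10 points under the
                 * threshold: freq_next = freq_cur * load / (threshold - 10) */
                unsigned int freq_next = (freq_cur * load) /
                                         (up_threshold - 10);
                printf("load %u%%: scale down toward %u kHz\n",
                       load, freq_next);
        } else {
                printf("load %u%%: stay at %u kHz\n", load, freq_cur);
        }
        return 0;
}

Here load comes out at 50%, below 80 - 10 = 70, so the governor would request 2000000 * 50 / 70 = 1428571 kHz and let CPUFREQ_RELATION_L round it to the next supported frequency.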
@@ -326,33 +481,56 @@
 {
 	unsigned int cpu = policy->cpu;
 	struct cpu_dbs_info_s *this_dbs_info;
+	unsigned int j;
+	int rc;
 
 	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-				(!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (policy->cpuinfo.transition_latency >
-				(TRANSITION_LATENCY_LIMIT * 1000))
+				(TRANSITION_LATENCY_LIMIT * 1000)) {
+			printk(KERN_WARNING "ondemand governor failed to load "
+			       "due to too long transition latency\n");
 			return -EINVAL;
+		}
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
-		this_dbs_info->cur_policy = policy;
-
-		this_dbs_info->prev_cpu_idle_up =
-				kstat_cpu(cpu).cpustat.idle +
-				kstat_cpu(cpu).cpustat.iowait;
-		this_dbs_info->prev_cpu_idle_down =
-				kstat_cpu(cpu).cpustat.idle +
-				kstat_cpu(cpu).cpustat.iowait;
-		this_dbs_info->enable = 1;
-		sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		mutex_lock(&dbs_mutex);
 		dbs_enable++;
+		if (dbs_enable == 1) {
+			kondemand_wq = create_workqueue("kondemand");
+			if (!kondemand_wq) {
+				printk(KERN_ERR
+					"Creation of kondemand failed\n");
+				dbs_enable--;
+				mutex_unlock(&dbs_mutex);
+				return -ENOSPC;
+			}
+		}
+
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		if (rc) {
+			if (dbs_enable == 1)
+				destroy_workqueue(kondemand_wq);
+			dbs_enable--;
+			mutex_unlock(&dbs_mutex);
+			return rc;
+		}
+
+		for_each_cpu_mask(j, policy->cpus) {
+			struct cpu_dbs_info_s *j_dbs_info;
+			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info->cur_policy = policy;
+
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_wall = get_jiffies_64();
+		}
+		this_dbs_info->enable = 1;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -360,59 +538,56 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 1) {
 			unsigned int latency;
 			/* policy latency is in nS. Convert it to uS first */
+			latency = policy->cpuinfo.transition_latency / 1000;
+			if (latency == 0)
+				latency = 1;
 
-			latency = policy->cpuinfo.transition_latency;
-			if (latency < 1000)
-				latency = 1000;
-
-			def_sampling_rate = (latency / 1000) *
+			def_sampling_rate = latency *
 					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 
-			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_timer_init();
+			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
+			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		}
-
-		up(&dbs_sem);
+		dbs_timer_init(policy->cpu);
+
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
-		this_dbs_info->enable = 0;
+		mutex_lock(&dbs_mutex);
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
-		if (dbs_enable == 0)
-			dbs_timer_exit();
-
-		up(&dbs_sem);
+		if (dbs_enable == 0)
+			destroy_workqueue(kondemand_wq);
+
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
+						CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
+						policy->min,
+						CPUFREQ_RELATION_L);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
 }
 
-struct cpufreq_governor cpufreq_gov_dbs = {
-	.name		= "ondemand",
-	.governor	= cpufreq_governor_dbs,
-	.owner		= THIS_MODULE,
+static struct cpufreq_governor cpufreq_gov_dbs = {
+	.name = "ondemand",
+	.governor = cpufreq_governor_dbs,
+	.owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_dbs);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
@@ -421,17 +596,15 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running */
-	flush_scheduled_work();
-
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
 
-MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-		"Low Latency Frequency Transition capable processors");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+                   "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
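Finally, the CPUFREQ_GOV_START hunk changes how the default sampling rate is derived: latency is converted from nS to uS, multiplied by DEF_SAMPLING_RATE_LATENCY_MULTIPLIER, and clamped from below by MIN_STAT_SAMPLING_RATE so that at least 10 timer ticks separate samples. The sketch below replays that computation outside the kernel; the 10000 nS transition latency and HZ=250 are assumed values, and jiffies_to_usecs() is simplified to plain arithmetic.

/*
 * Sketch of the sampling-rate setup in CPUFREQ_GOV_START, under the
 * assumptions stated above.
 */
#include <stdio.h>

#define HZ 250                                 /* assumed kernel tick rate */
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER 1000
#define MIN_SAMPLING_RATE_RATIO 2

static unsigned int jiffies_to_usecs(unsigned int j)
{
        return j * (1000000 / HZ);             /* simplified; exact for HZ=250 */
}

int main(void)
{
        unsigned int transition_latency_ns = 10000;  /* from cpuinfo, assumed */
        unsigned int latency = transition_latency_ns / 1000;  /* nS -> uS */
        unsigned int min_stat = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
        unsigned int def_sampling_rate;

        if (latency == 0)
                latency = 1;
        def_sampling_rate = latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
        if (def_sampling_rate < min_stat)
                def_sampling_rate = min_stat;

        /* 10 uS latency gives 10000 uS, but the 80000 uS statistical
         * floor wins, so the governor samples every 80 ms here. */
        printf("def_sampling_rate = %u uS (floor %u uS)\n",
               def_sampling_rate, min_stat);
        return 0;
}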