X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq_conservative.c;fp=drivers%2Fcpufreq%2Fcpufreq_conservative.c;h=ac38766b2583eec5b5db349b7227e443d6b1177c;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=c4c578defabfa0e5df0251d430fde58fee7d45ab;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578def..ac38766b2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,7 +22,6 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
-#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/kmod.h>
 #include <linux/workqueue.h>
@@ -36,7 +35,12 @@
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define MIN_FREQUENCY_UP_THRESHOLD		(0)
+#define MAX_FREQUENCY_UP_THRESHOLD		(100)
+
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
+#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
+#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
 
 /*
  * The polling frequency of this governor depends on the capability of
@@ -49,14 +53,10 @@
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
-#define MIN_SAMPLING_RATE_RATIO			(2)
-/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
-#define DEF_SAMPLING_DOWN_FACTOR		(1)
-#define MAX_SAMPLING_DOWN_FACTOR		(10)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(100000)
+#define DEF_SAMPLING_DOWN_FACTOR		(5)
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000)
 
 static void do_dbs_timer(void *data);
@@ -66,21 +66,11 @@ struct cpu_dbs_info_s {
 	unsigned int 		prev_cpu_idle_up;
 	unsigned int 		prev_cpu_idle_down;
 	unsigned int 		enable;
-	unsigned int		down_skip;
-	unsigned int		requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- */
 static DEFINE_MUTEX 	(dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
@@ -97,8 +87,6 @@ static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold 		= DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold 	= DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor 	= DEF_SAMPLING_DOWN_FACTOR,
-	.ignore_nice		= 0,
-	.freq_step		= 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -148,7 +136,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	unsigned int input;
 	int ret;
 	ret = sscanf (buf, "%u", &input);
-	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+	if (ret != 1 )
 		return -EINVAL;
 
 	mutex_lock(&dbs_mutex);
@@ -185,7 +173,9 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
+	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+			input < MIN_FREQUENCY_UP_THRESHOLD ||
+			input <= dbs_tuners_ins.down_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -204,7 +194,9 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
+	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
+			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+			input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -305,17 +297,31 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int tmp_idle_ticks, total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	static int down_skip[NR_CPUS];
+	static int requested_freq[NR_CPUS];
+	static unsigned short init_flag = 0;
+	struct cpu_dbs_info_s *this_dbs_info;
+	struct cpu_dbs_info_s *dbs_info;
+
 	struct cpufreq_policy *policy;
+	unsigned int j;
 
+	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
 	policy = this_dbs_info->cur_policy;
 
+	if ( init_flag == 0 ) {
+		for_each_online_cpu(j) {
+			dbs_info = &per_cpu(cpu_dbs_info, j);
+			requested_freq[j] = dbs_info->cur_policy->cur;
+		}
+		init_flag = 1;
+	}
+
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
@@ -331,29 +337,39 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
-	idle_ticks = UINT_MAX;
-	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+	idle_ticks = UINT_MAX;
+	for_each_cpu_mask(j, policy->cpus) {
+		unsigned int tmp_idle_ticks, total_idle_ticks;
+		struct cpu_dbs_info_s *j_dbs_info;
 
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		/* Check for frequency increase */
+		total_idle_ticks = get_cpu_idle_time(j);
+		tmp_idle_ticks = total_idle_ticks -
+			j_dbs_info->prev_cpu_idle_up;
+		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+		if (tmp_idle_ticks < idle_ticks)
+			idle_ticks = tmp_idle_ticks;
+	}
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
-		this_dbs_info->down_skip = 0;
-		this_dbs_info->prev_cpu_idle_down =
-			this_dbs_info->prev_cpu_idle_up;
+		down_skip[cpu] = 0;
+		for_each_cpu_mask(j, policy->cpus) {
+			struct cpu_dbs_info_s *j_dbs_info;
+
+			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info->prev_cpu_idle_down =
+				j_dbs_info->prev_cpu_idle_up;
+		}
 		/* if we are already at full speed then break out early */
-		if (this_dbs_info->requested_freq == policy->max)
+		if (requested_freq[cpu] == policy->max)
 			return;
 
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
@@ -362,45 +378,49 @@ static void dbs_check_cpu(int cpu)
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		this_dbs_info->requested_freq += freq_step;
-		if (this_dbs_info->requested_freq > policy->max)
-			this_dbs_info->requested_freq = policy->max;
+		requested_freq[cpu] += freq_step;
+		if (requested_freq[cpu] > policy->max)
+			requested_freq[cpu] = policy->max;
 
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+		__cpufreq_driver_target(policy, requested_freq[cpu],
 			CPUFREQ_RELATION_H);
 		return;
 	}
 
 	/* Check for frequency decrease */
-	this_dbs_info->down_skip++;
-	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
+	down_skip[cpu]++;
+	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	/* Check for frequency decrease */
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+	idle_ticks = UINT_MAX;
+	for_each_cpu_mask(j, policy->cpus) {
+		unsigned int tmp_idle_ticks, total_idle_ticks;
+		struct cpu_dbs_info_s *j_dbs_info;
+
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
+		tmp_idle_ticks = total_idle_ticks -
+			j_dbs_info->prev_cpu_idle_down;
+		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
+		if (tmp_idle_ticks < idle_ticks)
+			idle_ticks = tmp_idle_ticks;
+	}
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
-	this_dbs_info->down_skip = 0;
+	down_skip[cpu] = 0;
 
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		usecs_to_jiffies(freq_down_sampling_rate);
+			usecs_to_jiffies(freq_down_sampling_rate);
 
 	if (idle_ticks > down_idle_ticks) {
-		/*
-		 * if we are already at the lowest speed then break out early
+		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_step to be zero
-		 */
-		if (this_dbs_info->requested_freq == policy->min
+		 * freq_step to be zero */
+		if (requested_freq[cpu] == policy->min
 				|| dbs_tuners_ins.freq_step == 0)
 			return;
 
@@ -410,12 +430,13 @@ static void dbs_check_cpu(int cpu)
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		this_dbs_info->requested_freq -= freq_step;
-		if (this_dbs_info->requested_freq < policy->min)
-			this_dbs_info->requested_freq = policy->min;
+		requested_freq[cpu] -= freq_step;
+		if (requested_freq[cpu] < policy->min)
+			requested_freq[cpu] = policy->min;
 
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
-				CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(policy,
+				requested_freq[cpu],
+				CPUFREQ_RELATION_H);
 		return;
 	}
 }
@@ -423,14 +444,12 @@ static void do_dbs_timer(void *data)
 {
 	int i;
 
-	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -474,13 +493,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
+			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_idle_down =
 				j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
-		this_dbs_info->down_skip = 0;
-		this_dbs_info->requested_freq = policy->cur;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
 		/*
@@ -490,17 +507,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 1) {
 			unsigned int latency;
 			/* policy latency is in nS. Convert it to uS first */
-			latency = policy->cpuinfo.transition_latency / 1000;
-			if (latency == 0)
-				latency = 1;
-
-			def_sampling_rate = 10 * latency *
-					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+			latency = policy->cpuinfo.transition_latency;
+			if (latency < 1000)
+				latency = 1000;
+			def_sampling_rate = (latency / 1000) *
+					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
+			dbs_tuners_ins.ignore_nice = 0;
+			dbs_tuners_ins.freq_step = 5;
 
 			dbs_timer_init();
 		}
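For reference, the up/down trigger arithmetic that both sides of this diff share can be sketched as a standalone user-space C program. This is an illustration only, not kernel code: HZ, the sampling rate, and the simplified usecs_to_jiffies() below are assumptions, with the tuner values taken from the defaults on the new side of the diff.

#include <stdio.h>

#define HZ 250	/* assumed tick rate; in the kernel this is a config option */

/* crude stand-in for the kernel's usecs_to_jiffies() helper */
static unsigned int usecs_to_jiffies(unsigned int usecs)
{
	return usecs / (1000000 / HZ);
}

int main(void)
{
	unsigned int sampling_rate = 100000;	/* 100 ms, hypothetical */
	unsigned int up_threshold = 80;		/* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int down_threshold = 20;	/* DEF_FREQUENCY_DOWN_THRESHOLD */
	unsigned int sampling_down_factor = 5;	/* DEF_SAMPLING_DOWN_FACTOR */

	/* dbs_check_cpu() scales the measured idle ticks by 100, so the
	 * thresholds are scaled the same way before the comparison */
	unsigned int up_idle_ticks = (100 - up_threshold) *
			usecs_to_jiffies(sampling_rate);
	unsigned int freq_down_sampling_rate = sampling_rate *
			sampling_down_factor;
	unsigned int down_idle_ticks = (100 - down_threshold) *
			usecs_to_jiffies(freq_down_sampling_rate);

	printf("raise speed when scaled idle ticks < %u\n", up_idle_ticks);
	printf("lower speed when scaled idle ticks > %u\n", down_idle_ticks);
	return 0;
}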