* published by the Free Software Foundation.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
-#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
unsigned int down_threshold;
};
-struct dbs_tuners dbs_tuners_ins = {
+static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
}
#define define_one_ro(_name) \
-static struct freq_attr _name = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = show_##_name, \
-}
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
- down(&dbs_sem);
if (ret != 1 )
- goto out;
+ return -EINVAL;
+ down(&dbs_sem);
dbs_tuners_ins.sampling_down_factor = input;
-out:
up(&dbs_sem);
+
return count;
}
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
+
down(&dbs_sem);
- if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE)
- goto out;
+ if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
dbs_tuners_ins.sampling_rate = input;
-out:
up(&dbs_sem);
+
return count;
}
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
+
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD ||
- input <= dbs_tuners_ins.down_threshold)
- goto out;
+ input <= dbs_tuners_ins.down_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
dbs_tuners_ins.up_threshold = input;
-out:
up(&dbs_sem);
+
return count;
}
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
+
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
input < MIN_FREQUENCY_DOWN_THRESHOLD ||
- input >= dbs_tuners_ins.up_threshold)
- goto out;
+ input >= dbs_tuners_ins.up_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
dbs_tuners_ins.down_threshold = input;
-out:
up(&dbs_sem);
+
return count;
}
-#define define_one_rw(_name) \
-static struct freq_attr _name = { \
- .attr = { .name = __stringify(_name), .mode = 0644 }, \
- .show = show_##_name, \
- .store = store_##_name, \
-}
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
+ struct cpufreq_policy *policy;
+ unsigned int j;
+
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
if (!this_dbs_info->enable)
return;
+ policy = this_dbs_info->cur_policy;
/*
* The default safe range is 20% to 80%
* Every sampling_rate, we check
* Frequency reduction happens at minimum steps of
* 5% of max_frequency
*/
+
/* Check for frequency increase */
total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ if (j == cpu)
+ continue;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ /* Check for frequency increase */
+ total_idle_ticks = kstat_cpu(j).cpustat.idle +
+ kstat_cpu(j).cpustat.iowait;
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_up;
+ j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- this_dbs_info->cur_policy->max,
+ __cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
+ total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_down;
+ this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ if (j == cpu)
+ continue;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		/* Check for frequency decrease */
+ total_idle_ticks = kstat_cpu(j).cpustat.idle +
+ kstat_cpu(j).cpustat.iowait;
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_down;
+ j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
+
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
down_skip[cpu] = 0;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
sampling_rate_in_HZ(freq_down_sampling_rate);
if (idle_ticks > down_idle_ticks ) {
- freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
+ freq_down_step = (5 * policy->max) / 100;
/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_down_step == 0))
freq_down_step = 5;
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- this_dbs_info->cur_policy->cur - freq_down_step,
+ __cpufreq_driver_target(policy,
+ policy->cur - freq_down_step,
CPUFREQ_RELATION_H);
return;
}
static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
- schedule_work(&dbs_work);
+ schedule_delayed_work(&dbs_work,
+ sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
return;
}
{
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int j;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
break;
down(&dbs_sem);
- this_dbs_info->cur_policy = policy;
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
- this_dbs_info->prev_cpu_idle_up =
- kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- this_dbs_info->prev_cpu_idle_down =
- kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
+ j_dbs_info->prev_cpu_idle_up =
+ kstat_cpu(j).cpustat.idle +
+ kstat_cpu(j).cpustat.iowait;
+ j_dbs_info->prev_cpu_idle_down =
+ kstat_cpu(j).cpustat.idle +
+ kstat_cpu(j).cpustat.iowait;
+ }
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
dbs_enable++;