linux 2.6.16.38 w/ vs2.0.3-rc1
[linux-2.6.git] / drivers / cpufreq / cpufreq_ondemand.c
index 52cf1f0..9ee9411 100644
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
+#include <linux/percpu.h>
 #include <linux/mutex.h>
 
 /*
 #define MIN_FREQUENCY_UP_THRESHOLD             (11)
 #define MAX_FREQUENCY_UP_THRESHOLD             (100)
 
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10 ms, using an appropriate sampling
  * rate.
  * For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
  * this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO                        (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE                 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE                      (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE                      (500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER   (1000)
+#define DEF_SAMPLING_DOWN_FACTOR               (1)
+#define MAX_SAMPLING_DOWN_FACTOR               (10)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
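
For concreteness, a minimal sketch (not part of the patch) of how these limits interact: the helper name below is invented, but its clamp mirrors the CPUFREQ_GOV_START path further down, which falls back to MIN_STAT_SAMPLING_RATE when the latency-derived default is too small.

/* Illustrative only: derive a default sampling rate (in uS) from a
 * driver's transition latency (in uS), keeping at least 10 ticks
 * between two measurements as MIN_STAT_SAMPLING_RATE requires. */
static unsigned int example_def_sampling_rate(unsigned int latency)
{
	unsigned int rate = latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

	if (rate < MIN_STAT_SAMPLING_RATE)
		rate = MIN_STAT_SAMPLING_RATE;
	return rate;	/* e.g. a 50 uS latency gives 50000 uS = 50 ms */
}
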
 
 static void do_dbs_timer(void *data);
 
 struct cpu_dbs_info_s {
-       cputime64_t prev_cpu_idle;
-       cputime64_t prev_cpu_wall;
-       struct cpufreq_policy *cur_policy;
-       struct work_struct work;
-       unsigned int enable;
+       struct cpufreq_policy   *cur_policy;
+       unsigned int            prev_cpu_idle_up;
+       unsigned int            prev_cpu_idle_down;
+       unsigned int            enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
-/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct workqueue_struct *kondemand_wq;
+static DEFINE_MUTEX(dbs_mutex);
+static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
-       unsigned int sampling_rate;
-       unsigned int up_threshold;
-       unsigned int ignore_nice;
+       unsigned int            sampling_rate;
+       unsigned int            sampling_down_factor;
+       unsigned int            up_threshold;
+       unsigned int            ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
-       .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-       .ignore_nice = 0,
+       .up_threshold           = DEF_FREQUENCY_UP_THRESHOLD,
+       .sampling_down_factor   = DEF_SAMPLING_DOWN_FACTOR,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 {
-       cputime64_t retval;
-
-       retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-                       kstat_cpu(cpu).cpustat.iowait);
-
-       if (dbs_tuners_ins.ignore_nice)
-               retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
-
-       return retval;
+       return kstat_cpu(cpu).cpustat.idle +
+              kstat_cpu(cpu).cpustat.iowait +
+              (dbs_tuners_ins.ignore_nice ? kstat_cpu(cpu).cpustat.nice : 0);
 }
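
As a hedged userspace analogue of the accounting above: the same counters are exported through /proc/stat, whose 2.6-era per-CPU lines carry the fields user, nice, system, idle, iowait, irq, softirq. The function name here is invented; this is a sketch, not kernel code.

#include <stdio.h>

/* Sum the idle components of one "cpuN ..." line from /proc/stat. */
static unsigned long long idle_from_proc_stat(const char *line,
					      int ignore_nice)
{
	unsigned long long user = 0, nice = 0, sys = 0, idle = 0, iowait = 0;

	sscanf(line, "%*s %llu %llu %llu %llu %llu",
	       &user, &nice, &sys, &idle, &iowait);
	return idle + iowait + (ignore_nice ? nice : 0);
}
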
 
 /************************** sysfs interface ************************/
@@ -107,8 +106,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
        return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)           \
 static struct freq_attr _name =                \
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -122,15 +121,35 @@ static ssize_t show_##file_name                                           \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
 }
 show_one(sampling_rate, sampling_rate);
+show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+               const char *buf, size_t count)
+{
+       unsigned int input;
+       int ret;
+       ret = sscanf(buf, "%u", &input);
+       if (ret != 1)
+               return -EINVAL;
+
+       if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+               return -EINVAL;
+
+       mutex_lock(&dbs_mutex);
+       dbs_tuners_ins.sampling_down_factor = input;
+       mutex_unlock(&dbs_mutex);
+
+       return count;
+}
+
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
 {
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
@@ -144,15 +163,15 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        return count;
 }
 
 static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
 {
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
@@ -171,14 +190,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        int ret;
 
        unsigned int j;
 
        ret = sscanf(buf, "%u", &input);
        if ( ret != 1 )
                return -EINVAL;
 
        if ( input > 1 )
                input = 1;
 
        mutex_lock(&dbs_mutex);
        if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
@@ -186,12 +205,12 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        }
        dbs_tuners_ins.ignore_nice = input;
 
-       /* we need to re-evaluate prev_cpu_idle */
+       /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
        for_each_online_cpu(j) {
-               struct cpu_dbs_info_s *dbs_info;
-               dbs_info = &per_cpu(cpu_dbs_info, j);
-               dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-               dbs_info->prev_cpu_wall = get_jiffies_64();
+               struct cpu_dbs_info_s *j_dbs_info;
+               j_dbs_info = &per_cpu(cpu_dbs_info, j);
+               j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+               j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
        mutex_unlock(&dbs_mutex);
 
@@ -203,6 +222,7 @@ static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
+define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 
@@ -210,6 +230,7 @@ static struct attribute * dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
+       &sampling_down_factor.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        NULL
@@ -222,115 +243,140 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+static void dbs_check_cpu(int cpu)
 {
-       unsigned int idle_ticks, total_ticks;
-       unsigned int load;
-       cputime64_t cur_jiffies;
+       unsigned int idle_ticks, up_idle_ticks, total_ticks;
+       unsigned int freq_next;
+       unsigned int freq_down_sampling_rate;
+       static int down_skip[NR_CPUS];
+       struct cpu_dbs_info_s *this_dbs_info;
 
        struct cpufreq_policy *policy;
        unsigned int j;
 
+       this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
        if (!this_dbs_info->enable)
                return;
 
        policy = this_dbs_info->cur_policy;
-       cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-       total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                       this_dbs_info->prev_cpu_wall);
-       this_dbs_info->prev_cpu_wall = cur_jiffies;
-       if (!total_ticks)
-               return;
        /*
         * Every sampling_rate, we check, if current idle time is less
         * than 20% (default), then we try to increase frequency
-        * Every sampling_rate, we look for a the lowest
+        * Every sampling_rate*sampling_down_factor, we look for the lowest
         * frequency which can sustain the load while keeping idle time over
         * 30%. If such a frequency exist, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of current frequency
         */
 
-       /* Get Idle Time */
+       /* Check for frequency increase */
        idle_ticks = UINT_MAX;
        for_each_cpu_mask(j, policy->cpus) {
-               cputime64_t total_idle_ticks;
-               unsigned int tmp_idle_ticks;
+               unsigned int tmp_idle_ticks, total_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;
 
                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                total_idle_ticks = get_cpu_idle_time(j);
-               tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
-                               j_dbs_info->prev_cpu_idle);
-               j_dbs_info->prev_cpu_idle = total_idle_ticks;
+               tmp_idle_ticks = total_idle_ticks -
+                       j_dbs_info->prev_cpu_idle_up;
+               j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
 
                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }
-       load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
-       /* Check for frequency increase */
-       if (load > dbs_tuners_ins.up_threshold) {
+       /* Scale idle ticks by 100 and compare with up and down ticks */
+       idle_ticks *= 100;
+       up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
+                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+       if (idle_ticks < up_idle_ticks) {
+               down_skip[cpu] = 0;
+               for_each_cpu_mask(j, policy->cpus) {
+                       struct cpu_dbs_info_s *j_dbs_info;
+
+                       j_dbs_info = &per_cpu(cpu_dbs_info, j);
+                       j_dbs_info->prev_cpu_idle_down =
+                                       j_dbs_info->prev_cpu_idle_up;
+               }
                /* if we are already at full speed then break out early */
                if (policy->cur == policy->max)
                        return;
 
        __cpufreq_driver_target(policy, policy->max,
                        CPUFREQ_RELATION_H);
                return;
        }
 
        /* Check for frequency decrease */
+       down_skip[cpu]++;
+       if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+               return;
+
+       idle_ticks = UINT_MAX;
+       for_each_cpu_mask(j, policy->cpus) {
+               unsigned int tmp_idle_ticks, total_idle_ticks;
+               struct cpu_dbs_info_s *j_dbs_info;
+
+               j_dbs_info = &per_cpu(cpu_dbs_info, j);
+               /* Check for frequency decrease */
+               total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
+               tmp_idle_ticks = total_idle_ticks -
+                       j_dbs_info->prev_cpu_idle_down;
+               j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+               if (tmp_idle_ticks < idle_ticks)
+                       idle_ticks = tmp_idle_ticks;
+       }
+
+       down_skip[cpu] = 0;
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;
 
+       /* Compute how many ticks there are between two measurements */
+       freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
+               dbs_tuners_ins.sampling_down_factor;
+       total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
+
        /*
         * The optimal frequency is the frequency that is the lowest that
         * can support the current CPU usage without triggering the up
         * policy. To be safe, we focus 10 points under the threshold.
         */
-       if (load < (dbs_tuners_ins.up_threshold - 10)) {
-               unsigned int freq_next;
-               freq_next = (policy->cur * load) /
+       freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+       freq_next = (freq_next * policy->cur) /
                        (dbs_tuners_ins.up_threshold - 10);
 
+       if (freq_next <= ((policy->cur * 95) / 100))
                __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
-       }
 }
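
To make the arithmetic above concrete, a hedged standalone sketch follows (both function names are invented). The up test compares the minimum idle tick count, scaled by 100, against (100 - up_threshold) * sampling-interval-in-jiffies: with the default up_threshold of 80 and a 5-jiffy sample, up_idle_ticks = (100 - 80) * 5 = 100, so anything under one full idle tick sends the CPU to policy->max. On the down path, total_ticks comes from usecs_to_jiffies(sampling_rate * sampling_down_factor), and freq_next aims the resulting load 10 points under the threshold.

/* Illustrative only: the frequency-increase predicate above. */
static int example_should_go_max(unsigned int idle_ticks,
				 unsigned int sample_jiffies,
				 unsigned int up_threshold)
{
	return idle_ticks * 100 < (100 - up_threshold) * sample_jiffies;
}

/* Illustrative only: the lowest frequency expected to hold the measured
 * load 10 points under the threshold, as in the freq_next computation. */
static unsigned int example_freq_next(unsigned int cur_khz,
				      unsigned int idle_ticks,
				      unsigned int total_ticks,
				      unsigned int up_threshold)
{
	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;

	return (load * cur_khz) / (up_threshold - 10);
}
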
 
 static void do_dbs_timer(void *data)
-{
-       unsigned int cpu = smp_processor_id();
-       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-       if (!dbs_info->enable)
-               return;
-
-       lock_cpu_hotplug();
-       dbs_check_cpu(dbs_info);
-       unlock_cpu_hotplug();
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+{
+       int i;
+       mutex_lock(&dbs_mutex);
+       for_each_online_cpu(i)
+               dbs_check_cpu(i);
+       schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-}
+       mutex_unlock(&dbs_mutex);
+}
 
-static inline void dbs_timer_init(unsigned int cpu)
+static inline void dbs_timer_init(void)
 {
-       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-       INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+       INIT_WORK(&dbs_work, do_dbs_timer, NULL);
+       schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
 }
 
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+static inline void dbs_timer_exit(void)
 {
-       dbs_info->enable = 0;
-       cancel_delayed_work(&dbs_info->work);
-       flush_workqueue(kondemand_wq);
+       cancel_delayed_work(&dbs_work);
+       return;
 }
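
One subtlety worth spelling out as a sketch (helper name invented): cancel_delayed_work() only removes an instance that is still pending on its timer; it does not wait for a handler that is already executing. Because do_dbs_timer() re-arms itself, the module exit path below pairs the cancel with flush_scheduled_work().

/* Illustrative teardown pairing for a self-rearming delayed work item. */
static void example_teardown(void)
{
	cancel_delayed_work(&dbs_work);	/* drop a pending instance */
	flush_scheduled_work();		/* wait out one already running */
}
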
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -344,39 +390,29 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;
 
                if (policy->cpuinfo.transition_latency >
-                               (TRANSITION_LATENCY_LIMIT * 1000)) {
-                       printk(KERN_WARNING "ondemand governor failed to load "
-                              "due to too long transition latency\n");
+                               (TRANSITION_LATENCY_LIMIT * 1000))
                        return -EINVAL;
-               }
                if (this_dbs_info->enable) /* Already enabled */
                        break;
 
                mutex_lock(&dbs_mutex);
-               dbs_enable++;
-               if (dbs_enable == 1) {
-                       kondemand_wq = create_workqueue("kondemand");
-                       if (!kondemand_wq) {
-                               printk(KERN_ERR "Creation of kondemand failed\n");
-                               dbs_enable--;
-                               mutex_unlock(&dbs_mutex);
-                               return -ENOSPC;
-                       }
-               }
                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
 
-                       j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                       j_dbs_info->prev_cpu_wall = get_jiffies_64();
+                       j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+                       j_dbs_info->prev_cpu_idle_down =
+                               j_dbs_info->prev_cpu_idle_up;
                }
                this_dbs_info->enable = 1;
                sysfs_create_group(&policy->kobj, &dbs_attr_group);
+               dbs_enable++;
                /*
                 * Start the timer schedule work when this governor
                 * is used for the first time
@@ -395,20 +431,26 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
-               }
-               dbs_timer_init(policy->cpu);
+                       dbs_tuners_ins.ignore_nice = 0;
 
+                       dbs_timer_init();
+               }
+
                mutex_unlock(&dbs_mutex);
                break;
 
        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
-               dbs_timer_exit(this_dbs_info);
+               this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
-               if (dbs_enable == 0)
-                       destroy_workqueue(kondemand_wq);
-
+               /*
+                * Stop the timer schedule work when this governor
+                * is no longer used by any CPU
+                */
+               if (dbs_enable == 0)
+                       dbs_timer_exit();
+
                mutex_unlock(&dbs_mutex);
 
                break;
@@ -416,13 +458,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->max,
                                                CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->min,
                                                CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
@@ -430,9 +472,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 }
 
 static struct cpufreq_governor cpufreq_gov_dbs = {
        .name           = "ondemand",
        .governor       = cpufreq_governor_dbs,
        .owner          = THIS_MODULE,
 };
 
 static int __init cpufreq_gov_dbs_init(void)
@@ -442,15 +484,17 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
+       /* Make sure that the scheduled work is indeed not running */
+       flush_scheduled_work();
+
        cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
 
 
-MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
-MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
-MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-                   "Low Latency Frequency Transition capable processors");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+               "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
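
Finally, a hedged usage sketch from userspace: the tunables land under each policy's cpufreq directory. The paths below assume the attribute group is exposed as "ondemand" under cpu0's policy, so treat them as illustrative and adjust for the actual sysfs layout.

#include <stdio.h>

/* Write a string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	const char *base = "/sys/devices/system/cpu/cpu0/cpufreq";
	char path[128];

	/* Select the governor, then only consider a frequency decrease
	 * every 2nd sample (sampling_down_factor = 2). */
	snprintf(path, sizeof(path), "%s/scaling_governor", base);
	sysfs_write(path, "ondemand");
	snprintf(path, sizeof(path), "%s/ondemand/sampling_down_factor", base);
	sysfs_write(path, "2");
	return 0;
}
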