/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/notifier.h>
18 #include <linux/cpufreq.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/device.h>
23 #include <linux/slab.h>
24 #include <linux/cpu.h>
25 #include <linux/completion.h>
28 * The "cpufreq driver" - the arch- or hardware-dependend low
29 * level driver of CPUFreq support, and its spinlock. This lock
30 * also protects the cpufreq_cpu_data array.
32 static struct cpufreq_driver *cpufreq_driver;
33 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
34 static spinlock_t cpufreq_driver_lock = SPIN_LOCK_UNLOCKED;
36 /* internal prototype */
37 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The rwsem locks both lists.
 */
static struct notifier_block	*cpufreq_policy_notifier_list;
static struct notifier_block	*cpufreq_transition_notifier_list;
static DECLARE_RWSEM(cpufreq_notifier_rwsem);

/* registered governors and the semaphore serializing list access */
static LIST_HEAD(cpufreq_governor_list);
static DECLARE_MUTEX(cpufreq_governor_sem);
55 static struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
57 struct cpufreq_policy *data;
63 /* get the cpufreq driver */
64 spin_lock_irqsave(&cpufreq_driver_lock, flags);
69 if (!try_module_get(cpufreq_driver->owner))
74 data = cpufreq_cpu_data[cpu];
77 goto err_out_put_module;
79 if (!kobject_get(&data->kobj))
80 goto err_out_put_module;
83 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
88 module_put(cpufreq_driver->owner);
90 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
95 static void cpufreq_cpu_put(struct cpufreq_policy *data)
97 kobject_put(&data->kobj);
98 module_put(cpufreq_driver->owner);
101 /*********************************************************************
103 *********************************************************************/
106 * cpufreq_parse_governor - parse a governor string
108 int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
109 struct cpufreq_governor **governor)
113 if (cpufreq_driver->setpolicy) {
114 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
115 *policy = CPUFREQ_POLICY_PERFORMANCE;
117 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
118 *policy = CPUFREQ_POLICY_POWERSAVE;
123 struct cpufreq_governor *t;
124 down(&cpufreq_governor_sem);
125 if (!cpufreq_driver || !cpufreq_driver->target)
127 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
128 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
130 up(&cpufreq_governor_sem);
135 up(&cpufreq_governor_sem);
139 EXPORT_SYMBOL_GPL(cpufreq_parse_governor);
142 /* drivers/base/cpu.c */
143 extern struct sysdev_class cpu_sysdev_class;
147 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
149 * Write out information from cpufreq_driver->policy[cpu]; object must be
153 #define show_one(file_name, object) \
154 static ssize_t show_##file_name \
155 (struct cpufreq_policy * policy, char *buf) \
157 return sprintf (buf, "%u\n", policy->object); \
160 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
161 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
162 show_one(scaling_min_freq, min);
163 show_one(scaling_max_freq, max);
166 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
168 #define store_one(file_name, object) \
169 static ssize_t store_##file_name \
170 (struct cpufreq_policy * policy, const char *buf, size_t count) \
172 unsigned int ret = -EINVAL; \
173 struct cpufreq_policy new_policy; \
175 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
179 ret = sscanf (buf, "%u", &new_policy.object); \
183 ret = cpufreq_set_policy(&new_policy); \
185 return ret ? ret : count; \
188 store_one(scaling_min_freq,min);
189 store_one(scaling_max_freq,max);
192 * show_scaling_governor - show the current policy for the specified CPU
194 static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
196 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
197 return sprintf(buf, "powersave\n");
198 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
199 return sprintf(buf, "performance\n");
200 else if (policy->governor)
201 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
207 * store_scaling_governor - store policy for the specified CPU
209 static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
210 const char *buf, size_t count)
212 unsigned int ret = -EINVAL;
213 char str_governor[16];
214 struct cpufreq_policy new_policy;
216 ret = cpufreq_get_policy(&new_policy, policy->cpu);
220 ret = sscanf (buf, "%15s", str_governor);
224 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
227 ret = cpufreq_set_policy(&new_policy);
229 return ret ? ret : count;
233 * show_scaling_driver - show the cpufreq driver currently loaded
235 static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
237 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
241 * show_scaling_available_governors - show the available CPUfreq governors
243 static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
247 struct cpufreq_governor *t;
249 if (!cpufreq_driver->target) {
250 i += sprintf(buf, "performance powersave");
254 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
255 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
257 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
260 i += sprintf(&buf[i], "\n");
265 #define define_one_ro(_name) \
266 struct freq_attr _name = { \
267 .attr = { .name = __stringify(_name), .mode = 0444 }, \
268 .show = show_##_name, \
271 #define define_one_rw(_name) \
272 struct freq_attr _name = { \
273 .attr = { .name = __stringify(_name), .mode = 0644 }, \
274 .show = show_##_name, \
275 .store = store_##_name, \
278 define_one_ro(cpuinfo_min_freq);
279 define_one_ro(cpuinfo_max_freq);
280 define_one_ro(scaling_available_governors);
281 define_one_ro(scaling_driver);
282 define_one_rw(scaling_min_freq);
283 define_one_rw(scaling_max_freq);
284 define_one_rw(scaling_governor);
286 static struct attribute * default_attrs[] = {
287 &cpuinfo_min_freq.attr,
288 &cpuinfo_max_freq.attr,
289 &scaling_min_freq.attr,
290 &scaling_max_freq.attr,
291 &scaling_governor.attr,
292 &scaling_driver.attr,
293 &scaling_available_governors.attr,
297 #define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
298 #define to_attr(a) container_of(a,struct freq_attr,attr)
300 static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
302 struct cpufreq_policy * policy = to_policy(kobj);
303 struct freq_attr * fattr = to_attr(attr);
305 policy = cpufreq_cpu_get(policy->cpu);
308 ret = fattr->show ? fattr->show(policy,buf) : 0;
309 cpufreq_cpu_put(policy);
313 static ssize_t store(struct kobject * kobj, struct attribute * attr,
314 const char * buf, size_t count)
316 struct cpufreq_policy * policy = to_policy(kobj);
317 struct freq_attr * fattr = to_attr(attr);
319 policy = cpufreq_cpu_get(policy->cpu);
322 ret = fattr->store ? fattr->store(policy,buf,count) : 0;
323 cpufreq_cpu_put(policy);
327 static void cpufreq_sysfs_release(struct kobject * kobj)
329 struct cpufreq_policy * policy = to_policy(kobj);
330 complete(&policy->kobj_unregister);
333 static struct sysfs_ops sysfs_ops = {
338 static struct kobj_type ktype_cpufreq = {
339 .sysfs_ops = &sysfs_ops,
340 .default_attrs = default_attrs,
341 .release = cpufreq_sysfs_release,
346 * cpufreq_add_dev - add a CPU device
348 * Adds the cpufreq interface for a CPU device.
350 static int cpufreq_add_dev (struct sys_device * sys_dev)
352 unsigned int cpu = sys_dev->id;
354 struct cpufreq_policy new_policy;
355 struct cpufreq_policy *policy;
356 struct freq_attr **drv_attr;
359 if (!try_module_get(cpufreq_driver->owner))
362 policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
367 memset(policy, 0, sizeof(struct cpufreq_policy));
370 init_MUTEX_LOCKED(&policy->lock);
371 init_completion(&policy->kobj_unregister);
373 /* call driver. From then on the cpufreq must be able
374 * to accept all calls to ->verify and ->setpolicy for this CPU
376 ret = cpufreq_driver->init(policy);
380 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
382 /* prepare interface data */
383 policy->kobj.parent = &sys_dev->kobj;
384 policy->kobj.ktype = &ktype_cpufreq;
385 strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
387 ret = kobject_register(&policy->kobj);
391 /* set up files for this cpu device */
392 drv_attr = cpufreq_driver->attr;
393 while ((drv_attr) && (*drv_attr)) {
394 sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
398 spin_lock_irqsave(&cpufreq_driver_lock, flags);
399 cpufreq_cpu_data[cpu] = policy;
400 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
401 policy->governor = NULL; /* to assure that the starting sequence is
402 * run in cpufreq_set_policy */
405 /* set default policy */
407 ret = cpufreq_set_policy(&new_policy);
409 goto err_out_unregister;
411 module_put(cpufreq_driver->owner);
416 spin_lock_irqsave(&cpufreq_driver_lock, flags);
417 cpufreq_cpu_data[cpu] = NULL;
418 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
420 kobject_unregister(&policy->kobj);
421 wait_for_completion(&policy->kobj_unregister);
427 module_put(cpufreq_driver->owner);
433 * cpufreq_remove_dev - remove a CPU device
435 * Removes the cpufreq interface for a CPU device.
437 static int cpufreq_remove_dev (struct sys_device * sys_dev)
439 unsigned int cpu = sys_dev->id;
441 struct cpufreq_policy *data;
443 spin_lock_irqsave(&cpufreq_driver_lock, flags);
444 data = cpufreq_cpu_data[cpu];
447 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
450 cpufreq_cpu_data[cpu] = NULL;
451 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
453 if (!kobject_get(&data->kobj))
456 if (cpufreq_driver->target)
457 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
459 kobject_unregister(&data->kobj);
461 kobject_put(&data->kobj);
463 /* we need to make sure that the underlying kobj is actually
464 * not referenced anymore by anybody before we proceed with
467 wait_for_completion(&data->kobj_unregister);
469 if (cpufreq_driver->exit)
470 cpufreq_driver->exit(data);
478 * cpufreq_resume - restore the CPU clock frequency after resume
480 * Restore the CPU clock frequency so that our idea of the current
481 * frequency reflects the actual hardware.
483 static int cpufreq_resume(struct sys_device * sysdev)
485 int cpu = sysdev->id;
486 unsigned int ret = 0;
487 struct cpufreq_policy *cpu_policy;
489 if (!cpu_online(cpu))
492 /* we may be lax here as interrupts are off. Nonetheless
493 * we need to grab the correct cpu policy, as to check
494 * whether we really run on this CPU.
497 cpu_policy = cpufreq_cpu_get(cpu);
501 if (cpufreq_driver->resume)
502 ret = cpufreq_driver->resume(cpu_policy);
504 printk(KERN_ERR "cpufreq: resume failed in ->resume step on CPU %u\n", cpu_policy->cpu);
508 if (cpufreq_driver->setpolicy)
509 ret = cpufreq_driver->setpolicy(cpu_policy);
511 /* CPUFREQ_RELATION_H or CPUFREQ_RELATION_L have the same effect here, as cpu_policy->cur is known
512 * to be a valid and exact target frequency
514 ret = cpufreq_driver->target(cpu_policy, cpu_policy->cur, CPUFREQ_RELATION_H);
517 printk(KERN_ERR "cpufreq: resume failed in ->setpolicy/target step on CPU %u\n", cpu_policy->cpu);
520 cpufreq_cpu_put(cpu_policy);
524 static struct sysdev_driver cpufreq_sysdev_driver = {
525 .add = cpufreq_add_dev,
526 .remove = cpufreq_remove_dev,
527 .resume = cpufreq_resume,
531 /*********************************************************************
532 * NOTIFIER LISTS INTERFACE *
533 *********************************************************************/
536 * cpufreq_register_notifier - register a driver with cpufreq
537 * @nb: notifier function to register
538 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
540 * Add a driver to one of two lists: either a list of drivers that
541 * are notified about clock rate changes (once before and once after
542 * the transition), or a list of drivers that are notified about
543 * changes in cpufreq policy.
545 * This function may sleep, and has the same return conditions as
546 * notifier_chain_register.
548 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
552 down_write(&cpufreq_notifier_rwsem);
554 case CPUFREQ_TRANSITION_NOTIFIER:
555 ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
557 case CPUFREQ_POLICY_NOTIFIER:
558 ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
563 up_write(&cpufreq_notifier_rwsem);
567 EXPORT_SYMBOL(cpufreq_register_notifier);
571 * cpufreq_unregister_notifier - unregister a driver with cpufreq
572 * @nb: notifier block to be unregistered
573 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
575 * Remove a driver from the CPU frequency notifier list.
577 * This function may sleep, and has the same return conditions as
578 * notifier_chain_unregister.
580 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
584 down_write(&cpufreq_notifier_rwsem);
586 case CPUFREQ_TRANSITION_NOTIFIER:
587 ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
589 case CPUFREQ_POLICY_NOTIFIER:
590 ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
595 up_write(&cpufreq_notifier_rwsem);
599 EXPORT_SYMBOL(cpufreq_unregister_notifier);
602 /*********************************************************************
604 *********************************************************************/
607 int __cpufreq_driver_target(struct cpufreq_policy *policy,
608 unsigned int target_freq,
609 unsigned int relation)
611 return cpufreq_driver->target(policy, target_freq, relation);
613 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
616 int cpufreq_driver_target(struct cpufreq_policy *policy,
617 unsigned int target_freq,
618 unsigned int relation)
622 policy = cpufreq_cpu_get(policy->cpu);
628 ret = __cpufreq_driver_target(policy, target_freq, relation);
632 cpufreq_cpu_put(policy);
636 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
639 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
643 if (!try_module_get(policy->governor->owner))
646 ret = policy->governor->governor(policy, event);
648 /* we keep one module reference alive for each CPU governed by this CPU */
649 if ((event != CPUFREQ_GOV_START) || ret)
650 module_put(policy->governor->owner);
651 if ((event == CPUFREQ_GOV_STOP) && !ret)
652 module_put(policy->governor->owner);
658 int cpufreq_governor(unsigned int cpu, unsigned int event)
661 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
667 ret = __cpufreq_governor(policy, event);
670 cpufreq_cpu_put(policy);
674 EXPORT_SYMBOL_GPL(cpufreq_governor);
677 int cpufreq_register_governor(struct cpufreq_governor *governor)
679 struct cpufreq_governor *t;
684 down(&cpufreq_governor_sem);
686 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
687 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
688 up(&cpufreq_governor_sem);
692 list_add(&governor->governor_list, &cpufreq_governor_list);
694 up(&cpufreq_governor_sem);
698 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
701 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
706 down(&cpufreq_governor_sem);
707 list_del(&governor->governor_list);
708 up(&cpufreq_governor_sem);
711 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
715 /*********************************************************************
717 *********************************************************************/
720 * cpufreq_get_policy - get the current cpufreq_policy
721 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
723 * Reads the current cpufreq policy.
725 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
727 struct cpufreq_policy *cpu_policy;
731 cpu_policy = cpufreq_cpu_get(cpu);
735 down(&cpu_policy->lock);
736 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
737 up(&cpu_policy->lock);
739 cpufreq_cpu_put(cpu_policy);
743 EXPORT_SYMBOL(cpufreq_get_policy);
746 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
750 memcpy(&policy->cpuinfo,
752 sizeof(struct cpufreq_cpuinfo));
754 /* verify the cpu speed can be set within this limit */
755 ret = cpufreq_driver->verify(policy);
759 down_read(&cpufreq_notifier_rwsem);
761 /* adjust if necessary - all reasons */
762 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
765 /* adjust if necessary - hardware incompatibility*/
766 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
769 /* verify the cpu speed can be set within this limit,
770 which might be different to the first one */
771 ret = cpufreq_driver->verify(policy);
773 up_read(&cpufreq_notifier_rwsem);
777 /* notification of the new policy */
778 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
781 up_read(&cpufreq_notifier_rwsem);
783 data->min = policy->min;
784 data->max = policy->max;
786 if (cpufreq_driver->setpolicy) {
787 data->policy = policy->policy;
788 ret = cpufreq_driver->setpolicy(policy);
790 if (policy->governor != data->governor) {
791 /* save old, working values */
792 struct cpufreq_governor *old_gov = data->governor;
794 /* end old governor */
796 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
798 /* start new governor */
799 data->governor = policy->governor;
800 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
801 /* new governor failed, so re-start old one */
803 data->governor = old_gov;
804 __cpufreq_governor(data, CPUFREQ_GOV_START);
809 /* might be a policy change, too, so fall through */
811 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
819 * cpufreq_set_policy - set a new CPUFreq policy
820 * @policy: policy to be set.
822 * Sets a new CPU frequency and voltage scaling policy.
824 int cpufreq_set_policy(struct cpufreq_policy *policy)
827 struct cpufreq_policy *data;
832 data = cpufreq_cpu_get(policy->cpu);
839 ret = __cpufreq_set_policy(data, policy);
840 data->user_policy.min = data->min;
841 data->user_policy.max = data->max;
842 data->user_policy.policy = data->policy;
843 data->user_policy.governor = data->governor;
846 cpufreq_cpu_put(data);
850 EXPORT_SYMBOL(cpufreq_set_policy);
854 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
855 * @cpu: CPU which shall be re-evaluated
857 * Usefull for policy notifiers which have different necessities
858 * at different times.
860 int cpufreq_update_policy(unsigned int cpu)
862 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
863 struct cpufreq_policy policy;
873 sizeof(struct cpufreq_policy));
874 policy.min = data->user_policy.min;
875 policy.max = data->user_policy.max;
876 policy.policy = data->user_policy.policy;
877 policy.governor = data->user_policy.governor;
879 ret = __cpufreq_set_policy(data, &policy);
883 cpufreq_cpu_put(data);
886 EXPORT_SYMBOL(cpufreq_update_policy);
889 /*********************************************************************
890 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
891 *********************************************************************/
894 * adjust_jiffies - adjust the system "loops_per_jiffy"
896 * This function alters the system "loops_per_jiffy" for the clock
897 * speed change. Note that loops_per_jiffy cannot be updated on SMP
898 * systems as each CPU might be scaled differently. So, use the arch
899 * per-CPU loops_per_jiffy value wherever possible.
902 static unsigned long l_p_j_ref;
903 static unsigned int l_p_j_ref_freq;
905 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
907 if (!l_p_j_ref_freq) {
908 l_p_j_ref = loops_per_jiffy;
909 l_p_j_ref_freq = ci->old;
911 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
912 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new))
913 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
916 #define adjust_jiffies(x...) do {} while (0)
921 * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
923 * This function calls the transition notifiers and the "adjust_jiffies" function. It is called
924 * twice on all CPU frequency changes that have external effects.
926 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
929 return; /* Only valid if we're in the resume process where
930 * everyone knows what CPU frequency we are at */
932 down_read(&cpufreq_notifier_rwsem);
934 case CPUFREQ_PRECHANGE:
935 notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
936 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
938 case CPUFREQ_POSTCHANGE:
939 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
940 notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
941 cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
944 up_read(&cpufreq_notifier_rwsem);
946 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
950 /*********************************************************************
951 * REGISTER / UNREGISTER CPUFREQ DRIVER *
952 *********************************************************************/
955 * cpufreq_register_driver - register a CPU Frequency driver
956 * @driver_data: A struct cpufreq_driver containing the values#
957 * submitted by the CPU Frequency driver.
959 * Registers a CPU Frequency driver to this core code. This code
960 * returns zero on success, -EBUSY when another driver got here first
961 * (and isn't unregistered in the meantime).
964 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
969 if (!driver_data || !driver_data->verify || !driver_data->init ||
970 ((!driver_data->setpolicy) && (!driver_data->target)))
973 spin_lock_irqsave(&cpufreq_driver_lock, flags);
974 if (cpufreq_driver) {
975 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
978 cpufreq_driver = driver_data;
979 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
981 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
983 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
987 /* check for at least one working CPU */
988 for (i=0; i<NR_CPUS; i++)
989 if (cpufreq_cpu_data[i])
992 /* if all ->init() calls failed, unregister */
994 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
996 spin_lock_irqsave(&cpufreq_driver_lock, flags);
997 cpufreq_driver = NULL;
998 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1004 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1008 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1010 * Unregister the current CPUFreq driver. Only call this if you have
1011 * the right to do so, i.e. if you have succeeded in initialising before!
1012 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1013 * currently not initialised.
1015 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1017 unsigned long flags;
1019 if (!cpufreq_driver || (driver != cpufreq_driver))
1022 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1024 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1025 cpufreq_driver = NULL;
1026 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1030 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);