1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  */
12
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/notifier.h>
18 #include <linux/cpufreq.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/device.h>
23 #include <linux/slab.h>
24 #include <linux/cpu.h>
25 #include <linux/completion.h>
26
27 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
28
29 /**
30  * The "cpufreq driver" - the arch- or hardware-dependend low
31  * level driver of CPUFreq support, and its spinlock. This lock
32  * also protects the cpufreq_cpu_data array.
33  */
34 static struct cpufreq_driver    *cpufreq_driver;
35 static struct cpufreq_policy    *cpufreq_cpu_data[NR_CPUS];
36 static spinlock_t               cpufreq_driver_lock = SPIN_LOCK_UNLOCKED;
37
38
39 /* we keep a pointer to each ->add'ed CPU's struct sys_device here;
40  * as the array is only accessed in ->add and ->remove, no lock or reference
41  * count is necessary.
42  */
43 static struct sys_device        *cpu_sys_devices[NR_CPUS];
44
45
46 /* internal prototypes */
47 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
48 static void handle_update(void *data);
49 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci);
50
51 /**
52  * Two notifier lists: the "policy" list is involved in the 
53  * validation process for a new CPU frequency policy; the 
54  * "transition" list for kernel code that needs to handle
55  * changes to devices when the CPU clock speed changes.
56  * The rwsem protects both lists.
57  */
58 static struct notifier_block    *cpufreq_policy_notifier_list;
59 static struct notifier_block    *cpufreq_transition_notifier_list;
60 static DECLARE_RWSEM            (cpufreq_notifier_rwsem);
61
62
63 static LIST_HEAD(cpufreq_governor_list);
64 static DECLARE_MUTEX            (cpufreq_governor_sem);
65
66 static struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
67 {
68         struct cpufreq_policy *data;
69         unsigned long flags;
70
71         if (cpu >= NR_CPUS)
72                 goto err_out;
73
74         /* get the cpufreq driver */
75         spin_lock_irqsave(&cpufreq_driver_lock, flags);
76
77         if (!cpufreq_driver)
78                 goto err_out_unlock;
79
80         if (!try_module_get(cpufreq_driver->owner))
81                 goto err_out_unlock;
82
83
84         /* get the CPU */
85         data = cpufreq_cpu_data[cpu];
86
87         if (!data)
88                 goto err_out_put_module;
89
90         if (!kobject_get(&data->kobj))
91                 goto err_out_put_module;
92
93
94         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
95
96         return data;
97
98  err_out_put_module:
99         module_put(cpufreq_driver->owner);
100  err_out_unlock:
101         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
102  err_out:
103         return NULL;
104 }
105
106 static void cpufreq_cpu_put(struct cpufreq_policy *data)
107 {
108         kobject_put(&data->kobj);
109         module_put(cpufreq_driver->owner);
110 }
111
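/*
 * Editorial sketch (not part of the original file): typical use of the
 * cpufreq_cpu_get()/cpufreq_cpu_put() pair.  Every successful get must be
 * balanced by a put, otherwise the driver module and the policy kobject
 * each leak a reference.
 */
#if 0
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (!policy)
		return 0;

	cur = policy->cur;		/* safe: we hold a reference */
	cpufreq_cpu_put(policy);	/* balance the get */
	return cur;
}
#endif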
112
113 /*********************************************************************
114  *                     UNIFIED DEBUG HELPERS                         *
115  *********************************************************************/
116 #ifdef CONFIG_CPU_FREQ_DEBUG
117
118 /* what part(s) of the CPUfreq subsystem are debugged? */
119 static unsigned int debug;
120
121 /* is the debug output ratelimit'ed using printk_ratelimit? User can
122  * set or modify this value.
123  */
124 static unsigned int debug_ratelimit = 1;
125
126 /* is printk ratelimiting enabled? It is enabled after a cpufreq driver
127  * has been loaded successfully, temporarily disabled when a new policy
128  * is set, and disabled again upon cpufreq driver removal.
129  */
130 static unsigned int disable_ratelimit = 1;
131 static spinlock_t disable_ratelimit_lock = SPIN_LOCK_UNLOCKED;
132
133 static inline void cpufreq_debug_enable_ratelimit(void)
134 {
135         unsigned long flags;
136
137         spin_lock_irqsave(&disable_ratelimit_lock, flags);
138         if (disable_ratelimit)
139                 disable_ratelimit--;
140         spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
141 }
142
143 static inline void cpufreq_debug_disable_ratelimit(void)
144 {
145         unsigned long flags;
146
147         spin_lock_irqsave(&disable_ratelimit_lock, flags);
148         disable_ratelimit++;
149         spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
150 }
151
152 void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
153 {
154         char s[256];
155         va_list args;
156         unsigned int len;
157         unsigned long flags;
158         
159         WARN_ON(!prefix);
160         if (type & debug) {
161                 spin_lock_irqsave(&disable_ratelimit_lock, flags);
162                 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
163                         spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
164                         return;
165                 }
166                 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
167
168                 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
169
170                 va_start(args, fmt);
171                 len += vsnprintf(&s[len], (256 - len), fmt, args);
172                 va_end(args);
173
174                 printk(s);
175
176                 WARN_ON(len < 5);
177         }
178 }
179 EXPORT_SYMBOL(cpufreq_debug_printk);
180
181
182 module_param(debug, uint, 0644);
183 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");
184
185 module_param(debug_ratelimit, uint, 0644);
186 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");
187
188 #else /* !CONFIG_CPU_FREQ_DEBUG */
189
190 static inline void cpufreq_debug_enable_ratelimit(void) { return; }
191 static inline void cpufreq_debug_disable_ratelimit(void) { return; }
192
193 #endif /* CONFIG_CPU_FREQ_DEBUG */
194
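/*
 * Editorial sketch (assumption, not in the original source): a low-level
 * driver built with CONFIG_CPU_FREQ_DEBUG would typically wrap
 * cpufreq_debug_printk() in its own dprintk macro, selecting the
 * CPUFREQ_DEBUG_DRIVER class so that "debug=2" enables its output.
 */
#if 0
#define drv_dprintk(msg...) \
	cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "example-driver", msg)
#endif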
195
196 /*********************************************************************
197  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
198  *********************************************************************/
199
200 /**
201  * adjust_jiffies - adjust the system "loops_per_jiffy"
202  *
203  * This function alters the system "loops_per_jiffy" for the clock
204  * speed change. Note that loops_per_jiffy cannot be updated on SMP
205  * systems as each CPU might be scaled differently. So, use the arch 
206  * per-CPU loops_per_jiffy value wherever possible.
207  */
208 #ifndef CONFIG_SMP
209 static unsigned long l_p_j_ref;
210 static unsigned int  l_p_j_ref_freq;
211
212 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
213 {
214         if (ci->flags & CPUFREQ_CONST_LOOPS)
215                 return;
216
217         if (!l_p_j_ref_freq) {
218                 l_p_j_ref = loops_per_jiffy;
219                 l_p_j_ref_freq = ci->old;
220                 dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
221         }
222         if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
223             (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
224             (val == CPUFREQ_RESUMECHANGE)) {
225                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
226                 dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
227         }
228 }
229 #else
230 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
231 #endif
232
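/*
 * Worked example (editorial, illustrative values): if loops_per_jiffy was
 * 2000000 when the reference frequency of 1000000 kHz was captured, a
 * transition to 2000000 kHz rescales it to
 * cpufreq_scale(2000000, 1000000, 2000000) == 4000000, i.e. the delay
 * calibration scales linearly with the clock.
 */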
233
234 /**
235  * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
236  *
237  * This function calls the transition notifiers and the "adjust_jiffies" function. It is called
238  * twice on all CPU frequency changes that have external effects. 
239  */
240 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
241 {
242         BUG_ON(irqs_disabled());
243
244         freqs->flags = cpufreq_driver->flags;
245         dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new);
246
247         down_read(&cpufreq_notifier_rwsem);
248         switch (state) {
249         case CPUFREQ_PRECHANGE:
250                 /* detect if the driver reported a value as "old frequency" which
251                  * is not equal to what the cpufreq core thinks is "old frequency".
252                  */
253                 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
254                         if ((likely(cpufreq_cpu_data[freqs->cpu])) &&
255                             (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)) &&
256                             (likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
257                             (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
258                         {
259                                 printk(KERN_WARNING "Warning: CPU frequency is %u kHz, "
260                                        "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
261                                 freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
262                         }
263                 }
264                 notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
265                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
266                 break;
267         case CPUFREQ_POSTCHANGE:
268                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
269                 notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
270                 if ((likely(cpufreq_cpu_data[freqs->cpu])) && 
271                     (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)))
272                         cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
273                 break;
274         }
275         up_read(&cpufreq_notifier_rwsem);
276 }
277 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
278
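/*
 * Editorial sketch (hypothetical ->target() body): the driver brackets the
 * actual hardware reprogramming with a PRECHANGE and a POSTCHANGE
 * notification so that transition notifiers and loops_per_jiffy stay
 * consistent with the new frequency.
 */
#if 0
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to run at target_freq here ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif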
279
280
281 /*********************************************************************
282  *                          SYSFS INTERFACE                          *
283  *********************************************************************/
284
285 /**
286  * cpufreq_parse_governor - parse a governor string
287  */
288 int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
289                                 struct cpufreq_governor **governor)
290 {
291         if (!cpufreq_driver)
292                 return -EINVAL;
293         if (cpufreq_driver->setpolicy) {
294                 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
295                         *policy = CPUFREQ_POLICY_PERFORMANCE;
296                         return 0;
297                 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
298                         *policy = CPUFREQ_POLICY_POWERSAVE;
299                         return 0;
300                 }
301                 return -EINVAL;
302         } else {
303                 struct cpufreq_governor *t;
304                 down(&cpufreq_governor_sem);
305                 if (!cpufreq_driver || !cpufreq_driver->target)
306                         goto out;
307                 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
308                         if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
309                                 *governor = t;
310                                 up(&cpufreq_governor_sem);
311                                 return 0;
312                         }
313                 }
314         out:
315                 up(&cpufreq_governor_sem);
316         }
317         return -EINVAL;
318 }
319 EXPORT_SYMBOL_GPL(cpufreq_parse_governor);
320
321
322 /* drivers/base/cpu.c */
323 extern struct sysdev_class cpu_sysdev_class;
324
325
326 /**
327  * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
328  *
329  * Print out information from the CPU's cpufreq policy; the object must be
330  * an "unsigned int".
331  */
332
333 #define show_one(file_name, object)                                     \
334 static ssize_t show_##file_name                                         \
335 (struct cpufreq_policy * policy, char *buf)                             \
336 {                                                                       \
337         return sprintf (buf, "%u\n", policy->object);                   \
338 }
339
340 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
341 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
342 show_one(scaling_min_freq, min);
343 show_one(scaling_max_freq, max);
344 show_one(scaling_cur_freq, cur);
345
346 /**
347  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
348  */
349 #define store_one(file_name, object)                    \
350 static ssize_t store_##file_name                                        \
351 (struct cpufreq_policy * policy, const char *buf, size_t count)         \
352 {                                                                       \
353         unsigned int ret = -EINVAL;                                     \
354         struct cpufreq_policy new_policy;                               \
355                                                                         \
356         ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
357         if (ret)                                                        \
358                 return -EINVAL;                                         \
359                                                                         \
360         ret = sscanf (buf, "%u", &new_policy.object);                   \
361         if (ret != 1)                                                   \
362                 return -EINVAL;                                         \
363                                                                         \
364         ret = cpufreq_set_policy(&new_policy);                          \
365                                                                         \
366         return ret ? ret : count;                                       \
367 }
368
369 store_one(scaling_min_freq,min);
370 store_one(scaling_max_freq,max);
371
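/*
 * For reference (editorial expansion, no new code): store_one(scaling_max_freq, max)
 * above generates roughly the following handler.
 */
#if 0
static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return -EINVAL;

	ret = sscanf(buf, "%u", &new_policy.max);
	if (ret != 1)
		return -EINVAL;

	ret = cpufreq_set_policy(&new_policy);

	return ret ? ret : count;
}
#endif
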
372 /**
373  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
374  */
375 static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
376 {
377         unsigned int cur_freq = cpufreq_get(policy->cpu);
378         if (!cur_freq)
379                 return sprintf(buf, "<unknown>");
380         return sprintf(buf, "%u\n", cur_freq);
381 }
382
383
384 /**
385  * show_scaling_governor - show the current policy for the specified CPU
386  */
387 static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
388 {
389         if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
390                 return sprintf(buf, "powersave\n");
391         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
392                 return sprintf(buf, "performance\n");
393         else if (policy->governor)
394                 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
395         return -EINVAL;
396 }
397
398
399 /**
400  * store_scaling_governor - store policy for the specified CPU
401  */
402 static ssize_t store_scaling_governor (struct cpufreq_policy * policy, 
403                                        const char *buf, size_t count) 
404 {
405         unsigned int ret = -EINVAL;
406         char    str_governor[16];
407         struct cpufreq_policy new_policy;
408
409         ret = cpufreq_get_policy(&new_policy, policy->cpu);
410         if (ret)
411                 return ret;
412
413         ret = sscanf (buf, "%15s", str_governor);
414         if (ret != 1)
415                 return -EINVAL;
416
417         if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
418                 return -EINVAL;
419
420         ret = cpufreq_set_policy(&new_policy);
421
422         return ret ? ret : count;
423 }
424
425 /**
426  * show_scaling_driver - show the cpufreq driver currently loaded
427  */
428 static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
429 {
430         return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
431 }
432
433 /**
434  * show_scaling_available_governors - show the available CPUfreq governors
435  */
436 static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
437                                 char *buf)
438 {
439         ssize_t i = 0;
440         struct cpufreq_governor *t;
441
442         if (!cpufreq_driver->target) {
443                 i += sprintf(buf, "performance powersave");
444                 goto out;
445         }
446
447         list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
448                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
449                         goto out;
450                 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
451         }
452  out:
453         i += sprintf(&buf[i], "\n");
454         return i;
455 }
456 /**
457  * show_affected_cpus - show the CPUs affected by each transition
458  */
459 static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
460 {
461         ssize_t i = 0;
462         unsigned int cpu;
463
464         for_each_cpu_mask(cpu, policy->cpus) {
465                 if (i)
466                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
467                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
468                 if (i >= (PAGE_SIZE - 5))
469                     break;
470         }
471         i += sprintf(&buf[i], "\n");
472         return i;
473 }
474
475
476 #define define_one_ro(_name) \
477 static struct freq_attr _name = \
478 __ATTR(_name, 0444, show_##_name, NULL)
479
480 #define define_one_ro0400(_name) \
481 static struct freq_attr _name = \
482 __ATTR(_name, 0400, show_##_name, NULL)
483
484 #define define_one_rw(_name) \
485 static struct freq_attr _name = \
486 __ATTR(_name, 0644, show_##_name, store_##_name)
487
488 define_one_ro0400(cpuinfo_cur_freq);
489 define_one_ro(cpuinfo_min_freq);
490 define_one_ro(cpuinfo_max_freq);
491 define_one_ro(scaling_available_governors);
492 define_one_ro(scaling_driver);
493 define_one_ro(scaling_cur_freq);
494 define_one_ro(affected_cpus);
495 define_one_rw(scaling_min_freq);
496 define_one_rw(scaling_max_freq);
497 define_one_rw(scaling_governor);
498
499 static struct attribute * default_attrs[] = {
500         &cpuinfo_min_freq.attr,
501         &cpuinfo_max_freq.attr,
502         &scaling_min_freq.attr,
503         &scaling_max_freq.attr,
504         &affected_cpus.attr,
505         &scaling_governor.attr,
506         &scaling_driver.attr,
507         &scaling_available_governors.attr,
508         NULL
509 };
510
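/*
 * Editorial sketch (hypothetical driver-side attribute): low-level drivers
 * export additional per-policy files through cpufreq_driver->attr, a
 * NULL-terminated array of struct freq_attr pointers that cpufreq_add_dev()
 * below turns into sysfs files.
 */
#if 0
static ssize_t show_example_info(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

static struct freq_attr example_info =
__ATTR(example_info, 0444, show_example_info, NULL);

static struct freq_attr *example_driver_attrs[] = {
	&example_info,
	NULL,
};
#endif
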
511 #define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
512 #define to_attr(a) container_of(a,struct freq_attr,attr)
513
514 static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
515 {
516         struct cpufreq_policy * policy = to_policy(kobj);
517         struct freq_attr * fattr = to_attr(attr);
518         ssize_t ret;
519         policy = cpufreq_cpu_get(policy->cpu);
520         if (!policy)
521                 return -EINVAL;
522         ret = fattr->show ? fattr->show(policy,buf) : 0;
523         cpufreq_cpu_put(policy);
524         return ret;
525 }
526
527 static ssize_t store(struct kobject * kobj, struct attribute * attr, 
528                      const char * buf, size_t count)
529 {
530         struct cpufreq_policy * policy = to_policy(kobj);
531         struct freq_attr * fattr = to_attr(attr);
532         ssize_t ret;
533         policy = cpufreq_cpu_get(policy->cpu);
534         if (!policy)
535                 return -EINVAL;
536         ret = fattr->store ? fattr->store(policy,buf,count) : 0;
537         cpufreq_cpu_put(policy);
538         return ret;
539 }
540
541 static void cpufreq_sysfs_release(struct kobject * kobj)
542 {
543         struct cpufreq_policy * policy = to_policy(kobj);
544         dprintk("last reference is dropped\n");
545         complete(&policy->kobj_unregister);
546 }
547
548 static struct sysfs_ops sysfs_ops = {
549         .show   = show,
550         .store  = store,
551 };
552
553 static struct kobj_type ktype_cpufreq = {
554         .sysfs_ops      = &sysfs_ops,
555         .default_attrs  = default_attrs,
556         .release        = cpufreq_sysfs_release,
557 };
558
559
560 /**
561  * cpufreq_add_dev - add a CPU device
562  *
563  * Adds the cpufreq interface for a CPU device. 
564  */
565 static int cpufreq_add_dev (struct sys_device * sys_dev)
566 {
567         unsigned int cpu = sys_dev->id;
568         int ret = 0;
569         struct cpufreq_policy new_policy;
570         struct cpufreq_policy *policy;
571         struct freq_attr **drv_attr;
572         unsigned long flags;
573         unsigned int j;
574
575         cpufreq_debug_disable_ratelimit();
576         dprintk("adding CPU %u\n", cpu);
577
578 #ifdef CONFIG_SMP
579         /* check whether a different CPU has already registered this
580          * CPU because both share the same frequency policy. */
581         policy = cpufreq_cpu_get(cpu);
582         if (unlikely(policy)) {
583                 cpu_sys_devices[cpu] = sys_dev;
584                 dprintk("CPU already managed, adding link\n");
585                 sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
586                 cpufreq_debug_enable_ratelimit();
587                 return 0;
588         }
589 #endif
590
591         if (!try_module_get(cpufreq_driver->owner)) {
592                 ret = -EINVAL;
593                 goto module_out;
594         }
595
596         policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
597         if (!policy) {
598                 ret = -ENOMEM;
599                 goto nomem_out;
600         }
601         memset(policy, 0, sizeof(struct cpufreq_policy));
602
603         policy->cpu = cpu;
604         policy->cpus = cpumask_of_cpu(cpu);
605
606         init_MUTEX_LOCKED(&policy->lock);
607         init_completion(&policy->kobj_unregister);
608         INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
609
610         /* call driver. From then on the cpufreq driver must be able
611          * to accept all calls to ->verify and ->setpolicy for this CPU
612          */
613         ret = cpufreq_driver->init(policy);
614         if (ret) {
615                 dprintk("initialization failed\n");
616                 goto err_out;
617         }
618
619         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
620
621         /* prepare interface data */
622         policy->kobj.parent = &sys_dev->kobj;
623         policy->kobj.ktype = &ktype_cpufreq;
624         strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
625
626         ret = kobject_register(&policy->kobj);
627         if (ret)
628                 goto err_out;
629
630         /* set up files for this cpu device */
631         drv_attr = cpufreq_driver->attr;
632         while ((drv_attr) && (*drv_attr)) {
633                 sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
634                 drv_attr++;
635         }
636         if (cpufreq_driver->get)
637                 sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
638         if (cpufreq_driver->target)
639                 sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
640
641         spin_lock_irqsave(&cpufreq_driver_lock, flags);
642         for_each_cpu_mask(j, policy->cpus)
643                 cpufreq_cpu_data[j] = policy;
644         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
645         policy->governor = NULL; /* to ensure that the starting sequence is
646                                   * run in cpufreq_set_policy */
647         up(&policy->lock);
648         
649         /* set default policy */
650         
651         ret = cpufreq_set_policy(&new_policy);
652         if (ret) {
653                 dprintk("setting policy failed\n");
654                 goto err_out_unregister;
655         }
656
657         module_put(cpufreq_driver->owner);
658         cpu_sys_devices[cpu] = sys_dev;
659         dprintk("initialization complete\n");
660         cpufreq_debug_enable_ratelimit();
661         
662         return 0;
663
664
665 err_out_unregister:
666         spin_lock_irqsave(&cpufreq_driver_lock, flags);
667         for_each_cpu_mask(j, policy->cpus)
668                 cpufreq_cpu_data[j] = NULL;
669         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
670
671         kobject_unregister(&policy->kobj);
672         wait_for_completion(&policy->kobj_unregister);
673
674 err_out:
675         kfree(policy);
676
677 nomem_out:
678         module_put(cpufreq_driver->owner);
679  module_out:
680         cpufreq_debug_enable_ratelimit();
681         return ret;
682 }
683
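/*
 * Editorial sketch (hypothetical ->init() callback): by the time
 * cpufreq_set_policy() runs in cpufreq_add_dev() above, the driver is
 * expected to have filled in the hardware limits and a sane starting range.
 * All values below are illustrative.
 */
#if 0
static int example_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 1000000;		/* kHz */
	policy->cpuinfo.max_freq = 2000000;		/* kHz */
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	policy->cur = 2000000;				/* kHz */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	return 0;
}
#endif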
684
685 /**
686  * cpufreq_remove_dev - remove a CPU device
687  *
688  * Removes the cpufreq interface for a CPU device.
689  */
690 static int cpufreq_remove_dev (struct sys_device * sys_dev)
691 {
692         unsigned int cpu = sys_dev->id;
693         unsigned long flags;
694         struct cpufreq_policy *data;
695 #ifdef CONFIG_SMP
696         unsigned int j;
697 #endif
698
699         cpufreq_debug_disable_ratelimit();
700         dprintk("unregistering CPU %u\n", cpu);
701
702         spin_lock_irqsave(&cpufreq_driver_lock, flags);
703         data = cpufreq_cpu_data[cpu];
704
705         if (!data) {
706                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
707                 cpu_sys_devices[cpu] = NULL;
708                 cpufreq_debug_enable_ratelimit();
709                 return -EINVAL;
710         }
711         cpufreq_cpu_data[cpu] = NULL;
712
713
714 #ifdef CONFIG_SMP
715         /* if this isn't the CPU which is the parent of the kobj, we
716          * only need to unlink, put and exit 
717          */
718         if (unlikely(cpu != data->cpu)) {
719                 dprintk("removing link\n");
720                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
721                 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
722                 cpu_sys_devices[cpu] = NULL;
723                 cpufreq_cpu_put(data);
724                 cpufreq_debug_enable_ratelimit();
725                 return 0;
726         }
727 #endif
728
729         cpu_sys_devices[cpu] = NULL;
730
731         if (!kobject_get(&data->kobj)) {
732                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
733                 cpufreq_debug_enable_ratelimit();
734                 return -EFAULT;
735         }
736
737 #ifdef CONFIG_SMP
738         /* if we have other CPUs still registered, we need to unlink them,
739          * or else wait_for_completion below will lock up. Clean the
740          * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
741          * links afterwards.
742          */
743         if (unlikely(cpus_weight(data->cpus) > 1)) {
744                 for_each_cpu_mask(j, data->cpus) {
745                         if (j == cpu)
746                                 continue;
747                         cpufreq_cpu_data[j] = NULL;
748                 }
749         }
750
751         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
752
753         if (unlikely(cpus_weight(data->cpus) > 1)) {
754                 for_each_cpu_mask(j, data->cpus) {
755                         if (j == cpu)
756                                 continue;
757                         dprintk("removing link for cpu %u\n", j);
758                         sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq");
759                         cpufreq_cpu_put(data);
760                 }
761         }
762 #else
763         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
764 #endif
765
766         if (cpufreq_driver->target)
767                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
768
769         kobject_unregister(&data->kobj);
770
771         kobject_put(&data->kobj);
772
773         /* we need to make sure that the underlying kobj is actually
774          * not referenced anymore by anybody before we proceed with 
775          * unloading.
776          */
777         dprintk("waiting for dropping of refcount\n");
778         wait_for_completion(&data->kobj_unregister);
779         dprintk("wait complete\n");
780
781         if (cpufreq_driver->exit)
782                 cpufreq_driver->exit(data);
783
784         kfree(data);
785
786         cpufreq_debug_enable_ratelimit();
787
788         return 0;
789 }
790
791
792 static void handle_update(void *data)
793 {
794         unsigned int cpu = (unsigned int)(long)data;
795         dprintk("handle_update for cpu %u called\n", cpu);
796         cpufreq_update_policy(cpu);
797 }
798
799 /**
800  *      cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
801  *      @cpu: cpu number
802  *      @old_freq: CPU frequency the kernel thinks the CPU runs at
803  *      @new_freq: CPU frequency the CPU actually runs at
804  *
805  *      We adjust to the current frequency first, and clean up later by either calling
806  *      cpufreq_update_policy() or scheduling handle_update().
807  */
808 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
809 {
810         struct cpufreq_freqs freqs;
811
812         printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
813                "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
814
815         freqs.cpu = cpu;
816         freqs.old = old_freq;
817         freqs.new = new_freq;
818         cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
819         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
820 }
821
822
823 /** 
824  * cpufreq_get - get the current CPU frequency (in kHz)
825  * @cpu: CPU number
826  *
827  * Get the current (static) frequency of the CPU.
828  */
829 unsigned int cpufreq_get(unsigned int cpu)
830 {
831         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
832         unsigned int ret = 0;
833
834         if (!policy)
835                 return 0;
836
837         if (!cpufreq_driver->get)
838                 goto out;
839
840         down(&policy->lock);
841
842         ret = cpufreq_driver->get(cpu);
843
844         if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) 
845         {
846                 /* verify no discrepancy between actual and saved value exists */
847                 if (unlikely(ret != policy->cur)) {
848                         cpufreq_out_of_sync(cpu, policy->cur, ret);
849                         schedule_work(&policy->update);
850                 }
851         }
852
853         up(&policy->lock);
854
855  out:
856         cpufreq_cpu_put(policy);
857
858         return (ret);
859 }
860 EXPORT_SYMBOL(cpufreq_get);
861
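/*
 * Editorial note: a caller such as a thermal or monitoring client can simply
 * do "unsigned int khz = cpufreq_get(0);" and will get 0 back if no cpufreq
 * driver is loaded or the driver provides no ->get() hook.
 */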
862
863 /**
864  *      cpufreq_resume -  restore proper CPU frequency handling after resume
865  *
866  *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
867  *      2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
868  *      3.) schedule a call to cpufreq_update_policy() ASAP once interrupts are restored.
869  */
870 static int cpufreq_resume(struct sys_device * sysdev)
871 {
872         int cpu = sysdev->id;
873         unsigned int ret = 0;
874         struct cpufreq_policy *cpu_policy;
875
876         dprintk("resuming cpu %u\n", cpu);
877
878         if (!cpu_online(cpu))
879                 return 0;
880
881         /* we may be lax here as interrupts are off. Nonetheless
882          * we need to grab the correct cpu policy, so as to check
883          * whether we really run on this CPU.
884          */
885
886         cpu_policy = cpufreq_cpu_get(cpu);
887         if (!cpu_policy)
888                 return -EINVAL;
889
890         /* only handle each CPU group once */
891         if (unlikely(cpu_policy->cpu != cpu)) {
892                 cpufreq_cpu_put(cpu_policy);
893                 return 0;
894         }
895
896         if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
897                 unsigned int cur_freq = 0;
898
899                 if (cpufreq_driver->get)
900                         cur_freq = cpufreq_driver->get(cpu_policy->cpu);
901
902                 if (!cur_freq || !cpu_policy->cur) {
903                         printk(KERN_ERR "cpufreq: resume failed to assert current frequency is what timing core thinks it is.\n");
904                         goto out;
905                 }
906
907                 if (unlikely(cur_freq != cpu_policy->cur)) {
908                         struct cpufreq_freqs freqs;
909
910                         printk(KERN_WARNING "Warning: CPU frequency is %u kHz, "
911                                "cpufreq assumed %u kHz.\n", cur_freq, cpu_policy->cur);
912
913                         freqs.cpu = cpu;
914                         freqs.old = cpu_policy->cur;
915                         freqs.new = cur_freq;
916
917                         notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_RESUMECHANGE, &freqs);
918                         adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
919
920                         cpu_policy->cur = cur_freq;
921                 }
922         }
923
924 out:
925         schedule_work(&cpu_policy->update);
926         cpufreq_cpu_put(cpu_policy);
927         return ret;
928 }
929
930 static struct sysdev_driver cpufreq_sysdev_driver = {
931         .add            = cpufreq_add_dev,
932         .remove         = cpufreq_remove_dev,
933         .resume         = cpufreq_resume,
934 };
935
936
937 /*********************************************************************
938  *                     NOTIFIER LISTS INTERFACE                      *
939  *********************************************************************/
940
941 /**
942  *      cpufreq_register_notifier - register a driver with cpufreq
943  *      @nb: notifier function to register
944  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
945  *
946  *      Add a driver to one of two lists: either a list of drivers that 
947  *      are notified about clock rate changes (once before and once after
948  *      the transition), or a list of drivers that are notified about
949  *      changes in cpufreq policy.
950  *
951  *      This function may sleep, and has the same return conditions as
952  *      notifier_chain_register.
953  */
954 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
955 {
956         int ret;
957
958         down_write(&cpufreq_notifier_rwsem);
959         switch (list) {
960         case CPUFREQ_TRANSITION_NOTIFIER:
961                 ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
962                 break;
963         case CPUFREQ_POLICY_NOTIFIER:
964                 ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
965                 break;
966         default:
967                 ret = -EINVAL;
968         }
969         up_write(&cpufreq_notifier_rwsem);
970
971         return ret;
972 }
973 EXPORT_SYMBOL(cpufreq_register_notifier);
974
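/*
 * Editorial sketch (hypothetical client code): registering for transition
 * notifications.  The callback is invoked once with CPUFREQ_PRECHANGE and
 * once with CPUFREQ_POSTCHANGE for every frequency change, as described in
 * cpufreq_notify_transition() above.
 */
#if 0
static int example_transition_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu %u now runs at %u kHz\n",
		       freqs->cpu, freqs->new);
	return 0;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_handler,
};

/* ... cpufreq_register_notifier(&example_transition_nb,
 *                               CPUFREQ_TRANSITION_NOTIFIER); ... */
#endif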
975
976 /**
977  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
978  *      @nb: notifier block to be unregistered
979  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
980  *
981  *      Remove a driver from the CPU frequency notifier list.
982  *
983  *      This function may sleep, and has the same return conditions as
984  *      notifier_chain_unregister.
985  */
986 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
987 {
988         int ret;
989
990         down_write(&cpufreq_notifier_rwsem);
991         switch (list) {
992         case CPUFREQ_TRANSITION_NOTIFIER:
993                 ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
994                 break;
995         case CPUFREQ_POLICY_NOTIFIER:
996                 ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
997                 break;
998         default:
999                 ret = -EINVAL;
1000         }
1001         up_write(&cpufreq_notifier_rwsem);
1002
1003         return ret;
1004 }
1005 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1006
1007
1008 /*********************************************************************
1009  *                              GOVERNORS                            *
1010  *********************************************************************/
1011
1012
1013 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1014                             unsigned int target_freq,
1015                             unsigned int relation)
1016 {
1017         int retval = -EINVAL;
1018         lock_cpu_hotplug();
1019         dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1020                 target_freq, relation);
1021         if (cpu_online(policy->cpu))
1022                 retval = cpufreq_driver->target(policy, target_freq, relation);
1023         unlock_cpu_hotplug();
1024         return retval;
1025 }
1026 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1027
1028
1029 int cpufreq_driver_target(struct cpufreq_policy *policy,
1030                           unsigned int target_freq,
1031                           unsigned int relation)
1032 {
1033         unsigned int ret;
1034
1035         policy = cpufreq_cpu_get(policy->cpu);
1036         if (!policy)
1037                 return -EINVAL;
1038
1039         down(&policy->lock);
1040
1041         ret = __cpufreq_driver_target(policy, target_freq, relation);
1042
1043         up(&policy->lock);
1044
1045         cpufreq_cpu_put(policy);
1046
1047         return ret;
1048 }
1049 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1050
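/*
 * Editorial sketch (hypothetical governor step): code that already holds
 * policy->lock (e.g. a governor invoked via __cpufreq_governor()) calls the
 * __-prefixed variant directly; everyone else goes through
 * cpufreq_driver_target(), which takes the policy lock itself.
 */
#if 0
static void example_governor_step(struct cpufreq_policy *policy,
				  unsigned int wanted_khz)
{
	/* pick the lowest supported frequency at or above wanted_khz */
	__cpufreq_driver_target(policy, wanted_khz, CPUFREQ_RELATION_L);
}
#endif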
1051
1052 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1053 {
1054         int ret = -EINVAL;
1055
1056         if (!try_module_get(policy->governor->owner))
1057                 return -EINVAL;
1058
1059         dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
1060         ret = policy->governor->governor(policy, event);
1061
1062         /* we keep one module reference alive for each CPU governed by this CPU */
1063         if ((event != CPUFREQ_GOV_START) || ret)
1064                 module_put(policy->governor->owner);
1065         if ((event == CPUFREQ_GOV_STOP) && !ret)
1066                 module_put(policy->governor->owner);
1067
1068         return ret;
1069 }
1070
1071
1072 int cpufreq_governor(unsigned int cpu, unsigned int event)
1073 {
1074         int ret = 0;
1075         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1076
1077         if (!policy)
1078                 return -EINVAL;
1079
1080         down(&policy->lock);
1081         ret = __cpufreq_governor(policy, event);
1082         up(&policy->lock);
1083
1084         cpufreq_cpu_put(policy);
1085
1086         return ret;
1087 }
1088 EXPORT_SYMBOL_GPL(cpufreq_governor);
1089
1090
1091 int cpufreq_register_governor(struct cpufreq_governor *governor)
1092 {
1093         struct cpufreq_governor *t;
1094
1095         if (!governor)
1096                 return -EINVAL;
1097
1098         down(&cpufreq_governor_sem);
1099         
1100         list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
1101                 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
1102                         up(&cpufreq_governor_sem);
1103                         return -EBUSY;
1104                 }
1105         }
1106         list_add(&governor->governor_list, &cpufreq_governor_list);
1107
1108         up(&cpufreq_governor_sem);
1109
1110         return 0;
1111 }
1112 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1113
1114
1115 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1116 {
1117         if (!governor)
1118                 return;
1119
1120         down(&cpufreq_governor_sem);
1121         list_del(&governor->governor_list);
1122         up(&cpufreq_governor_sem);
1123         return;
1124 }
1125 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1126
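/*
 * Editorial sketch (hypothetical governor, names are illustrative): the
 * ->governor callback receives CPUFREQ_GOV_START, CPUFREQ_GOV_STOP and
 * CPUFREQ_GOV_LIMITS events from __cpufreq_governor() above.
 */
#if 0
static int example_governor_cb(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* run flat out within the current limits */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_cb,
	.owner		= THIS_MODULE,
};

/* ... cpufreq_register_governor(&example_governor); ... */
#endif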
1127
1128
1129 /*********************************************************************
1130  *                          POLICY INTERFACE                         *
1131  *********************************************************************/
1132
1133 /**
1134  * cpufreq_get_policy - get the current cpufreq_policy
1135  * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
1136  *
1137  * Reads the current cpufreq policy.
1138  */
1139 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1140 {
1141         struct cpufreq_policy *cpu_policy;
1142         if (!policy)
1143                 return -EINVAL;
1144
1145         cpu_policy = cpufreq_cpu_get(cpu);
1146         if (!cpu_policy)
1147                 return -EINVAL;
1148
1149         down(&cpu_policy->lock);
1150         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1151         up(&cpu_policy->lock);
1152
1153         cpufreq_cpu_put(cpu_policy);
1154
1155         return 0;
1156 }
1157 EXPORT_SYMBOL(cpufreq_get_policy);
1158
1159
1160 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1161 {
1162         int ret = 0;
1163
1164         cpufreq_debug_disable_ratelimit();
1165         dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1166                 policy->min, policy->max);
1167
1168         memcpy(&policy->cpuinfo, 
1169                &data->cpuinfo, 
1170                sizeof(struct cpufreq_cpuinfo));
1171
1172         /* verify the cpu speed can be set within this limit */
1173         ret = cpufreq_driver->verify(policy);
1174         if (ret)
1175                 goto error_out;
1176
1177         down_read(&cpufreq_notifier_rwsem);
1178
1179         /* adjust if necessary - all reasons */
1180         notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
1181                             policy);
1182
1183         /* adjust if necessary - hardware incompatibility */
1184         notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
1185                             policy);
1186
1187         /* verify the cpu speed can be set within this limit,
1188            which might be different from the first one */
1189         ret = cpufreq_driver->verify(policy);
1190         if (ret) {
1191                 up_read(&cpufreq_notifier_rwsem);
1192                 goto error_out;
1193         }
1194
1195         /* notification of the new policy */
1196         notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
1197                             policy);
1198
1199         up_read(&cpufreq_notifier_rwsem);
1200
1201         data->min    = policy->min;
1202         data->max    = policy->max;
1203
1204         dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);
1205
1206         if (cpufreq_driver->setpolicy) {
1207                 data->policy = policy->policy;
1208                 dprintk("setting range\n");
1209                 ret = cpufreq_driver->setpolicy(policy);
1210         } else {
1211                 if (policy->governor != data->governor) {
1212                         /* save old, working values */
1213                         struct cpufreq_governor *old_gov = data->governor;
1214
1215                         dprintk("governor switch\n");
1216
1217                         /* end old governor */
1218                         if (data->governor)
1219                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1220
1221                         /* start new governor */
1222                         data->governor = policy->governor;
1223                         if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1224                                 /* new governor failed, so re-start old one */
1225                                 dprintk("starting governor %s failed\n", data->governor->name);
1226                                 if (old_gov) {
1227                                         data->governor = old_gov;
1228                                         __cpufreq_governor(data, CPUFREQ_GOV_START);
1229                                 }
1230                                 ret = -EINVAL;
1231                                 goto error_out;
1232                         }
1233                         /* might be a policy change, too, so fall through */
1234                 }
1235                 dprintk("governor: change or update limits\n");
1236                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1237         }
1238
1239  error_out:
1240         cpufreq_debug_enable_ratelimit();
1241         return ret;
1242 }
1243
1244 /**
1245  *      cpufreq_set_policy - set a new CPUFreq policy
1246  *      @policy: policy to be set.
1247  *
1248  *      Sets a new CPU frequency and voltage scaling policy.
1249  */
1250 int cpufreq_set_policy(struct cpufreq_policy *policy)
1251 {
1252         int ret = 0;
1253         struct cpufreq_policy *data;
1254
1255         if (!policy)
1256                 return -EINVAL;
1257
1258         data = cpufreq_cpu_get(policy->cpu);
1259         if (!data)
1260                 return -EINVAL;
1261
1262         /* lock this CPU */
1263         down(&data->lock);
1264
1265         ret = __cpufreq_set_policy(data, policy);
1266         data->user_policy.min = data->min;
1267         data->user_policy.max = data->max;
1268         data->user_policy.policy = data->policy;
1269         data->user_policy.governor = data->governor;
1270
1271         up(&data->lock);
1272         cpufreq_cpu_put(data);
1273
1274         return ret;
1275 }
1276 EXPORT_SYMBOL(cpufreq_set_policy);
1277
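/*
 * Editorial sketch (hypothetical helper): the same read-modify-write pattern
 * the sysfs store_*() handlers above use to change one field of a policy.
 */
#if 0
static int example_cap_cpu(unsigned int cpu, unsigned int max_khz)
{
	struct cpufreq_policy new_policy;
	int ret;

	ret = cpufreq_get_policy(&new_policy, cpu);
	if (ret)
		return ret;

	new_policy.max = max_khz;
	return cpufreq_set_policy(&new_policy);
}
#endif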
1278
1279 /**
1280  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1281  *      @cpu: CPU which shall be re-evaluated
1282  *
1283  *      Useful for policy notifiers which have different requirements
1284  *      at different times.
1285  */
1286 int cpufreq_update_policy(unsigned int cpu)
1287 {
1288         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1289         struct cpufreq_policy policy;
1290         int ret = 0;
1291
1292         if (!data)
1293                 return -ENODEV;
1294
1295         down(&data->lock);
1296
1297         dprintk("updating policy for CPU %u\n", cpu);
1298         memcpy(&policy, 
1299                data,
1300                sizeof(struct cpufreq_policy));
1301         policy.min = data->user_policy.min;
1302         policy.max = data->user_policy.max;
1303         policy.policy = data->user_policy.policy;
1304         policy.governor = data->user_policy.governor;
1305
1306         ret = __cpufreq_set_policy(data, &policy);
1307
1308         up(&data->lock);
1309
1310         cpufreq_cpu_put(data);
1311         return ret;
1312 }
1313 EXPORT_SYMBOL(cpufreq_update_policy);
1314
1315
1316 /*********************************************************************
1317  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1318  *********************************************************************/
1319
1320 /**
1321  * cpufreq_register_driver - register a CPU Frequency driver
1322  * @driver_data: A struct cpufreq_driver containing the values
1323  * submitted by the CPU Frequency driver.
1324  *
1325  *   Registers a CPU Frequency driver with this core code. This code
1326  * returns zero on success, -EBUSY when another driver got here first
1327  * (and isn't unregistered in the meantime). 
1328  *
1329  */
1330 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1331 {
1332         unsigned long flags;
1333         int ret;
1334
1335         if (!driver_data || !driver_data->verify || !driver_data->init ||
1336             ((!driver_data->setpolicy) && (!driver_data->target)))
1337                 return -EINVAL;
1338
1339         dprintk("trying to register driver %s\n", driver_data->name);
1340
1341         if (driver_data->setpolicy)
1342                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1343
1344         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1345         if (cpufreq_driver) {
1346                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1347                 return -EBUSY;
1348         }
1349         cpufreq_driver = driver_data;
1350         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1351
1352         ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
1353
1354         if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1355                 int i;
1356                 ret = -ENODEV;
1357
1358                 /* check for at least one working CPU */
1359                 for (i=0; i<NR_CPUS; i++)
1360                         if (cpufreq_cpu_data[i])
1361                                 ret = 0;
1362
1363                 /* if all ->init() calls failed, unregister */
1364                 if (ret) {
1365                         dprintk("no CPU initialized for driver %s\n", driver_data->name);
1366                         sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1367
1368                         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1369                         cpufreq_driver = NULL;
1370                         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1371                 }
1372         }
1373
1374         if (!ret) {
1375                 dprintk("driver %s up and running\n", driver_data->name);
1376                 cpufreq_debug_enable_ratelimit();
1377         }
1378
1379         return (ret);
1380 }
1381 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1382
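/*
 * Editorial sketch (all names hypothetical, reusing the example callbacks
 * sketched earlier in this file): a minimal registration.  Either ->setpolicy
 * or ->target must be provided together with ->verify and ->init, otherwise
 * cpufreq_register_driver() returns -EINVAL.
 */
#if 0
static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_cpu_init,
	.verify	= example_verify,
	.target	= example_target,
};

static int __init example_module_init(void)
{
	return cpufreq_register_driver(&example_driver);
}
module_init(example_module_init);
#endif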
1383
1384 /**
1385  * cpufreq_unregister_driver - unregister the current CPUFreq driver
1386  *
1387  *    Unregister the current CPUFreq driver. Only call this if you have 
1388  * the right to do so, i.e. if you have succeeded in initialising before!
1389  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1390  * currently not initialised.
1391  */
1392 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1393 {
1394         unsigned long flags;
1395
1396         cpufreq_debug_disable_ratelimit();
1397
1398         if (!cpufreq_driver || (driver != cpufreq_driver)) {
1399                 cpufreq_debug_enable_ratelimit();
1400                 return -EINVAL;
1401         }
1402
1403         dprintk("unregistering driver %s\n", driver->name);
1404
1405         sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1406
1407         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1408         cpufreq_driver = NULL;
1409         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1410
1411         return 0;
1412 }
1413 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);