/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * TBD:
 * 1. Make # power states dynamic.
 * 2. Support duty_cycle values that span bit 4.
 * 3. Optimize by having scheduler determine busyness instead of
 *    having us try to calculate it here.
 * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/system.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
#define ACPI_PROCESSOR_FILE_INFO        "info"
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define ACPI_PROCESSOR_FILE_THROTTLING  "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT       "limit"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER     0x81

#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4   /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4   /* 1us (3.579 ticks per us) */
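/*
 * Illustrative note (not part of the original source): the ACPI PM timer
 * runs at PM_TIMER_FREQUENCY = 3579545 Hz, i.e. ~3.579 ticks per us, so
 * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 ticks.  The
 * C2/C3_OVERHEAD of 4 ticks thus charges roughly 1us of transition cost
 * against each sleep-time measurement below.
 */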
#define ACPI_PROCESSOR_LIMIT_USER       0
#define ACPI_PROCESSOR_LIMIT_THERMAL    1

static unsigned int max_cstate = ACPI_C_STATES_MAX;

#define _COMPONENT      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_get_limit_info(struct acpi_processor *pr);

static struct acpi_driver acpi_processor_driver = {
    .name  = ACPI_PROCESSOR_DRIVER_NAME,
    .class = ACPI_PROCESSOR_CLASS,
    .ids   = ACPI_PROCESSOR_HID,
    .ops   = {
        .add    = acpi_processor_add,
        .remove = acpi_processor_remove,
    },
};
struct acpi_processor_errata {
    u8 smp;
    struct {
        u8  throttle:1;
        u8  fdma:1;
        u8  reserved:6;
        u32 bmisx;
    } piix4;
};

static struct file_operations acpi_processor_info_fops = {
    .open    = acpi_processor_info_open_fs,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static struct file_operations acpi_processor_power_fops = {
    .open    = acpi_processor_power_open_fs,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static struct file_operations acpi_processor_throttling_fops = {
    .open    = acpi_processor_throttling_open_fs,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static struct file_operations acpi_processor_limit_fops = {
    .open    = acpi_processor_limit_open_fs,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static struct acpi_processor *processors[NR_CPUS];
static struct acpi_processor_errata errata;
static void (*pm_idle_save)(void);
/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */
static int
acpi_processor_errata_piix4(struct pci_dev *dev)
{
    u8 rev = 0;
    u8 value1 = 0;
    u8 value2 = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4");

    if (!dev)
        return_VALUE(-EINVAL);

    /*
     * Note that 'dev' references the PIIX4 ACPI Controller.
     */
    pci_read_config_byte(dev, PCI_REVISION_ID, &rev);

    switch (rev) {
    case 0:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
        break;
    case 1:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
        break;
    case 2:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
        break;
    case 3:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
        break;
    default:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
        break;
    }

    switch (rev) {
    case 0:     /* PIIX4 A-step */
    case 1:     /* PIIX4 B-step */
        /*
         * See specification changes #13 ("Manual Throttle Duty Cycle")
         * and #14 ("Enabling and Disabling Manual Throttle"), plus
         * erratum #5 ("STPCLK# Deassertion Time") from the January
         * 2002 PIIX4 specification update.  Applies only to older
         * PIIX4 models.
         */
        errata.piix4.throttle = 1;

    case 2:     /* PIIX4E */
    case 3:     /* PIIX4M */
        /*
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */

        /*
         * BM-IDE
         * ------
         * Find the PIIX4 IDE Controller and get the Bus Master IDE
         * Status register address.  We'll use this later to read
         * each IDE controller's DMA status to make sure we catch all
         * DMA activity.
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
            PCI_DEVICE_ID_INTEL_82371AB,
            PCI_ANY_ID, PCI_ANY_ID, NULL);
        if (dev) {
            errata.piix4.bmisx = pci_resource_start(dev, 4);
            pci_dev_put(dev);
        }

        /*
         * Type-F DMA
         * ----------
         * Find the PIIX4 ISA Controller and read the Motherboard
         * DMA controller's status to see if Type-F (Fast) DMA mode
         * is enabled (bit 7) on either channel.  Note that we'll
         * disable C3 support if this is enabled, as some legacy
         * devices won't operate well if fast DMA is disabled.
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
            PCI_DEVICE_ID_INTEL_82371AB_0,
            PCI_ANY_ID, PCI_ANY_ID, NULL);
        if (dev) {
            pci_read_config_byte(dev, 0x76, &value1);
            pci_read_config_byte(dev, 0x77, &value2);
            if ((value1 & 0x80) || (value2 & 0x80))
                errata.piix4.fdma = 1;
            pci_dev_put(dev);
        }

        break;
    }

    if (errata.piix4.bmisx)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Bus master activity detection (BM-IDE) erratum enabled\n"));
    if (errata.piix4.fdma)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Type-F DMA livelock erratum (C3 disabled)\n"));

    return_VALUE(0);
}
static int
acpi_processor_errata(struct acpi_processor *pr)
{
    int result = 0;
    struct pci_dev *dev = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_errata");

    if (!pr)
        return_VALUE(-EINVAL);

    /*
     * PIIX4
     */
    dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
        PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, PCI_ANY_ID, NULL);
    if (dev) {
        result = acpi_processor_errata_piix4(dev);
        pci_dev_put(dev);
    }

    return_VALUE(result);
}
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */

static u32
ticks_elapsed(u32 t1, u32 t2)
{
    if (t2 >= t1)
        return (t2 - t1);
    else if (!acpi_fadt.tmr_val_ext)
        return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
    else
        return ((0xFFFFFFFF - t1) + t2);
}
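/*
 * Illustrative example (not part of the original source): tmr_val_ext
 * distinguishes a 32-bit from a 24-bit PM timer.  On a 24-bit timer a
 * wrap between reads, e.g. t1 = 0x00FFFFF0 and t2 = 0x00000010, yields
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010) & 0x00FFFFFF = 0x1F ticks.
 */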
static void
acpi_processor_power_activate(
    struct acpi_processor *pr,
    int state)
{
    if (!pr)
        return;

    pr->power.states[pr->power.state].promotion.count = 0;
    pr->power.states[pr->power.state].demotion.count = 0;

    /* Cleanup from old state. */
    switch (pr->power.state) {
    case ACPI_STATE_C3:
        /* Disable bus master reload */
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
        break;
    }

    /* Prepare to use new state. */
    switch (state) {
    case ACPI_STATE_C3:
        /* Enable bus master reload */
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
        break;
    }

    pr->power.state = state;

    return;
}
static void
acpi_processor_idle(void)
{
    struct acpi_processor *pr = NULL;
    struct acpi_processor_cx *cx = NULL;
    unsigned int next_state = 0;
    unsigned int sleep_ticks = 0;
    u32 t1, t2 = 0;

    pr = processors[smp_processor_id()];
    if (!pr)
        return;

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    /*
     * Check whether we truly need to go idle, or should
     * reschedule:
     */
    if (unlikely(need_resched())) {
        local_irq_enable();
        return;
    }

    cx = &(pr->power.states[pr->power.state]);

    /*
     * Check for bus mastering activity (if required), record, and check
     * for demotion.
     */
    if (pr->flags.bm_check) {
        u32 bm_status = 0;

        pr->power.bm_activity <<= 1;

        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
            &bm_status, ACPI_MTX_DO_NOT_LOCK);
        if (bm_status) {
            pr->power.bm_activity++;
            acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                1, ACPI_MTX_DO_NOT_LOCK);
        }
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity, forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
            if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                pr->power.bm_activity++;
        }

        /*
         * Apply bus mastering demotion policy.  Automatically demote
         * to avoid a faulty transition.  Note that the processor
         * won't enter a low-power state during this call (to this
         * function) but should upon the next.
         *
         * TBD: A better policy might be to fall back to the demotion
         *      state (use it for this quantum only) instead of
         *      demoting -- and rely on duration as our sole demotion
         *      qualification.  This may, however, introduce DMA
         *      issues (e.g. floppy DMA transfer overrun/underrun).
         */
        if (pr->power.bm_activity & cx->demotion.threshold.bm) {
            local_irq_enable();
            next_state = cx->demotion.state;
            goto end;
        }
    }

    cx->usage++;

    /*
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch (pr->power.state) {

    case ACPI_STATE_C1:
        /*
         * Invoke C1.
         * Use the appropriate idle routine, the one that would
         * be used without acpi C-states.
         */
        if (pm_idle_save)
            pm_idle_save();
        else
            safe_halt();
        /*
         * TBD: Can't get time duration while in C1, as resumes
         *      go to an ISR rather than here.  Need to instrument
         *      base interrupt handler.
         */
        sleep_ticks = 0xFFFFFFFF;
        break;

    case ACPI_STATE_C2:
        /* Get start time (ticks) */
        t1 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Invoke C2 */
        inb(pr->power.states[ACPI_STATE_C2].address);
        /* Dummy op - must do something useless after P_LVL2 read */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Get end time (ticks) */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Re-enable interrupts */
        local_irq_enable();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
        break;

    case ACPI_STATE_C3:
        /* Disable bus master arbitration */
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
        /* Get start time (ticks) */
        t1 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Invoke C3 */
        inb(pr->power.states[ACPI_STATE_C3].address);
        /* Dummy op - must do something useless after P_LVL3 read */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Get end time (ticks) */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Enable bus master arbitration */
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        /* Re-enable interrupts */
        local_irq_enable();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
        break;

    default:
        local_irq_enable();
        return;
    }

    next_state = pr->power.state;

    /*
     * Promotion?
     * ----------
     * Track the number of longs (time asleep is greater than threshold)
     * and promote when the count threshold is reached.  Note that bus
     * mastering activity may prevent promotions.
     * Do not promote above max_cstate.
     */
    if (cx->promotion.state && (cx->promotion.state <= max_cstate)) {
        if (sleep_ticks > cx->promotion.threshold.ticks) {
            cx->promotion.count++;
            cx->demotion.count = 0;
            if (cx->promotion.count >= cx->promotion.threshold.count) {
                if (pr->flags.bm_check) {
                    if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
                        next_state = cx->promotion.state;
                        goto end;
                    }
                }
                else {
                    next_state = cx->promotion.state;
                    goto end;
                }
            }
        }
    }

    /*
     * Demotion?
     * ---------
     * Track the number of shorts (time asleep is less than time threshold)
     * and demote when the usage threshold is reached.
     */
    if (cx->demotion.state) {
        if (sleep_ticks < cx->demotion.threshold.ticks) {
            cx->demotion.count++;
            cx->promotion.count = 0;
            if (cx->demotion.count >= cx->demotion.threshold.count) {
                next_state = cx->demotion.state;
                goto end;
            }
        }
    }

end:
    /*
     * Demote if current state exceeds max_cstate
     */
    if (pr->power.state > max_cstate) {
        next_state = max_cstate;
    }

    /*
     * New Cx State?
     * -------------
     * If we're going to start using a new Cx state we must clean up
     * from the previous and prepare to use the new.
     */
    if (next_state != pr->power.state)
        acpi_processor_power_activate(pr, next_state);

    return;
}
static int
acpi_processor_set_power_policy(struct acpi_processor *pr)
{
    ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

    /*
     * This function sets the default Cx state policy (OS idle handler).
     * Our scheme is to promote quickly to C2 but more conservatively
     * to C3.  We're favoring C2 for its characteristics of low latency
     * (quick response), good power savings, and ability to allow bus
     * mastering activity.  Note that the Cx state policy is completely
     * customizable and can be altered dynamically.
     */

    if (!pr)
        return_VALUE(-EINVAL);

    /*
     * C0/C1
     * -----
     */
    pr->power.state = ACPI_STATE_C1;
    pr->power.default_state = ACPI_STATE_C1;

    /*
     * C1/C2
     * -----
     * Set the default C1 promotion and C2 demotion policies, where we
     * promote from C1 to C2 after several (10) successive C1 transitions,
     * as we cannot (currently) measure the time spent in C1.  Demote from
     * C2 to C1 anytime we experience a 'short' (time spent in C2 is less
     * than the C2 transition latency).  Note the simplifying assumption
     * that the 'cost' of a transition is amortized when we sleep for at
     * least as long as the transition's latency (thus the total transition
     * time is two times the latency).
     *
     * TBD: Measure C1 sleep times by instrumenting the core IRQ handler.
     * TBD: Demote to default C-State after long periods of activity.
     * TBD: Investigate policy's use of CPU utilization -vs- sleep duration.
     */
    if (pr->power.states[ACPI_STATE_C2].valid) {
        pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10;
        pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C2].latency_ticks;
        pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2;

        pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1;
        pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C2].latency_ticks;
        pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1;
    }

    /*
     * C2/C3
     * -----
     * Set default C2 promotion and C3 demotion policies, where we promote
     * from C2 to C3 after several (4) cycles of no bus mastering activity
     * while maintaining sleep time criteria.  Demote immediately on a
     * short or whenever bus mastering activity occurs.
     */
    if ((pr->power.states[ACPI_STATE_C2].valid) &&
        (pr->power.states[ACPI_STATE_C3].valid)) {
        pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4;
        pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C3].latency_ticks;
        pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F;
        pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3;

        pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1;
        pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C3].latency_ticks;
        pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F;
        pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2;
    }

    return_VALUE(0);
}
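/*
 * Worked example (illustrative, not part of the original source): with a
 * C2 latency of 100us (~357 PM timer ticks) and a C3 latency of 1000us
 * (~3579 ticks), the defaults above promote C1->C2 after 10 successive
 * C1 residencies, and C2->C3 after 4 sleeps longer than 3579 ticks during
 * which bm_activity & 0x0F == 0 (no bus mastering in the last 4 quanta).
 * A single 'short' sleep demotes again.
 */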
static int
acpi_processor_get_power_info(struct acpi_processor *pr)
{
    int result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

    if (!pr)
        return_VALUE(-EINVAL);

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "lvl2[0x%08x] lvl3[0x%08x]\n",
        pr->power.states[ACPI_STATE_C2].address,
        pr->power.states[ACPI_STATE_C3].address));

    /* TBD: Support ACPI 2.0 objects */

    /*
     * C0
     * --
     * This state exists only as filler in our array.
     */
    pr->power.states[ACPI_STATE_C0].valid = 1;

    /*
     * C1
     * --
     * ACPI requires C1 support for all processors.
     *
     * TBD: What about PROC_C1?
     */
    pr->power.states[ACPI_STATE_C1].valid = 1;

    /*
     * C2
     * --
     * We're (currently) only supporting C2 on UP systems.
     *
     * TBD: Support for C2 on MP (P_LVL2_UP).
     */
    if (pr->power.states[ACPI_STATE_C2].address) {

        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;

        /*
         * C2 latency must be less than or equal to 100 microseconds.
         */
        if (acpi_fadt.plvl2_lat > ACPI_PROCESSOR_MAX_C2_LATENCY)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C2 latency too large [%d]\n",
                acpi_fadt.plvl2_lat));
        /*
         * Only support C2 on UP systems (see TBD above).
         */
        else if (errata.smp)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C2 not supported in SMP mode\n"));
        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy.
         */
        else {
            pr->power.states[ACPI_STATE_C2].valid = 1;
            pr->power.states[ACPI_STATE_C2].latency_ticks =
                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat);
        }
    }

    /*
     * C3
     * --
     * TBD: Investigate use of WBINVD on UP/SMP system in absence of
     *      bm_control.
     */
    if (pr->power.states[ACPI_STATE_C3].address) {

        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        /*
         * C3 latency must be less than or equal to 1000 microseconds.
         */
        if (acpi_fadt.plvl3_lat > ACPI_PROCESSOR_MAX_C3_LATENCY)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 latency too large [%d]\n",
                acpi_fadt.plvl3_lat));
        /*
         * Only support C3 when bus mastering arbitration control
         * is present (able to disable bus mastering to maintain
         * cache coherency while in C3).
         */
        else if (!pr->flags.bm_control)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 support requires bus mastering control\n"));
        /*
         * Only support C3 on UP systems, as bm_control is only viable
         * on a UP system and flushing caches (e.g. WBINVD) is simply
         * too costly (at this time).
         */
        else if (errata.smp)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 not supported in SMP mode\n"));
        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 not supported on PIIX4 with Type-F DMA\n"));
        }
        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy.
         */
        else {
            pr->power.states[ACPI_STATE_C3].valid = 1;
            pr->power.states[ACPI_STATE_C3].latency_ticks =
                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
            pr->flags.bm_check = 1;
        }
    }

    /*
     * Set Default Policy
     * ------------------
     * Now that we know which states are supported, set the default
     * policy.  Note that this policy can be changed dynamically
     * (e.g. encourage deeper sleeps to conserve battery life when
     * not on AC).
     */
    result = acpi_processor_set_power_policy(pr);
    if (result)
        return_VALUE(result);

    /*
     * If this processor supports C2 or C3 we denote it as being 'power
     * manageable'.  Note that there's really no policy involved for
     * when only C1 is supported.
     */
    if (pr->power.states[ACPI_STATE_C2].valid
        || pr->power.states[ACPI_STATE_C3].valid)
        pr->flags.power = 1;

    return_VALUE(0);
}
/* --------------------------------------------------------------------------
                              Performance Management
   -------------------------------------------------------------------------- */
#ifdef CONFIG_CPU_FREQ

static DECLARE_MUTEX(performance_sem);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * this means that each time a CPUfreq driver that is also registered
 * with the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */
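/*
 * Illustrative example (not part of the original source): if _PSS exposes
 * P0..P2 at {1600, 1400, 1200} MHz and the platform reports _PPC = 1, the
 * notifier below clamps policy->max to states[1].core_frequency * 1000 =
 * 1400000 kHz, leaving only P1 and P2 reachable until _PPC changes again.
 */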
#define PPC_REGISTERED  1
#define PPC_IN_USE      2

static int acpi_processor_ppc_status = 0;
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
    unsigned long event,
    void *data)
{
    struct cpufreq_policy *policy = data;
    struct acpi_processor *pr;
    unsigned int ppc = 0;

    down(&performance_sem);

    if (event != CPUFREQ_INCOMPATIBLE)
        goto out;

    pr = processors[policy->cpu];
    if (!pr || !pr->performance)
        goto out;

    ppc = (unsigned int) pr->performance_platform_limit;
    if (!ppc)
        goto out;

    /* must be >=, not >: states[state_count] would be out of bounds */
    if (ppc >= pr->performance->state_count)
        goto out;

    cpufreq_verify_within_limits(policy, 0,
        pr->performance->states[ppc].core_frequency * 1000);

 out:
    up(&performance_sem);

    return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
    .notifier_call = acpi_processor_ppc_notifier,
};
static int
acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
    acpi_status status = 0;
    unsigned long ppc = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

    if (!pr)
        return_VALUE(-EINVAL);

    /*
     * _PPC indicates the maximum state currently supported by the
     * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
     */
    status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

    if (status != AE_NOT_FOUND)
        acpi_processor_ppc_status |= PPC_IN_USE;

    if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n"));
        return_VALUE(-ENODEV);
    }

    pr->performance_platform_limit = (int) ppc;

    return_VALUE(0);
}
static int acpi_processor_ppc_has_changed(
    struct acpi_processor *pr)
{
    int ret = acpi_processor_get_platform_limit(pr);
    if (ret < 0)
        return (ret);
    else
        return cpufreq_update_policy(pr->id);
}

static void acpi_processor_ppc_init(void)
{
    if (!cpufreq_register_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
        acpi_processor_ppc_status |= PPC_REGISTERED;
    else
        printk(KERN_DEBUG "Warning: Processor Platform Limit not supported.\n");
}

static void acpi_processor_ppc_exit(void)
{
    if (acpi_processor_ppc_status & PPC_REGISTERED)
        cpufreq_unregister_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER);

    acpi_processor_ppc_status &= ~PPC_REGISTERED;
}
/*
 * When registering a cpufreq driver with this ACPI processor driver, the
 * _PCT and _PSS structures are read out and written into struct
 * acpi_processor_performance.
 */
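/*
 * For reference (paraphrased from the ACPI specification, not from this
 * file): each _PSS package entry carries six integers, matching the
 * "NNNNNN" format string used below:
 *
 *   Package() { core_frequency (MHz), power (mW), transition_latency (us),
 *               bus_master_latency (us), control, status }
 *
 * e.g. a 1600 MHz, 21500 mW state with 250us latencies might read
 * Package() { 1600, 21500, 250, 250, 0x00, 0x00 }.
 */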
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
    acpi_status status = AE_OK;
    u32 arg0_buf[3];
    union acpi_object arg0 = {ACPI_TYPE_BUFFER};
    struct acpi_object_list no_object = {1, &arg0};
    struct acpi_object_list *pdc;

    ACPI_FUNCTION_TRACE("acpi_processor_set_pdc");

    arg0.buffer.length = 12;
    arg0.buffer.pointer = (u8 *) arg0_buf;
    arg0_buf[0] = ACPI_PDC_REVISION_ID;
    arg0_buf[1] = 0;
    arg0_buf[2] = 0;

    pdc = (pr->performance->pdc) ? pr->performance->pdc : &no_object;

    status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL);

    if ((ACPI_FAILURE(status)) && (pr->performance->pdc))
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Error evaluating _PDC, using legacy perf. control...\n"));

    return_VALUE(status);
}
static int
acpi_processor_get_performance_control(struct acpi_processor *pr)
{
    int result = 0;
    acpi_status status = 0;
    struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
    union acpi_object *pct = NULL;
    union acpi_object obj = {0};

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

    status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n"));
        return_VALUE(-ENODEV);
    }

    pct = (union acpi_object *) buffer.pointer;
    if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
        || (pct->package.count != 2)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n"));
        result = -EFAULT;
        goto end;
    }

    /*
     * control_register
     */
    obj = pct->package.elements[0];

    if ((obj.type != ACPI_TYPE_BUFFER)
        || (obj.buffer.length < sizeof(struct acpi_pct_register))
        || (obj.buffer.pointer == NULL)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Invalid _PCT data (control_register)\n"));
        result = -EFAULT;
        goto end;
    }
    memcpy(&pr->performance->control_register, obj.buffer.pointer, sizeof(struct acpi_pct_register));

    /*
     * status_register
     */
    obj = pct->package.elements[1];

    if ((obj.type != ACPI_TYPE_BUFFER)
        || (obj.buffer.length < sizeof(struct acpi_pct_register))
        || (obj.buffer.pointer == NULL)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Invalid _PCT data (status_register)\n"));
        result = -EFAULT;
        goto end;
    }
    memcpy(&pr->performance->status_register, obj.buffer.pointer, sizeof(struct acpi_pct_register));

end:
    acpi_os_free(buffer.pointer);

    return_VALUE(result);
}
static int
acpi_processor_get_performance_states(struct acpi_processor *pr)
{
    int result = 0;
    acpi_status status = AE_OK;
    struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
    struct acpi_buffer format = {sizeof("NNNNNN"), "NNNNNN"};
    struct acpi_buffer state = {0, NULL};
    union acpi_object *pss = NULL;
    int i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

    status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n"));
        return_VALUE(-ENODEV);
    }

    pss = (union acpi_object *) buffer.pointer;
    if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
        result = -EFAULT;
        goto end;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
        pss->package.count));

    pr->performance->state_count = pss->package.count;
    pr->performance->states = kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, GFP_KERNEL);
    if (!pr->performance->states) {
        result = -ENOMEM;
        goto end;
    }

    for (i = 0; i < pr->performance->state_count; i++) {

        struct acpi_processor_px *px = &(pr->performance->states[i]);

        state.length = sizeof(struct acpi_processor_px);
        state.pointer = px;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

        status = acpi_extract_package(&(pss->package.elements[i]),
            &format, &state);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
            result = -EFAULT;
            kfree(pr->performance->states);
            goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
            i,
            (u32) px->core_frequency,
            (u32) px->power,
            (u32) px->transition_latency,
            (u32) px->bus_master_latency,
            (u32) px->control,
            (u32) px->status));

        if (!px->core_frequency) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data: freq is zero\n"));
            result = -EFAULT;
            kfree(pr->performance->states);
            goto end;
        }
    }

end:
    acpi_os_free(buffer.pointer);

    return_VALUE(result);
}
static int
acpi_processor_get_performance_info(struct acpi_processor *pr)
{
    int result = 0;
    acpi_status status = AE_OK;
    acpi_handle handle = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

    if (!pr || !pr->performance || !pr->handle)
        return_VALUE(-EINVAL);

    acpi_processor_set_pdc(pr);

    status = acpi_get_handle(pr->handle, "_PCT", &handle);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "ACPI-based processor performance control unavailable\n"));
        return_VALUE(-ENODEV);
    }

    result = acpi_processor_get_performance_control(pr);
    if (result)
        return_VALUE(result);

    result = acpi_processor_get_performance_states(pr);
    if (result)
        return_VALUE(result);

    result = acpi_processor_get_platform_limit(pr);
    if (result)
        return_VALUE(result);

    return_VALUE(0);
}
int acpi_processor_notify_smm(struct module *calling_module)
{
    acpi_status status;
    static int is_done = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

    if (!(acpi_processor_ppc_status & PPC_REGISTERED))
        return_VALUE(-EBUSY);

    if (!try_module_get(calling_module))
        return_VALUE(-EINVAL);

    /* is_done is set to negative if an error occurred,
     * and to positive if _no_ error occurred, but SMM
     * was already notified. This avoids double notification
     * which might lead to unexpected results...
     */
    if (is_done > 0) {
        module_put(calling_module);
        return_VALUE(0);
    }
    else if (is_done < 0) {
        module_put(calling_module);
        return_VALUE(is_done);
    }

    is_done = -EIO;

    /* Can't write pstate_cnt to smi_cmd if either value is zero */
    if ((!acpi_fadt.smi_cmd) ||
        (!acpi_fadt.pstate_cnt)) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "No SMI port or pstate_cnt\n"));
        module_put(calling_module);
        return_VALUE(0);
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

    /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
     * it anyway, so we need to support it... */
    if (acpi_fadt_is_v1) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Using v1.0 FADT reserved value for pstate_cnt\n"));
    }

    status = acpi_os_write_port(acpi_fadt.smi_cmd,
        (u32) acpi_fadt.pstate_cnt, 8);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Failed to write pstate_cnt [0x%x] to "
            "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
        module_put(calling_module);
        return_VALUE(status);
    }

    is_done = 1;

    /* Success. If there's no _PPC, we need to fear nothing, so
     * we can allow the cpufreq driver to be rmmod'ed. */
    if (!(acpi_processor_ppc_status & PPC_IN_USE))
        module_put(calling_module);

    return_VALUE(0);
}
EXPORT_SYMBOL(acpi_processor_notify_smm);
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
    .open    = acpi_processor_perf_open_fs,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor *pr = (struct acpi_processor *)seq->private;
    int i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

    if (!pr)
        goto end;

    if (!pr->performance) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    seq_printf(seq, "state count: %d\n"
        "active state: P%d\n",
        pr->performance->state_count,
        pr->performance->state);

    seq_puts(seq, "states:\n");
    for (i = 0; i < pr->performance->state_count; i++)
        seq_printf(seq, " %cP%d: %d MHz, %d mW, %d uS\n",
            (i == pr->performance->state?'*':' '), i,
            (u32) pr->performance->states[i].core_frequency,
            (u32) pr->performance->states[i].power,
            (u32) pr->performance->states[i].transition_latency);

end:
    return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_perf_seq_show,
        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_performance(
    struct file *file,
    const char __user *buffer,
    size_t count,
    loff_t *data)
{
    int result = 0;
    struct seq_file *m = (struct seq_file *) file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *) m->private;
    struct acpi_processor_performance *perf;
    char state_string[12] = {'\0'};
    unsigned int new_state = 0;
    struct cpufreq_policy policy;

    ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

    if (!pr || (count > sizeof(state_string) - 1))
        return_VALUE(-EINVAL);

    perf = pr->performance;
    if (!perf)
        return_VALUE(-EINVAL);

    if (copy_from_user(state_string, buffer, count))
        return_VALUE(-EFAULT);

    state_string[count] = '\0';
    new_state = simple_strtoul(state_string, NULL, 0);

    if (new_state >= perf->state_count)
        return_VALUE(-EINVAL);

    cpufreq_get_policy(&policy, pr->id);

    policy.cpu = pr->id;
    policy.min = perf->states[new_state].core_frequency * 1000;
    policy.max = perf->states[new_state].core_frequency * 1000;

    result = cpufreq_set_policy(&policy);
    if (result)
        return_VALUE(result);

    return_VALUE(count);
}
static void
acpi_cpufreq_add_file(struct acpi_processor *pr)
{
    struct proc_dir_entry *entry = NULL;
    struct acpi_device *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_cpufreq_add_file");

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    /* add file 'performance' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
        S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_PERFORMANCE));
    else {
        entry->proc_fops = &acpi_processor_perf_fops;
        entry->proc_fops->write = acpi_processor_write_performance;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    return_VOID;
}

static void
acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
    struct acpi_device *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_cpufreq_remove_file");

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    /* remove file 'performance' */
    remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
        acpi_device_dir(device));

    return_VOID;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr) { return; }
static void acpi_cpufreq_remove_file(struct acpi_processor *pr) { return; }
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
int
acpi_processor_register_performance(
    struct acpi_processor_performance *performance,
    unsigned int cpu)
{
    struct acpi_processor *pr;

    ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

    if (!(acpi_processor_ppc_status & PPC_REGISTERED))
        return_VALUE(-EINVAL);

    down(&performance_sem);

    pr = processors[cpu];
    if (!pr) {
        up(&performance_sem);
        return_VALUE(-ENODEV);
    }

    if (pr->performance) {
        up(&performance_sem);
        return_VALUE(-EBUSY);
    }

    pr->performance = performance;

    if (acpi_processor_get_performance_info(pr)) {
        pr->performance = NULL;
        up(&performance_sem);
        return_VALUE(-EIO);
    }

    acpi_cpufreq_add_file(pr);

    up(&performance_sem);
    return_VALUE(0);
}
EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(
    struct acpi_processor_performance *performance,
    unsigned int cpu)
{
    struct acpi_processor *pr;

    ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

    down(&performance_sem);

    pr = processors[cpu];
    if (!pr) {
        up(&performance_sem);
        return_VOID;
    }

    kfree(pr->performance->states);
    pr->performance = NULL;

    acpi_cpufreq_remove_file(pr);

    up(&performance_sem);

    return_VOID;
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);

/* for the rest of it, check arch/i386/kernel/cpu/cpufreq/acpi.c */
#else /* !CONFIG_CPU_FREQ */

static void acpi_processor_ppc_init(void) { return; }
static void acpi_processor_ppc_exit(void) { return; }

static int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
    static unsigned int printout = 1;
    if (printout) {
        printk(KERN_WARNING "Warning: Processor Platform Limit event detected, but not handled.\n");
        printk(KERN_WARNING "Consider compiling CPUfreq support into your kernel.\n");
        printout = 0;
    }
    return 0;
}

#endif /* CONFIG_CPU_FREQ */
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */

static int
acpi_processor_get_throttling(struct acpi_processor *pr)
{
    int state = 0;
    u32 value = 0;
    u32 duty_mask = 0;
    u32 duty_value = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_throttling");

    if (!pr)
        return_VALUE(-EINVAL);

    if (!pr->flags.throttling)
        return_VALUE(-ENODEV);

    pr->throttling.state = 0;

    local_irq_disable();

    duty_mask = pr->throttling.state_count - 1;
    duty_mask <<= pr->throttling.duty_offset;

    value = inl(pr->throttling.address);

    /*
     * Compute the current throttling state when throttling is enabled
     * (bit 4 is on).
     */
    if (value & 0x10) {
        duty_value = value & duty_mask;
        duty_value >>= pr->throttling.duty_offset;

        if (duty_value)
            state = pr->throttling.state_count - duty_value;
    }

    pr->throttling.state = state;

    local_irq_enable();

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "Throttling state is T%d (%d%% throttling applied)\n",
        state, pr->throttling.states[state].performance));

    return_VALUE(0);
}
static int
acpi_processor_set_throttling(
    struct acpi_processor *pr,
    int state)
{
    u32 value = 0;
    u32 duty_mask = 0;
    u32 duty_value = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_set_throttling");

    if (!pr)
        return_VALUE(-EINVAL);

    if ((state < 0) || (state > (pr->throttling.state_count - 1)))
        return_VALUE(-EINVAL);

    if (!pr->flags.throttling)
        return_VALUE(-ENODEV);

    if (state == pr->throttling.state)
        return_VALUE(0);

    local_irq_disable();

    /*
     * Calculate the duty_value and duty_mask.
     */
    if (state) {
        duty_value = pr->throttling.state_count - state;
        duty_value <<= pr->throttling.duty_offset;

        /* Used to clear all duty_value bits */
        duty_mask = pr->throttling.state_count - 1;
        duty_mask <<= acpi_fadt.duty_offset;
        duty_mask = ~duty_mask;
    }
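    /*
     * Worked example (illustrative, not part of the original source):
     * with duty_width = 3 the FADT gives state_count = 1 << 3 = 8.
     * Requesting T2 yields duty_value = 8 - 2 = 6; with duty_offset = 1
     * that becomes 0x0C, while duty_mask = ~((8 - 1) << 1) = ~0x0E
     * clears the old duty field before the new value is OR'ed in below.
     */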
    /*
     * Disable throttling by writing a 0 to bit 4.  Note that we must
     * turn throttling off before we can change the duty_value.
     */
    value = inl(pr->throttling.address);
    if (value & 0x10) {
        value &= 0xFFFFFFEF;
        outl(value, pr->throttling.address);
    }

    /*
     * Write the new duty_value and then enable throttling.  Note
     * that a state value of 0 leaves throttling disabled.
     */
    if (state) {
        value |= duty_value;
        outl(value, pr->throttling.address);

        value |= 0x00000010;
        outl(value, pr->throttling.address);
    }

    pr->throttling.state = state;

    local_irq_enable();

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "Throttling state set to T%d (%d%%)\n", state,
        (pr->throttling.states[state].performance ? pr->throttling.states[state].performance / 10 : 0)));

    return_VALUE(0);
}
static int
acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
    int result = 0;
    int step = 0;
    int i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info");

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
        pr->throttling.address,
        pr->throttling.duty_offset,
        pr->throttling.duty_width));

    if (!pr)
        return_VALUE(-EINVAL);

    /* TBD: Support ACPI 2.0 objects */

    if (!pr->throttling.address) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
        return_VALUE(0);
    }
    else if (!pr->throttling.duty_width) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
        return_VALUE(0);
    }
    /* TBD: Support duty_cycle values that span bit 4. */
    else if ((pr->throttling.duty_offset
        + pr->throttling.duty_width) > 4) {
        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n"));
        return_VALUE(0);
    }

    /*
     * PIIX4 Errata: We don't support throttling on the original PIIX4.
     * This shouldn't be an issue as few (if any) mobile systems ever
     * used this part.
     */
    if (errata.piix4.throttle) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Throttling not supported on PIIX4 A- or B-step\n"));
        return_VALUE(0);
    }

    pr->throttling.state_count = 1 << acpi_fadt.duty_width;

    /*
     * Compute state values.  Note that throttling displays a linear power/
     * performance relationship (at 50% performance the CPU will consume
     * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
     */

    step = (1000 / pr->throttling.state_count);

    for (i = 0; i < pr->throttling.state_count; i++) {
        pr->throttling.states[i].performance = step * i;
        pr->throttling.states[i].power = step * i;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
        pr->throttling.state_count));
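    /*
     * Worked example (illustrative, not part of the original source):
     * with duty_width = 3, state_count = 8 and step = 1000 / 8 = 125,
     * so the table reads T0 = 0, T1 = 125, ... T7 = 875 tenths of a
     * percent -- i.e. T7 throttles the clock by 87.5%, leaving 12.5%
     * of nominal performance.
     */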
    pr->flags.throttling = 1;

    /*
     * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
     * thermal) decide to lower performance if it so chooses, but for now
     * we'll crank up the speed.
     */
    result = acpi_processor_get_throttling(pr);
    if (result)
        goto end;

    if (pr->throttling.state) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n",
            pr->throttling.state));
        result = acpi_processor_set_throttling(pr, 0);
        if (result)
            goto end;
    }

end:
    if (result)
        pr->flags.throttling = 0;

    return_VALUE(result);
}
/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */

static int
acpi_processor_apply_limit(struct acpi_processor *pr)
{
    int result = 0;
    u16 px = 0;
    u16 tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_apply_limit");

    if (!pr)
        return_VALUE(-EINVAL);

    if (!pr->flags.limit)
        return_VALUE(-ENODEV);

    if (pr->flags.throttling) {
        if (pr->limit.user.tx > tx)
            tx = pr->limit.user.tx;
        if (pr->limit.thermal.tx > tx)
            tx = pr->limit.thermal.tx;

        result = acpi_processor_set_throttling(pr, tx);
        if (result)
            goto end;
    }

    pr->limit.state.px = px;
    pr->limit.state.tx = tx;

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d] limit set to (P%d:T%d)\n",
        pr->id,
        pr->limit.state.px,
        pr->limit.state.tx));

end:
    if (result)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n"));

    return_VALUE(result);
}
#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
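/*
 * Back-of-the-envelope illustration (not part of the original source):
 * dynamic power scales roughly with f * V^2 and attainable voltage scales
 * roughly with frequency, giving P ~ f^3.  A 20% frequency reduction then
 * cuts power to about 0.8^3 = 51% of nominal, where pure clock throttling
 * at an 80% duty cycle would save only ~20%.
 */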
static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;
static int cpu_has_cpufreq(unsigned int cpu)
{
    struct cpufreq_policy policy;
    if (!acpi_thermal_cpufreq_is_init)
        return 0;
    if (!cpufreq_get_policy(&policy, cpu))
        return 1;
    return 0;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
    if (!cpu_has_cpufreq(cpu))
        return -ENODEV;

    if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
        cpufreq_thermal_reduction_pctg[cpu] += 20;
        cpufreq_update_policy(cpu);
        return 0;
    }

    return -ERANGE;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
    if (!cpu_has_cpufreq(cpu))
        return -ENODEV;

    if (cpufreq_thermal_reduction_pctg[cpu] >= 20) {
        cpufreq_thermal_reduction_pctg[cpu] -= 20;
        cpufreq_update_policy(cpu);
        return 0;
    }

    return -ERANGE;
}
static int acpi_thermal_cpufreq_notifier(
    struct notifier_block *nb,
    unsigned long event,
    void *data)
{
    struct cpufreq_policy *policy = data;
    unsigned long max_freq = 0;

    if (event != CPUFREQ_ADJUST)
        goto out;

    max_freq = (policy->cpuinfo.max_freq * (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;

    cpufreq_verify_within_limits(policy, 0, max_freq);
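    /*
     * Illustrative numbers (not part of the original source): at a 40%
     * thermal reduction on a CPU whose cpuinfo.max_freq is 2000000 kHz,
     * the ceiling becomes (2000000 * 60) / 100 = 1200000 kHz.
     */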
 out:
    return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
    .notifier_call = acpi_thermal_cpufreq_notifier,
};

static void acpi_thermal_cpufreq_init(void)
{
    int i;

    for (i = 0; i < NR_CPUS; i++)
        cpufreq_thermal_reduction_pctg[i] = 0;

    i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);
    if (!i)
        acpi_thermal_cpufreq_is_init = 1;
}

static void acpi_thermal_cpufreq_exit(void)
{
    if (acpi_thermal_cpufreq_is_init)
        cpufreq_unregister_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);

    acpi_thermal_cpufreq_is_init = 0;
}
#else /* ! CONFIG_CPU_FREQ */

static void acpi_thermal_cpufreq_init(void) { return; }
static void acpi_thermal_cpufreq_exit(void) { return; }
static int acpi_thermal_cpufreq_increase(unsigned int cpu) { return -ENODEV; }
static int acpi_thermal_cpufreq_decrease(unsigned int cpu) { return -ENODEV; }

#endif /* CONFIG_CPU_FREQ */
int
acpi_processor_set_thermal_limit(
    acpi_handle handle,
    int type)
{
    int result = 0;
    struct acpi_processor *pr = NULL;
    struct acpi_device *device = NULL;
    int tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit");

    if ((type < ACPI_PROCESSOR_LIMIT_NONE)
        || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
        return_VALUE(-EINVAL);

    result = acpi_bus_get_device(handle, &device);
    if (result)
        return_VALUE(result);

    pr = (struct acpi_processor *) acpi_driver_data(device);
    if (!pr)
        return_VALUE(-ENODEV);

    /* Thermal limits are always relative to the current Px/Tx state. */
    if (pr->flags.throttling)
        pr->limit.thermal.tx = pr->throttling.state;

    /*
     * Our default policy is to only use throttling at the lowest
     * performance state.
     */

    tx = pr->limit.thermal.tx;

    switch (type) {

    case ACPI_PROCESSOR_LIMIT_NONE:
        do {
            result = acpi_thermal_cpufreq_decrease(pr->id);
        } while (!result);
        tx = 0;
        break;

    case ACPI_PROCESSOR_LIMIT_INCREMENT:
        /* if going up: P-states first, T-states later */

        result = acpi_thermal_cpufreq_increase(pr->id);
        if (!result)
            goto end;
        else if (result == -ERANGE)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "At maximum performance state\n"));

        if (pr->flags.throttling) {
            if (tx == (pr->throttling.state_count - 1))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "At maximum throttling state\n"));
            else
                tx++;
        }
        break;

    case ACPI_PROCESSOR_LIMIT_DECREMENT:
        /* if going down: T-states first, P-states later */

        if (pr->flags.throttling) {
            if (tx == 0)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "At minimum throttling state\n"));
            else {
                tx--;
                goto end;
            }
        }

        result = acpi_thermal_cpufreq_decrease(pr->id);
        if (result == -ERANGE)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "At minimum performance state\n"));

        break;
    }

end:
    if (pr->flags.throttling) {
        pr->limit.thermal.px = 0;
        pr->limit.thermal.tx = tx;

        result = acpi_processor_apply_limit(pr);
        if (result)
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                "Unable to set thermal limit\n"));

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
            pr->limit.thermal.px,
            pr->limit.thermal.tx));
    }

    return_VALUE(result);
}
static int
acpi_processor_get_limit_info(struct acpi_processor *pr)
{
    ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info");

    if (!pr)
        return_VALUE(-EINVAL);

    if (pr->flags.throttling)
        pr->flags.limit = 1;

    return_VALUE(0);
}
/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor *pr = (struct acpi_processor *)seq->private;

    ACPI_FUNCTION_TRACE("acpi_processor_info_seq_show");

    if (!pr)
        goto end;

    seq_printf(seq, "processor id: %d\n"
        "acpi id: %d\n"
        "bus mastering control: %s\n"
        "power management: %s\n"
        "throttling control: %s\n"
        "limit interface: %s\n",
        pr->id,
        pr->acpi_id,
        pr->flags.bm_control ? "yes" : "no",
        pr->flags.power ? "yes" : "no",
        pr->flags.throttling ? "yes" : "no",
        pr->flags.limit ? "yes" : "no");

end:
    return_VALUE(0);
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_info_seq_show,
        PDE(inode)->data);
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor *pr = (struct acpi_processor *)seq->private;
    unsigned int i;

    ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

    if (!pr)
        goto end;

    seq_printf(seq, "active state: C%d\n"
        "default state: C%d\n"
        "max_cstate: C%d\n"
        "bus master activity: %08x\n",
        pr->power.state,
        pr->power.default_state,
        max_cstate,
        pr->power.bm_activity);

    seq_puts(seq, "states:\n");

    for (i = 1; i < ACPI_C_STATE_COUNT; i++) {

        seq_printf(seq, " %cC%d: ",
            (i == pr->power.state?'*':' '), i);

        if (!pr->power.states[i].valid) {
            seq_puts(seq, "<not supported>\n");
            continue;
        }

        if (pr->power.states[i].promotion.state)
            seq_printf(seq, "promotion[C%d] ",
                pr->power.states[i].promotion.state);
        else
            seq_puts(seq, "promotion[--] ");

        if (pr->power.states[i].demotion.state)
            seq_printf(seq, "demotion[C%d] ",
                pr->power.states[i].demotion.state);
        else
            seq_puts(seq, "demotion[--] ");

        seq_printf(seq, "latency[%03d] usage[%08d]\n",
            pr->power.states[i].latency,
            pr->power.states[i].usage);
    }

end:
    return_VALUE(0);
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_power_seq_show,
        PDE(inode)->data);
}
static int acpi_processor_throttling_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor *pr = (struct acpi_processor *)seq->private;
    int i = 0;
    int result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_throttling_seq_show");

    if (!pr)
        goto end;

    if (!(pr->throttling.state_count > 0)) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    result = acpi_processor_get_throttling(pr);

    if (result) {
        seq_puts(seq, "Could not determine current throttling state.\n");
        goto end;
    }

    seq_printf(seq, "state count: %d\n"
        "active state: T%d\n",
        pr->throttling.state_count,
        pr->throttling.state);

    seq_puts(seq, "states:\n");
    for (i = 0; i < pr->throttling.state_count; i++)
        seq_printf(seq, " %cT%d: %02d%%\n",
            (i == pr->throttling.state?'*':' '), i,
            (pr->throttling.states[i].performance ? pr->throttling.states[i].performance / 10 : 0));

end:
    return_VALUE(0);
}

static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_throttling_seq_show,
        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_throttling(
    struct file *file,
    const char __user *buffer,
    size_t count,
    loff_t *data)
{
    int result = 0;
    struct seq_file *m = (struct seq_file *)file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *)m->private;
    char state_string[12] = {'\0'};

    ACPI_FUNCTION_TRACE("acpi_processor_write_throttling");

    if (!pr || (count > sizeof(state_string) - 1))
        return_VALUE(-EINVAL);

    if (copy_from_user(state_string, buffer, count))
        return_VALUE(-EFAULT);

    state_string[count] = '\0';

    result = acpi_processor_set_throttling(pr,
        simple_strtoul(state_string, NULL, 0));
    if (result)
        return_VALUE(result);

    return_VALUE(count);
}
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor *pr = (struct acpi_processor *)seq->private;

    ACPI_FUNCTION_TRACE("acpi_processor_limit_seq_show");

    if (!pr)
        goto end;

    if (!pr->flags.limit) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    seq_printf(seq, "active limit: P%d:T%d\n"
        "user limit: P%d:T%d\n"
        "thermal limit: P%d:T%d\n",
        pr->limit.state.px, pr->limit.state.tx,
        pr->limit.user.px, pr->limit.user.tx,
        pr->limit.thermal.px, pr->limit.thermal.tx);

end:
    return_VALUE(0);
}

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_limit_seq_show,
        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_limit(
    struct file *file,
    const char __user *buffer,
    size_t count,
    loff_t *data)
{
    int result = 0;
    struct seq_file *m = (struct seq_file *)file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *)m->private;
    char limit_string[25] = {'\0'};
    int px = 0;
    int tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_write_limit");

    if (!pr || (count > sizeof(limit_string) - 1)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
        return_VALUE(-EINVAL);
    }

    if (copy_from_user(limit_string, buffer, count)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n"));
        return_VALUE(-EFAULT);
    }

    limit_string[count] = '\0';

    if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n"));
        return_VALUE(-EINVAL);
    }

    if (pr->flags.throttling) {
        if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n"));
            return_VALUE(-EINVAL);
        }
        pr->limit.user.tx = tx;
    }

    result = acpi_processor_apply_limit(pr);

    return_VALUE(count);
}
static int
acpi_processor_add_fs(struct acpi_device *device)
{
    struct proc_dir_entry *entry = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_add_fs");

    if (!acpi_device_dir(device)) {
        acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
            acpi_processor_dir);
        if (!acpi_device_dir(device))
            return_VALUE(-ENODEV);
    }
    acpi_device_dir(device)->owner = THIS_MODULE;

    /* 'info' [R] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
        S_IRUGO, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_INFO));
    else {
        entry->proc_fops = &acpi_processor_info_fops;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'power' [R] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
        S_IRUGO, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_POWER));
    else {
        entry->proc_fops = &acpi_processor_power_fops;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'throttling' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
        S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_THROTTLING));
    else {
        entry->proc_fops = &acpi_processor_throttling_fops;
        entry->proc_fops->write = acpi_processor_write_throttling;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'limit' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
        S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_LIMIT));
    else {
        entry->proc_fops = &acpi_processor_limit_fops;
        entry->proc_fops->write = acpi_processor_write_limit;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    return_VALUE(0);
}
static int
acpi_processor_remove_fs(struct acpi_device *device)
{
    ACPI_FUNCTION_TRACE("acpi_processor_remove_fs");

    if (acpi_device_dir(device)) {
        remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
            acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, acpi_device_dir(device));
        remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
        acpi_device_dir(device) = NULL;
    }

    return_VALUE(0);
}
/* Use the acpiid in MADT to map cpus in case of SMP */

#ifndef CONFIG_SMP
#define convert_acpiid_to_cpu(acpi_id) (0xff)
#else

#ifdef CONFIG_IA64
#define arch_acpiid_to_apicid   ia64_acpiid_to_sapicid
#define arch_cpu_to_apicid      ia64_cpu_to_sapicid
#define ARCH_BAD_APICID         (0xffff)
#else
#define arch_acpiid_to_apicid   x86_acpiid_to_apicid
#define arch_cpu_to_apicid      x86_cpu_to_apicid
#define ARCH_BAD_APICID         (0xff)
#endif

static u8 convert_acpiid_to_cpu(u8 acpi_id)
{
    u16 apic_id;
    int i;

    apic_id = arch_acpiid_to_apicid[acpi_id];
    if (apic_id == ARCH_BAD_APICID)
        return -1;

    for (i = 0; i < NR_CPUS; i++) {
        if (arch_cpu_to_apicid[i] == apic_id)
            return i;
    }
    return -1;
}
#endif
/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

static int
acpi_processor_get_info(struct acpi_processor *pr)
{
    acpi_status status = 0;
    union acpi_object object = {0};
    struct acpi_buffer buffer = {sizeof(union acpi_object), &object};
    u8 cpu_index;
    static int cpu0_initialized;

    ACPI_FUNCTION_TRACE("acpi_processor_get_info");

    if (!pr)
        return_VALUE(-EINVAL);

    if (num_online_cpus() > 1)
        errata.smp = TRUE;

    acpi_processor_errata(pr);

    /*
     * Check to see if we have bus mastering arbitration control.  This
     * is required for proper C3 usage (to maintain cache coherency).
     */
    if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) {
        pr->flags.bm_control = 1;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Bus mastering arbitration control present\n"));
    }
    else
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "No bus mastering arbitration control\n"));

    /*
     * Evaluate the processor object.  Note that it is common on SMP to
     * have the first (boot) processor with a valid PBLK address while
     * all others have a NULL address.
     */
    status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error evaluating processor object\n"));
        return_VALUE(-ENODEV);
    }

    /*
     * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
     *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
     */
    pr->acpi_id = object.processor.proc_id;

    cpu_index = convert_acpiid_to_cpu(pr->acpi_id);

    if (!cpu0_initialized && (cpu_index == 0xff)) {
        /* Handle UP system running SMP kernel, with no LAPIC in MADT */
        cpu_index = 0;
    } else if (cpu_index > num_online_cpus()) {
        /*
         * Extra Processor objects may be enumerated on MP systems with
         * less than the max # of CPUs. They should be ignored.
         */
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error getting cpuindex for acpiid 0x%x\n",
            pr->acpi_id));
        return_VALUE(-ENODEV);
    }
    cpu0_initialized = 1;

    pr->id = cpu_index;

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
        pr->acpi_id));

    if (!object.processor.pblk_address)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
    else if (object.processor.pblk_length != 6)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n",
            object.processor.pblk_length));
    else {
        pr->throttling.address = object.processor.pblk_address;
        pr->throttling.duty_offset = acpi_fadt.duty_offset;
        pr->throttling.duty_width = acpi_fadt.duty_width;
        pr->power.states[ACPI_STATE_C2].address =
            object.processor.pblk_address + 4;
        pr->power.states[ACPI_STATE_C3].address =
            object.processor.pblk_address + 5;

        /*
         * We don't care about error returns - we just try to mark
         * these reserved so that nobody else is confused into thinking
         * that this region might be unused..
         *
         * (In particular, allocating the IO range for Cardbus)
         */
        request_region(pr->throttling.address, 6, "ACPI CPU throttle");
    }

    acpi_processor_get_power_info(pr);
#ifdef CONFIG_CPU_FREQ
    acpi_processor_ppc_has_changed(pr);
#endif
    acpi_processor_get_throttling_info(pr);
    acpi_processor_get_limit_info(pr);

    return_VALUE(0);
}
static void
acpi_processor_notify(
    acpi_handle handle,
    u32 event,
    void *data)
{
    struct acpi_processor *pr = (struct acpi_processor *) data;
    struct acpi_device *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_notify");

    if (!pr)
        return_VOID;

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    switch (event) {
    case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
        acpi_processor_ppc_has_changed(pr);
        acpi_bus_generate_event(device, event,
            pr->performance_platform_limit);
        break;
    case ACPI_PROCESSOR_NOTIFY_POWER:
        acpi_bus_generate_event(device, event, 0);
        break;
    default:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Unsupported event [0x%x]\n", event));
        break;
    }

    return_VOID;
}
static int
acpi_processor_add(struct acpi_device *device)
{
    int result = 0;
    acpi_status status = AE_OK;
    struct acpi_processor *pr = NULL;
    u8 i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_add");

    if (!device)
        return_VALUE(-EINVAL);

    pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL);
    if (!pr)
        return_VALUE(-ENOMEM);
    memset(pr, 0, sizeof(struct acpi_processor));

    pr->handle = device->handle;
    strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
    strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
    acpi_driver_data(device) = pr;

    result = acpi_processor_get_info(pr);
    if (result)
        goto end;

    result = acpi_processor_add_fs(device);
    if (result)
        goto end;

    status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
        acpi_processor_notify, pr);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error installing notify handler\n"));
        result = -ENODEV;
        goto end;
    }

    processors[pr->id] = pr;

    /*
     * Install the idle handler if processor power management is supported.
     * Note that the previously set idle handler will still be used on
     * platforms that only support C1.
     */
    if ((pr->flags.power) && (!boot_option_idle_override)) {
        printk(KERN_INFO PREFIX "%s [%s] (supports",
            acpi_device_name(device), acpi_device_bid(device));
        for (i = 1; i < ACPI_C_STATE_COUNT; i++)
            if (pr->power.states[i].valid)
                printk(" C%d", i);
        printk(")\n");

        if (pr->id == 0) {
            pm_idle_save = pm_idle;
            pm_idle = acpi_processor_idle;
        }
    }

    if (pr->flags.throttling) {
        printk(KERN_INFO PREFIX "%s [%s] (supports",
            acpi_device_name(device), acpi_device_bid(device));
        printk(" %d throttling states", pr->throttling.state_count);
        printk(")\n");
    }

end:
    if (result) {
        acpi_processor_remove_fs(device);
        kfree(pr);
    }

    return_VALUE(result);
}
static int
acpi_processor_remove(struct acpi_device *device, int type)
{
    acpi_status status = AE_OK;
    struct acpi_processor *pr = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_remove");

    if (!device || !acpi_driver_data(device))
        return_VALUE(-EINVAL);

    pr = (struct acpi_processor *) acpi_driver_data(device);

    /* Unregister the idle handler when processor #0 is removed. */
    if (pr->id == 0) {
        pm_idle = pm_idle_save;
        /*
         * We are about to unload the current idle thread pm callback
         * (pm_idle).  Wait for all processors to update cached/local
         * copies of pm_idle before proceeding.
         */
        synchronize_kernel();
    }

    status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
        acpi_processor_notify);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error removing notify handler\n"));
    }

    acpi_processor_remove_fs(device);

    processors[pr->id] = NULL;

    kfree(pr);

    return_VALUE(0);
}
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int no_c2c3(struct dmi_system_id *id)
{
    if (max_cstate > ACPI_C_STATES_MAX)
        return 0;

    printk(KERN_NOTICE PREFIX "%s detected - C2,C3 disabled."
        " Override with \"processor.max_cstate=9\"\n", id->ident);

    max_cstate = 1;

    return 0;
}

static struct dmi_system_id __initdata processor_dmi_table[] = {
    { no_c2c3, "IBM ThinkPad R40e", {
      DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
      DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }},
    { no_c2c3, "Medion 41700", {
      DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
      DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }},
    {},
};
/* We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
 */

static int __init
acpi_processor_init(void)
{
    int result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_init");

    memset(&processors, 0, sizeof(processors));
    memset(&errata, 0, sizeof(errata));

    acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
    if (!acpi_processor_dir)
        return_VALUE(0);
    acpi_processor_dir->owner = THIS_MODULE;

    result = acpi_bus_register_driver(&acpi_processor_driver);
    if (result < 0) {
        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        return_VALUE(0);
    }

    acpi_thermal_cpufreq_init();

    acpi_processor_ppc_init();

    dmi_check_system(processor_dmi_table);

    if (max_cstate < ACPI_C_STATES_MAX)
        printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", max_cstate);

    return_VALUE(0);
}
static void __exit
acpi_processor_exit(void)
{
    ACPI_FUNCTION_TRACE("acpi_processor_exit");

    acpi_processor_ppc_exit();

    acpi_thermal_cpufreq_exit();

    acpi_bus_unregister_driver(&acpi_processor_driver);

    remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

    return_VOID;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

module_param_named(max_cstate, max_cstate, uint, 0);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);