/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * TBD:
 * 1. Make # power states dynamic.
 * 2. Support duty_cycle values that span bit 4.
 * 3. Optimize by having the scheduler determine busyness instead of
 *    having us try to calculate it here.
 * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/system.h>
#include <asm/delay.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
#define ACPI_PROCESSOR_FILE_INFO        "info"
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define ACPI_PROCESSOR_FILE_THROTTLING  "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT       "limit"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER     0x81
#define US_TO_PM_TIMER_TICKS(t)         (((t) * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4       /* ~1us (3.58 ticks per us) */
#define C3_OVERHEAD                     4       /* ~1us (3.58 ticks per us) */
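/*
 * Worked example (a sketch, not from the original source): the ACPI PM
 * timer runs at PM_TIMER_FREQUENCY = 3579545 Hz, roughly 3.58 ticks per
 * microsecond, so US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357
 * ticks for a 100us latency.  The 4-tick overhead constants subtract
 * about 1us of measurement cost from each observed C2/C3 sleep.
 */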
#define ACPI_PROCESSOR_LIMIT_USER       0
#define ACPI_PROCESSOR_LIMIT_THERMAL    1

#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME                ("acpi_processor")

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
MODULE_LICENSE("GPL");
static int acpi_processor_add (struct acpi_device *device);
static int acpi_processor_remove (struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_get_limit_info(struct acpi_processor *pr);

static struct acpi_driver acpi_processor_driver = {
        .name = ACPI_PROCESSOR_DRIVER_NAME,
        .class = ACPI_PROCESSOR_CLASS,
        .ids = ACPI_PROCESSOR_HID,
        .add = acpi_processor_add,
        .remove = acpi_processor_remove,

struct acpi_processor_errata {

static struct file_operations acpi_processor_info_fops = {
        .open = acpi_processor_info_open_fs,
        .release = single_release,

static struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .release = single_release,

static struct file_operations acpi_processor_throttling_fops = {
        .open = acpi_processor_throttling_open_fs,
        .release = single_release,

static struct file_operations acpi_processor_limit_fops = {
        .open = acpi_processor_limit_open_fs,
        .release = single_release,

static struct acpi_processor *processors[NR_CPUS];
static struct acpi_processor_errata errata;
static void (*pm_idle_save)(void);
/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */
acpi_processor_errata_piix4 (

        ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4");

                return_VALUE(-EINVAL);

         * Note that 'dev' references the PIIX4 ACPI Controller.

        pci_read_config_byte(dev, PCI_REVISION_ID, &rev);

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));

        case 0:         /* PIIX4 A-step */
        case 1:         /* PIIX4 B-step */
                 * See specification changes #13 ("Manual Throttle Duty Cycle")
                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
                 * erratum #5 ("STPCLK# Deassertion Time") from the January
                 * 2002 PIIX4 specification update.  Applies only to older
                 * PIIX4 models.
                errata.piix4.throttle = 1;

                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
                 * Livelock") from the January 2002 PIIX4 specification update.
                 * Applies to all PIIX4 models.

                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
                 * Status register address.  We'll use this later to read
                 * each IDE controller's DMA status to make sure we catch all
                 * erratum conditions.
                dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_82371AB,
                        PCI_ANY_ID, PCI_ANY_ID, NULL);
                        errata.piix4.bmisx = pci_resource_start(dev, 4);

                 * Find the PIIX4 ISA Controller and read the Motherboard
                 * DMA controller's status to see if Type-F (Fast) DMA mode
                 * is enabled (bit 7) on either channel.  Note that we'll
                 * disable C3 support if this is enabled, as some legacy
                 * devices won't operate well if fast DMA is disabled.
                dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_82371AB_0,
                        PCI_ANY_ID, PCI_ANY_ID, NULL);
                        pci_read_config_byte(dev, 0x76, &value1);
                        pci_read_config_byte(dev, 0x77, &value2);
                        if ((value1 & 0x80) || (value2 & 0x80))
                                errata.piix4.fdma = 1;

        if (errata.piix4.bmisx)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Bus master activity detection (BM-IDE) erratum enabled\n"));
        if (errata.piix4.fdma)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Type-F DMA livelock erratum (C3 disabled)\n"));
acpi_processor_errata (
        struct acpi_processor *pr)

        struct pci_dev *dev = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_errata");

                return_VALUE(-EINVAL);

        dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
                PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, PCI_ANY_ID, NULL);
                result = acpi_processor_errata_piix4(dev);

        return_VALUE(result);
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */
        else if (!acpi_fadt.tmr_val_ext)
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
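/*
 * Illustrative note (an assumption from the PM timer spec, not original
 * text): with a 24-bit timer (tmr_val_ext == 0) that wrapped once,
 * t1 = 0x00FFFFF0 and t2 = 0x00000010 give (0x00FFFFFF - t1) + t2 = 0x1F
 * ticks, within one tick of the true elapsed count; the mask keeps the
 * result inside 24 bits.
 */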
acpi_processor_power_activate (
        struct acpi_processor *pr,

        pr->power.states[pr->power.state].promotion.count = 0;
        pr->power.states[pr->power.state].demotion.count = 0;

        /* Cleanup from old state. */
        switch (pr->power.state) {
                /* Disable bus master reload */
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);

        /* Prepare to use new state. */
                /* Enable bus master reload */
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);

        pr->power.state = state;
acpi_processor_idle (void)

        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;

        pr = processors[smp_processor_id()];

         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.

        cx = &(pr->power.states[pr->power.state]);

         * Check for bus mastering activity (if required), record, and check
         * promotion/demotion thresholds.

        if (pr->flags.bm_check) {

                pr->power.bm_activity <<= 1;

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                        &bm_status, ACPI_MTX_DO_NOT_LOCK);
                        pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                1, ACPI_MTX_DO_NOT_LOCK);

                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity; forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                                || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity++;

                 * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fallback to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        next_state = cx->demotion.state;
         * Invoke the current Cx state to put the processor to sleep.

        switch (pr->power.state) {

                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                sleep_ticks = 0xFFFFFFFF;

                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                inb(pr->power.states[ACPI_STATE_C2].address);
                /* Dummy op - must do something useless after P_LVL2 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Re-enable interrupts */
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;

                /* Disable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                inb(pr->power.states[ACPI_STATE_C3].address);
                /* Dummy op - must do something useless after P_LVL3 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Enable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
                /* Re-enable interrupts */
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
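        /*
         * Worked example (a sketch, assuming the ~3.58 ticks/us PM timer):
         * if ticks_elapsed(t1, t2) = 500 (~140us) and the C3 latency is
         * 100us (latency_ticks = 357), then sleep_ticks = 500 - 357 - 4 =
         * 139 ticks -- only time actually spent asleep, net of transition
         * cost and measurement overhead, feeds the policy counters below.
         */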
        next_state = pr->power.state;

         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.

        if (cx->promotion.state) {
                if (sleep_ticks > cx->promotion.threshold.ticks) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >= cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
                                                next_state = cx->promotion.state;
                                        next_state = cx->promotion.state;

         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.

        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;

         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.

        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);
acpi_processor_set_power_policy (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.

                return_VALUE(-EINVAL);

        pr->power.state = ACPI_STATE_C1;
        pr->power.default_state = ACPI_STATE_C1;

         * Set the default C1 promotion and C2 demotion policies, where we
         * promote from C1 to C2 after several (10) successive C1 transitions,
         * as we cannot (currently) measure the time spent in C1.  Demote from
         * C2 to C1 anytime we experience a 'short' (time spent in C2 is less
         * than the C2 transition latency).  Note the simplifying assumption
         * that the 'cost' of a transition is amortized when we sleep for at
         * least as long as the transition's latency (thus the total transition
         * time is two times the latency).
         *
         * TBD: Measure C1 sleep times by instrumenting the core IRQ handler.
         * TBD: Demote to default C-State after long periods of activity.
         * TBD: Investigate policy's use of CPU utilization -vs- sleep duration.

        if (pr->power.states[ACPI_STATE_C2].valid) {
                pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10;
                pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks =
                        pr->power.states[ACPI_STATE_C2].latency_ticks;
                pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2;

                pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1;
                pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks =
                        pr->power.states[ACPI_STATE_C2].latency_ticks;
                pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1;

         * Set default C2 promotion and C3 demotion policies, where we promote
         * from C2 to C3 after several (4) cycles of no bus mastering activity
         * while maintaining sleep time criteria.  Demote immediately on a
         * short or whenever bus mastering activity occurs.

        if ((pr->power.states[ACPI_STATE_C2].valid) &&
            (pr->power.states[ACPI_STATE_C3].valid)) {
                pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4;
                pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks =
                        pr->power.states[ACPI_STATE_C3].latency_ticks;
                pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F;
                pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3;

                pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1;
                pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks =
                        pr->power.states[ACPI_STATE_C3].latency_ticks;
                pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F;
                pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2;
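        /*
         * Editorial note (assuming the 4-bit history above): bm_activity is
         * a shift register of per-quantum bus-master flags, so a
         * threshold.bm mask of 0x0F means "any DMA seen within the last four
         * idle quanta" -- recent DMA blocks C2->C3 promotion and forces
         * C3->C2 demotion.
         */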
acpi_processor_get_power_info (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

                return_VALUE(-EINVAL);

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "lvl2[0x%08x] lvl3[0x%08x]\n",
                pr->power.states[ACPI_STATE_C2].address,
                pr->power.states[ACPI_STATE_C3].address));

        /* TBD: Support ACPI 2.0 objects */

         * This state exists only as filler in our array.

        pr->power.states[ACPI_STATE_C0].valid = 1;

         * ACPI requires C1 support for all processors.
         *
         * TBD: What about PROC_C1?

        pr->power.states[ACPI_STATE_C1].valid = 1;

         * We're (currently) only supporting C2 on UP systems.
         *
         * TBD: Support for C2 on MP (P_LVL2_UP).

        if (pr->power.states[ACPI_STATE_C2].address) {

                pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;

                 * C2 latency must be less than or equal to 100 microseconds.
                if (acpi_fadt.plvl2_lat > ACPI_PROCESSOR_MAX_C2_LATENCY)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C2 latency too large [%d]\n",
                                acpi_fadt.plvl2_lat));
                 * Only support C2 on UP systems (see TBD above).
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C2 not supported in SMP mode\n"));
                 * Otherwise we've met all of our C2 requirements.
                 * Normalize the C2 latency to expedite policy.
                        pr->power.states[ACPI_STATE_C2].valid = 1;
                        pr->power.states[ACPI_STATE_C2].latency_ticks =
                                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat);

         * TBD: Investigate use of WBINVD on UP/SMP system in absence of
         *      bm_control.

        if (pr->power.states[ACPI_STATE_C3].address) {

                pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

                 * C3 latency must be less than or equal to 1000 microseconds.
                if (acpi_fadt.plvl3_lat > ACPI_PROCESSOR_MAX_C3_LATENCY)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C3 latency too large [%d]\n",
                                acpi_fadt.plvl3_lat));
                 * Only support C3 when bus mastering arbitration control
                 * is present (able to disable bus mastering to maintain
                 * cache coherency while in C3).
                else if (!pr->flags.bm_control)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C3 support requires bus mastering control\n"));
                 * Only support C3 on UP systems, as bm_control is only viable
                 * on a UP system and flushing caches (e.g. WBINVD) is simply
                 * too costly (at this time).
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C3 not supported in SMP mode\n"));
                 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
                 * DMA transfers are used by any ISA device to avoid livelock.
                 * Note that we could disable Type-F DMA (as recommended by
                 * the erratum), but this is known to disrupt certain ISA
                 * devices thus we take the conservative approach.
                else if (errata.piix4.fdma) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "C3 not supported on PIIX4 with Type-F DMA\n"));
                 * Otherwise we've met all of our C3 requirements.
                 * Normalize the C3 latency to expedite policy.  Enable
                 * checking of bus mastering status (bm_check) so we can
                 * use this in our C3 policy.
                        pr->power.states[ACPI_STATE_C3].valid = 1;
                        pr->power.states[ACPI_STATE_C3].latency_ticks =
                                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
                        pr->flags.bm_check = 1;

         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
        result = acpi_processor_set_power_policy(pr);
                return_VALUE(result);

         * If this processor supports C2 or C3 we denote it as being 'power
         * manageable'.  Note that there's really no policy involved for
         * when only C1 is supported.
        if (pr->power.states[ACPI_STATE_C2].valid
            || pr->power.states[ACPI_STATE_C3].valid)
/* --------------------------------------------------------------------------
                              Performance Management
   -------------------------------------------------------------------------- */

#ifdef CONFIG_CPU_FREQ

static DECLARE_MUTEX(performance_sem);
/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

static int acpi_processor_ppc_is_init = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,

        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        down(&performance_sem);

        if (event != CPUFREQ_INCOMPATIBLE)

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)

        ppc = (unsigned int) pr->performance_platform_limit;

        if (ppc >= pr->performance->state_count)

        cpufreq_verify_within_limits(policy, 0,
                pr->performance->states[ppc].core_frequency * 1000);

        up(&performance_sem);

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
acpi_processor_get_platform_limit (
        struct acpi_processor *pr)

        acpi_status status = 0;
        unsigned long ppc = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

                return_VALUE(-EINVAL);

        /*
         * _PPC indicates the maximum state currently supported by the
         * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
         */
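        /*
         * For example (illustrative, not from the original source): on a CPU
         * with four P-states, _PPC returning 2 tells the OS to use only
         * states 2..3, and the notifier above clamps the cpufreq policy
         * maximum to states[2].core_frequency.
         */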
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n"));
                return_VALUE(-ENODEV);

        pr->performance_platform_limit = (int) ppc;
static int acpi_processor_ppc_has_changed(
        struct acpi_processor *pr)

        int ret = acpi_processor_get_platform_limit(pr);

        return cpufreq_update_policy(pr->id);

static void acpi_processor_ppc_init(void) {
        if (!cpufreq_register_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_is_init = 1;
        else
                printk(KERN_DEBUG "Warning: Processor Platform Limit not supported.\n");

static void acpi_processor_ppc_exit(void) {
        if (acpi_processor_ppc_is_init)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_is_init = 0;
/*
 * When registering a cpufreq driver with this ACPI processor driver, the
 * _PCT and _PSS structures are read out and written into struct
 * acpi_processor_performance.
 */

static int acpi_processor_set_pdc (struct acpi_processor *pr)

        acpi_status status = AE_OK;
        u32 arg0_buf[3];
        union acpi_object arg0 = {ACPI_TYPE_BUFFER};
        struct acpi_object_list no_object = {1, &arg0};
        struct acpi_object_list *pdc;

        ACPI_FUNCTION_TRACE("acpi_processor_set_pdc");

        arg0.buffer.length = 12;
        arg0.buffer.pointer = (u8 *) arg0_buf;
        arg0_buf[0] = ACPI_PDC_REVISION_ID;

        pdc = (pr->performance->pdc) ? pr->performance->pdc : &no_object;

        status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL);

        if ((ACPI_FAILURE(status)) && (pr->performance->pdc))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Error evaluating _PDC, using legacy perf. control...\n"));

        return_VALUE(status);
acpi_processor_get_performance_control (
        struct acpi_processor *pr)

        acpi_status status = 0;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *pct = NULL;
        union acpi_object obj = {0};

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n"));
                return_VALUE(-ENODEV);

        pct = (union acpi_object *) buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n"));

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Invalid _PCT data (control_register)\n"));

        memcpy(&pr->performance->control_register, obj.buffer.pointer, sizeof(struct acpi_pct_register));

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Invalid _PCT data (status_register)\n"));

        memcpy(&pr->performance->status_register, obj.buffer.pointer, sizeof(struct acpi_pct_register));

        acpi_os_free(buffer.pointer);

        return_VALUE(result);
acpi_processor_get_performance_states (
        struct acpi_processor *pr)

        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNNN"), "NNNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *pss = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n"));
                return_VALUE(-ENODEV);

        pss = (union acpi_object *) buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states = kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, GFP_KERNEL);
        if (!pr->performance->states) {

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                        &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
                        kfree(pr->performance->states);

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                        (u32) px->core_frequency,
                        (u32) px->transition_latency,
                        (u32) px->bus_master_latency,

                if (!px->core_frequency) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data: freq is zero\n"));
                        kfree(pr->performance->states);

        acpi_os_free(buffer.pointer);

        return_VALUE(result);
acpi_processor_get_performance_info (
        struct acpi_processor *pr)

        acpi_status status = AE_OK;
        acpi_handle handle = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

        if (!pr || !pr->performance || !pr->handle)
                return_VALUE(-EINVAL);

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "ACPI-based processor performance control unavailable\n"));
                return_VALUE(-ENODEV);

        acpi_processor_set_pdc(pr);

        result = acpi_processor_get_performance_control(pr);
                return_VALUE(result);

        result = acpi_processor_get_performance_states(pr);
                return_VALUE(result);

        result = acpi_processor_get_platform_limit(pr);
                return_VALUE(result);
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .llseek = seq_lseek,
        .release = single_release,
static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)

        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");

        seq_printf(seq, "state count: %d\n"
                "active state: P%d\n",
                pr->performance->state_count,
                pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq, " %cP%d: %d MHz, %d mW, %d uS\n",
                        (i == pr->performance->state ? '*' : ' '), i,
                        (u32) pr->performance->states[i].core_frequency,
                        (u32) pr->performance->states[i].power,
                        (u32) pr->performance->states[i].transition_latency);

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)

        return single_open(file, acpi_processor_perf_seq_show,
acpi_processor_write_performance (
        const char __user *buffer,

        struct seq_file *m = (struct seq_file *) file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *) m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = {'\0'};
        unsigned int new_state = 0;
        struct cpufreq_policy policy;

        ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

        if (!pr || (count > sizeof(state_string) - 1))
                return_VALUE(-EINVAL);

        perf = pr->performance;
                return_VALUE(-EINVAL);

        if (copy_from_user(state_string, buffer, count))
                return_VALUE(-EFAULT);

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return_VALUE(-EINVAL);

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
                return_VALUE(result);

        return_VALUE(count);
acpi_cpufreq_add_file (
        struct acpi_processor *pr)

        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_add_file");

        if (acpi_bus_get_device(pr->handle, &device))

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Unable to create '%s' fs entry\n",
                        ACPI_PROCESSOR_FILE_PERFORMANCE));

        entry->proc_fops = &acpi_processor_perf_fops;
        entry->proc_fops->write = acpi_processor_write_performance;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
acpi_cpufreq_remove_file (
        struct acpi_processor *pr)

        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_remove_file");

        if (acpi_bus_get_device(pr->handle, &device))

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                acpi_device_dir(device));

#else

static void acpi_cpufreq_add_file (struct acpi_processor *pr) { return; }
static void acpi_cpufreq_remove_file (struct acpi_processor *pr) { return; }
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
acpi_processor_register_performance (
        struct acpi_processor_performance *performance,

        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

        if (!acpi_processor_ppc_is_init)
                return_VALUE(-EINVAL);

        down(&performance_sem);

        pr = processors[cpu];
                up(&performance_sem);
                return_VALUE(-ENODEV);

        if (pr->performance) {
                up(&performance_sem);
                return_VALUE(-EBUSY);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                up(&performance_sem);

        acpi_cpufreq_add_file(pr);

        up(&performance_sem);

EXPORT_SYMBOL(acpi_processor_register_performance);
acpi_processor_unregister_performance (
        struct acpi_processor_performance *performance,

        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

        if (!acpi_processor_ppc_is_init)

        down(&performance_sem);

        pr = processors[cpu];
                up(&performance_sem);

        kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        up(&performance_sem);

EXPORT_SYMBOL(acpi_processor_unregister_performance);

/* for the rest of it, check arch/i386/kernel/cpu/cpufreq/acpi.c */
#else /* !CONFIG_CPU_FREQ */

static void acpi_processor_ppc_init(void) { return; }
static void acpi_processor_ppc_exit(void) { return; }

static int acpi_processor_ppc_has_changed(struct acpi_processor *pr) {
        static unsigned int printout = 1;
                printk(KERN_WARNING "Warning: Processor Platform Limit event detected, but not handled.\n");
                printk(KERN_WARNING "Consider compiling CPUfreq support into your kernel.\n");

#endif /* CONFIG_CPU_FREQ */
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
acpi_processor_get_throttling (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_get_throttling");

                return_VALUE(-EINVAL);

        if (!pr->flags.throttling)
                return_VALUE(-ENODEV);

        pr->throttling.state = 0;

        local_irq_disable();

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        value = inl(pr->throttling.address);

         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                        state = pr->throttling.state_count - duty_value;

        pr->throttling.state = state;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "Throttling state is T%d (%d%% throttling applied)\n",
                state, pr->throttling.states[state].performance));
acpi_processor_set_throttling (
        struct acpi_processor *pr,

        ACPI_FUNCTION_TRACE("acpi_processor_set_throttling");

                return_VALUE(-EINVAL);

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return_VALUE(-EINVAL);

        if (!pr->flags.throttling)
                return_VALUE(-ENODEV);

        if (state == pr->throttling.state)

        local_irq_disable();

         * Calculate the duty_value and duty_mask.
        duty_value = pr->throttling.state_count - state;

        duty_value <<= pr->throttling.duty_offset;

        /* Used to clear all duty_value bits */
        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= acpi_fadt.duty_offset;
        duty_mask = ~duty_mask;
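        /*
         * Worked example (a sketch, assuming duty_width = 3, duty_offset = 1):
         * state_count = 8, so requesting T2 gives duty_value = (8 - 2) << 1 =
         * 0x0C, while duty_mask = ~((8 - 1) << 1) = ~0x0E clears bits 1-3 of
         * the control register before the new duty value is OR'd in below.
         */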
         * Disable throttling by writing a 0 to bit 4.  Note that we must
         * turn it off before we can change the duty_value.
        value = inl(pr->throttling.address);
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);

         * Write the new duty_value and then enable throttling.  Note
         * that a state value of 0 leaves throttling disabled.
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);

        pr->throttling.state = state;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "Throttling state set to T%d (%d%%)\n", state,
                (pr->throttling.states[state].performance ? pr->throttling.states[state].performance / 10 : 0)));
acpi_processor_get_throttling_info (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info");

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                pr->throttling.address,
                pr->throttling.duty_offset,
                pr->throttling.duty_width));

                return_VALUE(-EINVAL);

        /* TBD: Support ACPI 2.0 objects */

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
        else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset
                  + pr->throttling.duty_width) > 4) {
                ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n"));

         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Throttling not supported on PIIX4 A- or B-step\n"));

        pr->throttling.state_count = 1 << acpi_fadt.duty_width;

         * Compute state values.  Note that throttling displays a linear power/
         * performance relationship (at 50% performance the CPU will consume
         * 50% power).  Values are in 1/10th of a percent to preserve accuracy.

        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = step * i;
                pr->throttling.states[i].power = step * i;
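        /*
         * Worked example (illustrative, assuming duty_width = 3): state_count
         * = 1 << 3 = 8 and step = 1000 / 8 = 125, so states[4].performance =
         * 125 * 4 = 500, i.e. 50.0% throttling applied -- values are tenths
         * of a percent, which is why the /proc display code below divides
         * by 10.
         */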
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                pr->throttling.state_count));

        pr->flags.throttling = 1;

         * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
         * thermal) decide to lower performance if it so chooses, but for now
         * we'll crank up the speed.

        result = acpi_processor_get_throttling(pr);

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n",
                        pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0);

                pr->flags.throttling = 0;

        return_VALUE(result);
/* --------------------------------------------------------------------------
                                Limit Interface
   -------------------------------------------------------------------------- */
acpi_processor_apply_limit (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_apply_limit");

                return_VALUE(-EINVAL);

        if (!pr->flags.limit)
                return_VALUE(-ENODEV);

        if (pr->flags.throttling) {
                if (pr->limit.user.tx > tx)
                        tx = pr->limit.user.tx;
                if (pr->limit.thermal.tx > tx)
                        tx = pr->limit.thermal.tx;

                result = acpi_processor_set_throttling(pr, tx);

        pr->limit.state.px = px;
        pr->limit.state.tx = tx;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d] limit set to (P%d:T%d)\n",
                pr->limit.state.tx));

                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n"));

        return_VALUE(result);
#ifdef CONFIG_CPU_FREQ

/*
 * If a passive cooling situation is detected, CPUfreq is used first, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling,
 * and thus a cubic (instead of linear) reduction of energy.  Also, we allow
 * for _any_ cpufreq driver, not only the acpi-cpufreq driver.
 */

static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;

static int cpu_has_cpufreq(unsigned int cpu)

        struct cpufreq_policy policy;
        if (!acpi_thermal_cpufreq_is_init)
        if (!cpufreq_get_policy(&policy, cpu))

static int acpi_thermal_cpufreq_increase(unsigned int cpu)

        if (!cpu_has_cpufreq(cpu))

        if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
                cpufreq_thermal_reduction_pctg[cpu] += 20;
                cpufreq_update_policy(cpu);

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)

        if (!cpu_has_cpufreq(cpu))

        if (cpufreq_thermal_reduction_pctg[cpu] >= 20) {
                cpufreq_thermal_reduction_pctg[cpu] -= 20;
                cpufreq_update_policy(cpu);

static int acpi_thermal_cpufreq_notifier(
        struct notifier_block *nb,
        unsigned long event,

        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)

        max_freq = (policy->cpuinfo.max_freq *
                    (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);
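        /*
         * Worked example (illustrative): with cpuinfo.max_freq = 2000000 kHz
         * and a 40% thermal reduction for this CPU, the policy maximum
         * becomes (2000000 * (100 - 40)) / 100 = 1200000 kHz; each increase
         * step above adds 20 percentage points, capped at 60%.
         */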
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,

static void acpi_thermal_cpufreq_init(void) {

        for (i = 0; i < NR_CPUS; i++)
                cpufreq_thermal_reduction_pctg[i] = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);
                acpi_thermal_cpufreq_is_init = 1;

static void acpi_thermal_cpufreq_exit(void) {
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;

#else /* ! CONFIG_CPU_FREQ */

static void acpi_thermal_cpufreq_init(void) { return; }
static void acpi_thermal_cpufreq_exit(void) { return; }
static int acpi_thermal_cpufreq_increase(unsigned int cpu) { return -ENODEV; }
static int acpi_thermal_cpufreq_decrease(unsigned int cpu) { return -ENODEV; }

#endif /* CONFIG_CPU_FREQ */
acpi_processor_set_thermal_limit (

        struct acpi_processor *pr = NULL;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit");

        if ((type < ACPI_PROCESSOR_LIMIT_NONE)
            || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
                return_VALUE(-EINVAL);

        result = acpi_bus_get_device(handle, &device);
                return_VALUE(result);

        pr = (struct acpi_processor *) acpi_driver_data(device);
                return_VALUE(-ENODEV);

        /* Thermal limits are always relative to the current Px/Tx state. */
        if (pr->flags.throttling)
                pr->limit.thermal.tx = pr->throttling.state;

         * Our default policy is to only use throttling at the lowest
         * performance state.

        tx = pr->limit.thermal.tx;

        switch (type) {

        case ACPI_PROCESSOR_LIMIT_NONE:
                result = acpi_thermal_cpufreq_decrease(pr->id);

        case ACPI_PROCESSOR_LIMIT_INCREMENT:
                /* if going up: P-states first, T-states later */

                result = acpi_thermal_cpufreq_increase(pr->id);
                else if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "At maximum performance state\n"));

                if (pr->flags.throttling) {
                        if (tx == (pr->throttling.state_count - 1))
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "At maximum throttling state\n"));

        case ACPI_PROCESSOR_LIMIT_DECREMENT:
                /* if going down: T-states first, P-states later */

                if (pr->flags.throttling) {
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "At minimum throttling state\n"));

                result = acpi_thermal_cpufreq_decrease(pr->id);
                if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "At minimum performance state\n"));

        if (pr->flags.throttling) {
                pr->limit.thermal.px = 0;
                pr->limit.thermal.tx = tx;

                result = acpi_processor_apply_limit(pr);
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                "Unable to set thermal limit\n"));

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
                        pr->limit.thermal.px,
                        pr->limit.thermal.tx));

        return_VALUE(result);
acpi_processor_get_limit_info (
        struct acpi_processor *pr)

        ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info");

                return_VALUE(-EINVAL);

        if (pr->flags.throttling)
                pr->flags.limit = 1;
/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

struct proc_dir_entry *acpi_processor_dir = NULL;
static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)

        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        ACPI_FUNCTION_TRACE("acpi_processor_info_seq_show");

        seq_printf(seq, "processor id: %d\n"
                "bus mastering control: %s\n"
                "power management: %s\n"
                "throttling control: %s\n"
                "limit interface: %s\n",
                pr->flags.bm_control ? "yes" : "no",
                pr->flags.power ? "yes" : "no",
                pr->flags.throttling ? "yes" : "no",
                pr->flags.limit ? "yes" : "no");

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)

        return single_open(file, acpi_processor_info_seq_show,
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)

        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

        seq_printf(seq, "active state: C%d\n"
                "default state: C%d\n"
                "bus master activity: %08x\n",
                pr->power.default_state,
                pr->power.bm_activity);

        seq_puts(seq, "states:\n");

        for (i = 1; i < ACPI_C_STATE_COUNT; i++) {
                seq_printf(seq, " %cC%d: ",
                        (i == pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%d] ",
                                pr->power.states[i].promotion.state);
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%d] ",
                                pr->power.states[i].demotion.state);
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d]\n",
                        pr->power.states[i].latency,
                        pr->power.states[i].usage);

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)

        return single_open(file, acpi_processor_power_seq_show,
static int acpi_processor_throttling_seq_show(struct seq_file *seq, void *offset)

        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        ACPI_FUNCTION_TRACE("acpi_processor_throttling_seq_show");

        if (!(pr->throttling.state_count > 0)) {
                seq_puts(seq, "<not supported>\n");

        result = acpi_processor_get_throttling(pr);

                seq_puts(seq, "Could not determine current throttling state.\n");

        seq_printf(seq, "state count: %d\n"
                "active state: T%d\n",
                pr->throttling.state_count,
                pr->throttling.state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->throttling.state_count; i++)
                seq_printf(seq, " %cT%d: %02d%%\n",
                        (i == pr->throttling.state ? '*' : ' '), i,
                        (pr->throttling.states[i].performance ? pr->throttling.states[i].performance / 10 : 0));

static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file)

        return single_open(file, acpi_processor_throttling_seq_show,
acpi_processor_write_throttling (

        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        char state_string[12] = {'\0'};

        ACPI_FUNCTION_TRACE("acpi_processor_write_throttling");

        if (!pr || (count > sizeof(state_string) - 1))
                return_VALUE(-EINVAL);

        if (copy_from_user(state_string, buffer, count))
                return_VALUE(-EFAULT);

        state_string[count] = '\0';

        result = acpi_processor_set_throttling(pr,
                simple_strtoul(state_string, NULL, 0));
                return_VALUE(result);

        return_VALUE(count);
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)

        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        ACPI_FUNCTION_TRACE("acpi_processor_limit_seq_show");

        if (!pr->flags.limit) {
                seq_puts(seq, "<not supported>\n");

        seq_printf(seq, "active limit: P%d:T%d\n"
                "user limit: P%d:T%d\n"
                "thermal limit: P%d:T%d\n",
                pr->limit.state.px, pr->limit.state.tx,
                pr->limit.user.px, pr->limit.user.tx,
                pr->limit.thermal.px, pr->limit.thermal.tx);

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)

        return single_open(file, acpi_processor_limit_seq_show,
acpi_processor_write_limit (

        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        char limit_string[25] = {'\0'};

        ACPI_FUNCTION_TRACE("acpi_processor_write_limit");

        if (!pr || (count > sizeof(limit_string) - 1)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
                return_VALUE(-EINVAL);

        if (copy_from_user(limit_string, buffer, count)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n"));
                return_VALUE(-EFAULT);

        limit_string[count] = '\0';

        if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n"));
                return_VALUE(-EINVAL);

        if (pr->flags.throttling) {
                if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n"));
                        return_VALUE(-EINVAL);
                pr->limit.user.tx = tx;

        result = acpi_processor_apply_limit(pr);

        return_VALUE(count);
acpi_processor_add_fs (
        struct acpi_device *device)

        struct proc_dir_entry *entry = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_add_fs");

        if (!acpi_device_dir(device)) {
                acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
                        acpi_processor_dir);
                if (!acpi_device_dir(device))
                        return_VALUE(-ENODEV);
                acpi_device_dir(device)->owner = THIS_MODULE;

        /* 'info' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
                S_IRUGO, acpi_device_dir(device));
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Unable to create '%s' fs entry\n",
                        ACPI_PROCESSOR_FILE_INFO));
                entry->proc_fops = &acpi_processor_info_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                S_IRUGO, acpi_device_dir(device));
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Unable to create '%s' fs entry\n",
                        ACPI_PROCESSOR_FILE_POWER));
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;

        /* 'throttling' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
                S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Unable to create '%s' fs entry\n",
                        ACPI_PROCESSOR_FILE_THROTTLING));
                entry->proc_fops = &acpi_processor_throttling_fops;
                entry->proc_fops->write = acpi_processor_write_throttling;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;

        /* 'limit' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
                S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Unable to create '%s' fs entry\n",
                        ACPI_PROCESSOR_FILE_LIMIT));
                entry->proc_fops = &acpi_processor_limit_fops;
                entry->proc_fops->write = acpi_processor_write_limit;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
acpi_processor_remove_fs (
        struct acpi_device *device)

        ACPI_FUNCTION_TRACE("acpi_processor_remove_fs");

        if (acpi_device_dir(device)) {
                remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
                        acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, acpi_device_dir(device));
                remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
                acpi_device_dir(device) = NULL;
/* --------------------------------------------------------------------------
                                Driver Interface
   -------------------------------------------------------------------------- */
acpi_processor_get_info (
        struct acpi_processor *pr)

        acpi_status status = 0;
        union acpi_object object = {0};
        struct acpi_buffer buffer = {sizeof(union acpi_object), &object};
        static int cpu_index = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_get_info");

                return_VALUE(-EINVAL);

        if (num_online_cpus() > 1)

         * Extra Processor objects may be enumerated on MP systems with
         * less than the max # of CPUs.  They should be ignored.
        if ((cpu_index + 1) > num_online_cpus())
                return_VALUE(-ENODEV);

        acpi_processor_errata(pr);

         * Check to see if we have bus mastering arbitration control.  This
         * is required for proper C3 usage (to maintain cache coherency).
        if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) {
                pr->flags.bm_control = 1;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Bus mastering arbitration control present\n"));
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "No bus mastering arbitration control\n"));

         * Evaluate the processor object.  Note that it is common on SMP to
         * have the first (boot) processor with a valid PBLK address while
         * all others have a NULL address.
        status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Error evaluating processor object\n"));
                return_VALUE(-ENODEV);

         * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
         *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
        pr->id = cpu_index++;
        pr->acpi_id = object.processor.proc_id;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
                pr->acpi_id));

        if (!object.processor.pblk_address)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
        else if (object.processor.pblk_length != 6)
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n",
                        object.processor.pblk_length));

                pr->throttling.address = object.processor.pblk_address;
                pr->throttling.duty_offset = acpi_fadt.duty_offset;
                pr->throttling.duty_width = acpi_fadt.duty_width;
                pr->power.states[ACPI_STATE_C2].address =
                        object.processor.pblk_address + 4;
                pr->power.states[ACPI_STATE_C3].address =
                        object.processor.pblk_address + 5;
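                /*
                 * Layout note (editorial, per the ACPI spec): the 6-byte PBLK
                 * packs the 32-bit P_CNT throttling control register at
                 * offset 0 and the one-byte P_LVL2 and P_LVL3 command
                 * registers at offsets 4 and 5, which is why pblk_length must
                 * be exactly 6 and why C2/C3 use pblk_address + 4 and + 5.
                 */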
         * We don't care about error returns - we just try to mark
         * these reserved so that nobody else is confused into thinking
         * that this region might be unused..
         *
         * (In particular, allocating the IO range for Cardbus)
        request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        request_region(acpi_fadt.xpm_tmr_blk.address, 4, "ACPI timer");

        acpi_processor_get_power_info(pr);
#ifdef CONFIG_CPU_FREQ
        acpi_processor_ppc_has_changed(pr);
#endif
        acpi_processor_get_throttling_info(pr);
        acpi_processor_get_limit_info(pr);
acpi_processor_notify (

        struct acpi_processor *pr = (struct acpi_processor *) data;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_notify");

        if (acpi_bus_get_device(pr->handle, &device))

        switch (event) {
        case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
                acpi_processor_ppc_has_changed(pr);
                acpi_bus_generate_event(device, event,
                        pr->performance_platform_limit);
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_bus_generate_event(device, event, 0);
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Unsupported event [0x%x]\n", event));
acpi_processor_add (
        struct acpi_device *device)

        acpi_status status = AE_OK;
        struct acpi_processor *pr = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_add");

                return_VALUE(-EINVAL);

        pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL);
                return_VALUE(-ENOMEM);
        memset(pr, 0, sizeof(struct acpi_processor));

        pr->handle = device->handle;
        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
        acpi_driver_data(device) = pr;

        result = acpi_processor_get_info(pr);

        result = acpi_processor_add_fs(device);

        status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                acpi_processor_notify, pr);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Error installing notify handler\n"));

        processors[pr->id] = pr;

         * Install the idle handler if processor power management is supported.
         * Note that the default idle handler (default_idle) will be used on
         * platforms that only support C1.
        if ((pr->id == 0) && (pr->flags.power)) {
                pm_idle_save = pm_idle;
                pm_idle = acpi_processor_idle;

        printk(KERN_INFO PREFIX "%s [%s] (supports",
                acpi_device_name(device), acpi_device_bid(device));
        for (i = 1; i < ACPI_C_STATE_COUNT; i++)
                if (pr->power.states[i].valid)
                        printk(" C%d", i);
        if (pr->flags.throttling)
                printk(", %d throttling states", pr->throttling.state_count);

        acpi_processor_remove_fs(device);

        return_VALUE(result);
acpi_processor_remove (
        struct acpi_device *device,

        acpi_status status = AE_OK;
        struct acpi_processor *pr = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_remove");

        if (!device || !acpi_driver_data(device))
                return_VALUE(-EINVAL);

        pr = (struct acpi_processor *) acpi_driver_data(device);

        /* Unregister the idle handler when processor #0 is removed. */
                pm_idle = pm_idle_save;

        status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                acpi_processor_notify);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                        "Error removing notify handler\n"));

        acpi_processor_remove_fs(device);

        processors[pr->id] = NULL;
acpi_processor_init (void)

        ACPI_FUNCTION_TRACE("acpi_processor_init");

        memset(&processors, 0, sizeof(processors));
        memset(&errata, 0, sizeof(errata));

        acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        if (!acpi_processor_dir)
                return_VALUE(-ENODEV);
        acpi_processor_dir->owner = THIS_MODULE;

        result = acpi_bus_register_driver(&acpi_processor_driver);
                remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
                return_VALUE(-ENODEV);

        acpi_thermal_cpufreq_init();

        acpi_processor_ppc_init();

acpi_processor_exit (void)

        ACPI_FUNCTION_TRACE("acpi_processor_exit");

        acpi_processor_ppc_exit();

        acpi_thermal_cpufreq_exit();

        acpi_bus_unregister_driver(&acpi_processor_driver);

        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);