X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Facpi%2Fprocessor_idle.c;h=3f30af21574ea37c3d443f54e2050fa4fb85c77f;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=eb730a80952c6bc58c0cf3b0be80b9daf4aaf993;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb730a809..3f30af215 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *  			- Added processor hotplug support
  * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -38,6 +38,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
+#include <linux/latency.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -54,10 +55,10 @@ ACPI_MODULE_NAME("acpi_processor")
 #define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define C2_OVERHEAD		4	/* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD		4	/* 1us (3.579 ticks per us) */
 
-static void (*pm_idle_save) (void);
+static void (*pm_idle_save) (void) __read_mostly;
 module_param(max_cstate, uint, 0644);
 
-static unsigned int nocst = 0;
+static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
@@ -67,7 +68,7 @@ module_param(nocst, uint, 0000);
 /*
  * 100 HZ: 0x0000000F: 4 jiffies = 40ms
  * reduce history for more aggressive entry into C3
  */
-static unsigned int bm_history =
+static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
 /* --------------------------------------------------------------------------
@@ -97,6 +98,9 @@ static int set_max_cstate(struct dmi_system_id *id)
 /* Actually this shouldn't be __cpuinitdata, would be better to fix the
    callers to only run once -AK */
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
 	{ set_max_cstate, "IBM ThinkPad R40e", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
@@ -206,15 +210,36 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 
 static void acpi_safe_halt(void)
 {
-	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb__after_clear_bit();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
 	if (!need_resched())
 		safe_halt();
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 }
 
 static atomic_t c3_cpu_count;
 
+/* Common C-state entry for C2, C3, .. */
+static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
+{
+	if (cstate->space_id == ACPI_CSTATE_FFH) {
+		/* Call into architectural FFH based C-state */
+		acpi_processor_ffh_cstate_enter(cstate);
+	} else {
+		int unused;
+		/* IO port based C-state */
+		inb(cstate->address);
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
+		unused = inl(acpi_fadt.xpm_tmr_blk.address);
+	}
+}
+
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -261,21 +286,15 @@ static void acpi_processor_idle(void)
 		u32 bm_status = 0;
 		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
 
-		if (diff > 32)
-			diff = 32;
+		if (diff > 31)
+			diff = 31;
 
-		while (diff) {
-			/* if we didn't get called, assume there was busmaster activity */
-			diff--;
-			if (diff)
-				pr->power.bm_activity |= 0x1;
-			pr->power.bm_activity <<= 1;
-		}
+		pr->power.bm_activity <<= diff;
 
 		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
 				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
 		if (bm_status) {
-			pr->power.bm_activity++;
+			pr->power.bm_activity |= 0x1;
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
 					  1, ACPI_MTX_DO_NOT_LOCK);
 		}
@@ -287,16 +306,16 @@ static void acpi_processor_idle(void)
 		else if (errata.piix4.bmisx) {
 			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-				pr->power.bm_activity++;
+				pr->power.bm_activity |= 0x1;
 		}
 
 		pr->power.bm_check_timestamp = jiffies;
 
 		/*
-		 * Apply bus mastering demotion policy. Automatically demote
+		 * If bus mastering is or was active this jiffy, demote
 		 * to avoid a faulty transition.  Note that the processor
 		 * won't enter a low-power state during this call (to this
-		 * funciton) but should upon the next.
+		 * function) but should upon the next.
 		 *
 		 * TBD: A better policy might be to fallback to the demotion
 		 * state (use it for this quantum only) istead of
@@ -304,7 +323,8 @@
 		 * demoting -- and rely on duration as our sole demotion
 		 * qualification. This may, however, introduce DMA
 		 * issues (e.g. floppy DMA transfer overrun/underrun).
 		 */
-		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
+		if ((pr->power.bm_activity & 0x1) &&
+		    cx->demotion.threshold.bm) {
 			local_irq_enable();
 			next_state = cx->demotion.state;
 			goto end;
 		}
@@ -322,18 +342,20 @@
 		cx = &pr->power.states[ACPI_STATE_C1];
 #endif
 
-	cx->usage++;
-
 	/*
 	 * Sleep:
 	 * ------
 	 * Invoke the current Cx state to put the processor to sleep.
 	 */
 	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we
+		 * test NEED_RESCHED:
+		 */
+		smp_mb();
 		if (need_resched()) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
 			return;
 		}
@@ -364,14 +386,17 @@
 		/* Get start time (ticks) */
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C2 */
-		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL2 read */
-		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+
+#ifdef CONFIG_GENERIC_TIME
+		/* TSC halts in C2, so notify users */
+		mark_tsc_unstable();
+#endif
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -397,9 +422,7 @@
 		/* Get start time (ticks) */
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C3 */
-		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL3 read */
-		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		if (pr->flags.bm_check) {
@@ -409,9 +432,13 @@
 					  ACPI_MTX_DO_NOT_LOCK);
 		}
 
+#ifdef CONFIG_GENERIC_TIME
+		/* TSC halts in C3, so notify users */
+		mark_tsc_unstable();
+#endif
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
@@ -421,6 +448,9 @@
 		local_irq_enable();
 		return;
 	}
+	cx->usage++;
+	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
+		cx->time += sleep_ticks;
 
 	next_state = pr->power.state;
 
@@ -443,7 +473,8 @@
 	 */
 	if (cx->promotion.state &&
 	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
-		if (sleep_ticks > cx->promotion.threshold.ticks) {
+		if (sleep_ticks > cx->promotion.threshold.ticks &&
+		    cx->promotion.state->latency <= system_latency_constraint()) {
 			cx->promotion.count++;
 			cx->demotion.count = 0;
 			if (cx->promotion.count >=
@@ -484,8 +515,10 @@
       end:
 	/*
 	 * Demote if current state exceeds max_cstate
+	 * or if the latency of the current state is unacceptable
 	 */
-	if ((pr->power.state - pr->power.states) > max_cstate) {
+	if ((pr->power.state - pr->power.states) > max_cstate ||
+	    pr->power.state->latency > system_latency_constraint()) {
 		if (cx->demotion.state)
 			next_state = cx->demotion.state;
 	}
@@ -508,10 +541,9 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	struct acpi_processor_cx *higher = NULL;
 	struct acpi_processor_cx *cx;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	/*
 	 * This function sets the default Cx state policy (OS idle handler).
@@ -535,7 +567,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	}
 
 	if (!state_is_set)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* demotion */
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -574,18 +606,17 @@
 		higher = cx;
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (!pr->pblk)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* if info is obtained from pblk/fadt, type equals state */
 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
@@ -597,7 +628,7 @@
 	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 #endif
 
 	/* determine C2 and C3 address from pblk */
@@ -613,25 +644,20 @@
 			  pr->power.states[ACPI_STATE_C2].address,
 			  pr->power.states[ACPI_STATE_C3].address));
 
-	return_VALUE(0);
+	return 0;
 }
 
-static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
+static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
-
-	/* Zero initialize all the C-states info. */
-	memset(pr->power.states, 0, sizeof(pr->power.states));
-
-	/* set the first C-State to C1 */
-	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
-
-	/* the C0 state only exists as a filler in our array,
-	 * and all processors need to support C1 */
+	if (!pr->power.states[ACPI_STATE_C1].valid) {
+		/* set the first C-State to C1 */
+		/* all processors need to support C1 */
+		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
+		pr->power.states[ACPI_STATE_C1].valid = 1;
+	}
+	/* the C0 state only exists as a filler in our array */
 	pr->power.states[ACPI_STATE_C0].valid = 1;
-	pr->power.states[ACPI_STATE_C1].valid = 1;
-
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
@@ -643,30 +669,23 @@
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *cst;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
 
 	if (nocst)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
-	current_count = 1;
-
-	/* Zero initialize C2 onwards and prepare for fresh CST lookup */
-	for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++)
-		memset(&(pr->power.states[i]), 0,
-		       sizeof(struct acpi_processor_cx));
+	current_count = 0;
 
 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
-	cst = (union acpi_object *)buffer.pointer;
+	cst = buffer.pointer;
 
 	/* There must be at least 2 elements */
 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "not enough elements in _CST\n"));
+		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -675,8 +694,7 @@
 	/* Validate number of power states. */
 	if (count < 1 || count != cst->package.count - 1) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "count given by _CST is not valid\n"));
+		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -692,14 +710,14 @@
 
 		memset(&cx, 0, sizeof(cx));
 
-		element = (union acpi_object *)&(cst->package.elements[i]);
+		element = &(cst->package.elements[i]);
 		if (element->type != ACPI_TYPE_PACKAGE)
 			continue;
 
 		if (element->package.count != 4)
 			continue;
 
-		obj = (union acpi_object *)&(element->package.elements[0]);
+		obj = &(element->package.elements[0]);
 		if (obj->type != ACPI_TYPE_BUFFER)
 			continue;
@@ -710,30 +728,47 @@
 		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
 			continue;
 
-		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
-		    0 : reg->address;
-
 		/* There should be an easy way to extract an integer... */
-		obj = (union acpi_object *)&(element->package.elements[1]);
+		obj = &(element->package.elements[1]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
 
 		cx.type = obj->integer.value;
+		/*
+		 * Some buggy BIOSes won't list C1 in _CST -
+		 * Let acpi_processor_get_power_info_default() handle them later
+		 */
+		if (i == 1 && cx.type != ACPI_STATE_C1)
+			current_count++;
+
+		cx.address = reg->address;
+		cx.index = current_count + 1;
+
+		cx.space_id = ACPI_CSTATE_SYSTEMIO;
+		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+			if (acpi_processor_ffh_cstate_probe
+					(pr->id, &cx, reg) == 0) {
+				cx.space_id = ACPI_CSTATE_FFH;
+			} else if (cx.type != ACPI_STATE_C1) {
+				/*
+				 * C1 is a special case where FIXED_HARDWARE
+				 * can be handled in non-MWAIT way as well.
+				 * In that case, save this _CST entry info.
+				 * That is, we retain space_id of SYSTEM_IO for
+				 * halt based C1.
+				 * Otherwise, ignore this info and continue.
+				 */
+ */ + continue; + } + } - if ((cx.type != ACPI_STATE_C1) && - (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) - continue; - - if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3)) - continue; - - obj = (union acpi_object *)&(element->package.elements[2]); + obj = &(element->package.elements[2]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.latency = obj->integer.value; - obj = (union acpi_object *)&(element->package.elements[3]); + obj = &(element->package.elements[3]); if (obj->type != ACPI_TYPE_INTEGER) continue; @@ -764,17 +799,16 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) status = -EFAULT; end: - acpi_os_free(buffer.pointer); + kfree(buffer.pointer); - return_VALUE(status); + return status; } static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) { - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2"); if (!cx->address) - return_VOID; + return; /* * C2 latency must be less than or equal to 100 @@ -783,7 +817,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "latency too large [%d]\n", cx->latency)); - return_VOID; + return; } /* @@ -793,7 +827,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) cx->valid = 1; cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - return_VOID; + return; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -801,10 +835,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, { static int bm_check_flag; - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3"); if (!cx->address) - return_VOID; + return; /* * C3 latency must be less than or equal to 1000 @@ -813,7 +846,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "latency too large [%d]\n", cx->latency)); - return_VOID; + return; } /* @@ -826,7 +859,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, else if (errata.piix4.fdma) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 not supported on PIIX4 with Type-F DMA\n")); - return_VOID; + return; } /* All the logic here assumes flags.bm_check is same across all CPUs */ @@ -843,7 +876,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, if (!pr->flags.bm_control) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 support requires bus mastering control\n")); - return_VOID; + return; } } else { /* @@ -854,7 +887,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cache invalidation should work properly" " for C3 to be enabled on SMP systems\n")); - return_VOID; + return; } acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK); @@ -869,7 +902,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, cx->valid = 1; cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - return_VOID; + return; } static int acpi_processor_power_verify(struct acpi_processor *pr) @@ -878,12 +911,9 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) unsigned int working = 0; #ifdef ARCH_APICTIMER_STOPS_ON_C3 - struct cpuinfo_x86 *c = cpu_data + pr->id; + int timer_broadcast = 0; cpumask_t mask = cpumask_of_cpu(pr->id); - - if (c->x86_vendor == X86_VENDOR_INTEL) { - on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); - } + on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); #endif for (i = 1; i < 
@@ -896,15 +926,20 @@
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+			/* Some AMD systems fake C3 as C2, but still
+			   have timer troubles */
+			if (cx->valid &&
+			    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+				timer_broadcast++;
+#endif
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
-			if (cx->valid && c->x86_vendor == X86_VENDOR_INTEL) {
-				on_each_cpu(switch_APIC_timer_to_ipi,
-					    &mask, 1, 1);
-			}
+			if (cx->valid)
+				timer_broadcast++;
 #endif
 			break;
 		}
@@ -913,6 +948,11 @@
 			working++;
 	}
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+	if (timer_broadcast)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+#endif
+
 	return (working);
 }
 
@@ -921,16 +961,21 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	unsigned int i;
 	int result;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
 
 	/* NOTE: the idle thread may not be running while calling
 	 * this function */
 
-	/* Adding C1 state */
-	acpi_processor_get_power_info_default_c1(pr);
+	/* Zero initialize all the C-states info. */
+	memset(pr->power.states, 0, sizeof(pr->power.states));
+
 	result = acpi_processor_get_power_info_cst(pr);
 	if (result == -ENODEV)
-		acpi_processor_get_power_info_fadt(pr);
+		result = acpi_processor_get_power_info_fadt(pr);
+
+	if (result)
+		return result;
+
+	acpi_processor_get_power_info_default(pr);
 
 	pr->power.count = acpi_processor_power_verify(pr);
 
@@ -944,7 +989,7 @@
 	 */
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
-		return_VALUE(result);
+		return result;
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -958,24 +1003,23 @@
 		}
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
 	int result = 0;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (nocst) {
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
 	if (!pr->flags.power_setup_done)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
@@ -986,26 +1030,27 @@
 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
 		pm_idle = acpi_processor_idle;
 
-	return_VALUE(result);
+	return result;
 }
 
 /* proc interface */
 
 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 {
-	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
+	struct acpi_processor *pr = seq->private;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");
 
 	if (!pr)
		goto end;
 
 	seq_printf(seq, "active state:            C%zd\n"
 		   "max_cstate:              C%d\n"
-		   "bus master activity:     %08x\n",
+		   "bus master activity:     %08x\n"
+		   "maximum allowed latency: %d usec\n",
 		   pr->power.state ? pr->power.state - pr->power.states : 0,
-		   max_cstate, (unsigned)pr->power.bm_activity);
+		   max_cstate, (unsigned)pr->power.bm_activity,
+		   system_latency_constraint());
 
 	seq_puts(seq, "states:\n");
 
@@ -1048,13 +1093,14 @@
 		else
 			seq_puts(seq, "demotion[--] ");
 
-		seq_printf(seq, "latency[%03d] usage[%08d]\n",
+		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
 			   pr->power.states[i].latency,
-			   pr->power.states[i].usage);
+			   pr->power.states[i].usage,
+			   pr->power.states[i].time);
 	}
 
       end:
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
@@ -1063,22 +1109,45 @@
 			   PDE(inode)->data);
 }
 
-static struct file_operations acpi_processor_power_fops = {
+static const struct file_operations acpi_processor_power_fops = {
 	.open = acpi_processor_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
 
-int acpi_processor_power_init(struct acpi_processor *pr,
+#ifdef CONFIG_SMP
+static void smp_callback(void *v)
+{
+	/* we already woke the CPU up, nothing more to do */
+}
+
+/*
+ * This function gets called when a part of the kernel has a new latency
+ * requirement. This means we need to get all processors out of their C-state,
+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
+ * wakes them all right up.
+ */
+static int acpi_processor_latency_notify(struct notifier_block *b,
+					 unsigned long l, void *v)
+{
+	smp_call_function(smp_callback, NULL, 0, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_processor_latency_notifier = {
+	.notifier_call = acpi_processor_latency_notify,
+};
+#endif
+
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
 	acpi_status status = 0;
-	static int first_run = 0;
+	static int first_run;
 	struct proc_dir_entry *entry = NULL;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_init");
 
 	if (!first_run) {
 		dmi_check_system(processor_power_dmi_table);
@@ -1087,17 +1156,20 @@
 			       "ACPI: processor limited to max C-state %d\n",
 			       max_cstate);
 		first_run++;
+#ifdef CONFIG_SMP
+		register_latency_notifier(&acpi_processor_latency_notifier);
+#endif
 	}
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (acpi_fadt.cst_cnt && !nocst) {
 		status =
 		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
 		if (ACPI_FAILURE(status)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Notifying BIOS of _CST ability failed\n"));
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Notifying BIOS of _CST ability failed"));
 		}
 	}
 
@@ -1126,9 +1198,7 @@
 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  S_IRUGO, acpi_device_dir(device));
 	if (!entry)
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Unable to create '%s' fs entry\n",
-				  ACPI_PROCESSOR_FILE_POWER));
+		return -EIO;
 	else {
 		entry->proc_fops = &acpi_processor_power_fops;
 		entry->data = acpi_driver_data(device);
@@ -1137,13 +1207,12 @@
 
 	pr->flags.power_setup_done = 1;
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");
 
 	pr->flags.power_setup_done = 0;
 
@@ -1161,7 +1230,10 @@
 		 * copies of pm_idle before proceeding.
 		 */
 		cpu_idle_wait();
+#ifdef CONFIG_SMP
+		unregister_latency_notifier(&acpi_processor_latency_notifier);
+#endif
 	}
 
-	return_VALUE(0);
+	return 0;
 }