X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fkernel%2Fcpu%2Fcpufreq%2Fspeedstep-centrino.c;fp=arch%2Fi386%2Fkernel%2Fcpu%2Fcpufreq%2Fspeedstep-centrino.c;h=c173c0fa117a927734bf9c2ee389c1d5f7ada91d;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=b77f1358bd79e341bd3d2a9a8606819bb1a1f01a;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index b77f1358b..c173c0fa1 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -2,21 +2,26 @@
  * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
  * M (part of the Centrino chipset).
  *
- * Since the original Pentium M, most new Intel CPUs support Enhanced
- * SpeedStep.
- *
  * Despite the "SpeedStep" in the name, this is almost entirely unlike
  * traditional SpeedStep.
  *
  * Modelled on speedstep.c
  *
  * Copyright (C) 2003 Jeremy Fitzhardinge
+ *
+ * WARNING WARNING WARNING
+ *
+ * This driver manipulates the PERF_CTL MSR, which is only somewhat
+ * documented. While it seems to work on my laptop, it has not been
+ * tested anywhere else, and it may not work for you, do strange
+ * things or simply crash.
  */
 
 #include
 #include
 #include
 #include
+#include
 #include	/* current */
 #include
 #include
@@ -31,7 +36,7 @@
 #include
 
 #define PFX		"speedstep-centrino: "
-#define MAINTAINER	"cpufreq@lists.linux.org.uk"
+#define MAINTAINER	"Jeremy Fitzhardinge "
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
@@ -245,7 +250,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 
 	if (model->cpu_id == NULL) {
 		/* No match at all */
-		dprintk("no support for CPU model \"%s\": "
+		dprintk(KERN_INFO PFX "no support for CPU model \"%s\": "
 		       "send /proc/cpuinfo to " MAINTAINER "\n",
 		       cpu->x86_model_id);
 		return -ENOENT;
@@ -253,10 +258,10 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 
 	if (model->op_points == NULL) {
 		/* Matched a non-match */
-		dprintk("no table support for CPU model \"%s\"\n",
+		dprintk(KERN_INFO PFX "no table support for CPU model \"%s\"\n",
 		       cpu->x86_model_id);
 #ifndef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-		dprintk("try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
+		dprintk(KERN_INFO PFX "try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
 #endif
 		return -ENOENT;
 	}
@@ -346,36 +351,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 
 #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
 
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
-
-/*
- * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
- * library
- *
- * Before doing the actual init, we need to do _PSD related setup whenever
- * supported by the BIOS. These are handled by this early_init routine.
- */
-static int centrino_cpu_early_init_acpi(void)
-{
-	unsigned int i, j;
-	struct acpi_processor_performance *data;
-
-	for_each_possible_cpu(i) {
-		data = kzalloc(sizeof(struct acpi_processor_performance),
-				GFP_KERNEL);
-		if (!data) {
-			for_each_possible_cpu(j) {
-				kfree(acpi_perf_data[j]);
-				acpi_perf_data[j] = NULL;
-			}
-			return (-ENOMEM);
-		}
-		acpi_perf_data[i] = data;
-	}
-
-	acpi_processor_preregister_performance(acpi_perf_data);
-	return 0;
-}
+static struct acpi_processor_performance p;
 
 /*
  * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -389,57 +365,46 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 	unsigned long		cur_freq;
 	int			result = 0, i;
 	unsigned int		cpu = policy->cpu;
-	struct acpi_processor_performance	*p;
-
-	p = acpi_perf_data[cpu];
 
 	/* register with ACPI core */
-	if (acpi_processor_register_performance(p, cpu)) {
-		dprintk(PFX "obtaining ACPI data failed\n");
+	if (acpi_processor_register_performance(&p, cpu)) {
+		dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
 		return -EIO;
 	}
-	policy->shared_type = p->shared_type;
-	/*
-	 * Will let policy->cpus know about dependency only when software
-	 * coordination is required.
-	 */
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
-	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-		policy->cpus = p->shared_cpu_map;
 
 	/* verify the acpi_data */
-	if (p->state_count <= 1) {
+	if (p.state_count <= 1) {
 		dprintk("No P-States\n");
 		result = -ENODEV;
 		goto err_unreg;
 	}
 
-	if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-	    (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			p->control_register.space_id, p->status_register.space_id);
+			p.control_register.space_id, p.status_register.space_id);
 		result = -EIO;
 		goto err_unreg;
 	}
 
-	for (i=0; i<p->state_count; i++) {
-		if (p->states[i].control != p->states[i].status) {
+	for (i=0; i<p.state_count; i++) {
+		if (p.states[i].control != p.states[i].status) {
 			dprintk("Different control (%llu) and status values (%llu)\n",
-				p->states[i].control, p->states[i].status);
+				p.states[i].control, p.states[i].status);
 			result = -EINVAL;
 			goto err_unreg;
 		}
 
-		if (!p->states[i].core_frequency) {
+		if (!p.states[i].core_frequency) {
 			dprintk("Zero core frequency for state %u\n", i);
 			result = -EINVAL;
 			goto err_unreg;
 		}
 
-		if (p->states[i].core_frequency > p->states[0].core_frequency) {
+		if (p.states[i].core_frequency > p.states[0].core_frequency) {
 			dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
-				p->states[i].core_frequency, p->states[0].core_frequency);
-			p->states[i].core_frequency = 0;
+				p.states[i].core_frequency, p.states[0].core_frequency);
+			p.states[i].core_frequency = 0;
 			continue;
 		}
 	}
@@ -451,26 +416,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 	}
 
 	centrino_model[cpu]->model_name=NULL;
-	centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
+	centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
 	centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
-					(p->state_count + 1), GFP_KERNEL);
+					(p.state_count + 1), GFP_KERNEL);
 	if (!centrino_model[cpu]->op_points) {
 		result = -ENOMEM;
 		goto err_kfree;
 	}
-	for (i=0; i<p->state_count; i++) {
-		centrino_model[cpu]->op_points[i].index = p->states[i].control;
-		centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
+	for (i=0; i<p.state_count; i++) {
+		centrino_model[cpu]->op_points[i].index = p.states[i].control;
+		centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
 		dprintk("adding state %i with frequency %u and control value %04x\n",
 			i, centrino_model[cpu]->op_points[i].frequency,
 			centrino_model[cpu]->op_points[i].index);
 	}
-	centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
+	centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
 
 	cur_freq = get_cur_freq(cpu);
 
-	for (i=0; i<p->state_count; i++) {
-		if (!p->states[i].core_frequency) {
+	for (i=0; i<p.state_count; i++) {
+		if (!p.states[i].core_frequency) {
 			dprintk("skipping state %u\n", i);
 			centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
@@ -486,7 +451,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 		}
 
 		if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
-			p->state = i;
+			p.state = i;
 	}
 
 	/* notify BIOS that we exist */
@@ -499,13 +464,12 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
  err_kfree:
 	kfree(centrino_model[cpu]);
  err_unreg:
-	acpi_processor_unregister_performance(p, cpu);
-	dprintk(PFX "invalid ACPI data\n");
+	acpi_processor_unregister_performance(&p, cpu);
+	dprintk(KERN_INFO PFX "invalid ACPI data\n");
 	return (result);
 }
 #else
 static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
-static inline int centrino_cpu_early_init_acpi(void) { return 0; }
 #endif
 
 static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -515,13 +479,15 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	unsigned l, h;
 	int ret;
 	int i;
+	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
 	/* Only Intel makes Enhanced Speedstep-capable CPUs */
 	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
-	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+	}
 
 	if (centrino_cpu_init_acpi(policy)) {
 		if (policy->cpu != 0)
@@ -535,7 +501,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 			centrino_cpu[policy->cpu] = &cpu_ids[i];
 
 		if (!centrino_cpu[policy->cpu]) {
-			dprintk("found unsupported CPU with "
+			dprintk(KERN_INFO PFX "found unsupported CPU with "
 				"Enhanced SpeedStep: send /proc/cpuinfo to "
 				MAINTAINER "\n");
 			return -ENODEV;
@@ -591,15 +557,10 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
 	if (!centrino_model[cpu]->model_name) {
-		static struct acpi_processor_performance *p;
-
-		if (acpi_perf_data[cpu]) {
-			p = acpi_perf_data[cpu];
-			dprintk("unregistering and freeing ACPI data\n");
-			acpi_processor_unregister_performance(p, cpu);
-			kfree(centrino_model[cpu]->op_points);
-			kfree(centrino_model[cpu]);
-		}
+		dprintk("unregistering and freeing ACPI data\n");
+		acpi_processor_unregister_performance(&p, cpu);
+		kfree(centrino_model[cpu]->op_points);
+		kfree(centrino_model[cpu]);
 	}
 #endif
 
@@ -633,128 +594,63 @@ static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	unsigned int	newstate = 0;
-	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
+	unsigned int	msr, oldmsr, h, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
-	cpumask_t		online_policy_cpus;
 	cpumask_t		saved_mask;
-	cpumask_t		set_mask;
-	cpumask_t		covered_cpus;
-	int			retval = 0;
-	unsigned int		j, k, first_cpu, tmp;
+	int			retval;
 
-	if (unlikely(centrino_model[cpu] == NULL))
+	if (centrino_model[cpu] == NULL)
 		return -ENODEV;
 
-	if (unlikely(cpufreq_frequency_table_target(policy,
-			centrino_model[cpu]->op_points,
-			target_freq,
-			relation,
-			&newstate))) {
-		return -EINVAL;
+	/*
+	 * Support for SMP systems.
+	 * Make sure we are running on the CPU that wants to change frequency
+	 */
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, policy->cpus);
+	if (!cpu_isset(smp_processor_id(), policy->cpus)) {
+		dprintk("couldn't limit to CPUs in this domain\n");
+		return(-EAGAIN);
 	}
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-	online_policy_cpus = policy->cpus;
-#endif
+	if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq, relation, &newstate)) {
+		retval = -EINVAL;
+		goto migrate_end;
+	}
 
-	saved_mask = current->cpus_allowed;
-	first_cpu = 1;
-	cpus_clear(covered_cpus);
-	for_each_cpu_mask(j, online_policy_cpus) {
-		/*
-		 * Support for SMP systems.
-		 * Make sure we are running on CPU that wants to change freq
-		 */
-		cpus_clear(set_mask);
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			cpus_or(set_mask, set_mask, online_policy_cpus);
-		else
-			cpu_set(j, set_mask);
-
-		set_cpus_allowed(current, set_mask);
-		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
-			dprintk("couldn't limit to CPUs in this domain\n");
-			retval = -EAGAIN;
-			if (first_cpu) {
-				/* We haven't started the transition yet. */
-				goto migrate_end;
-			}
-			break;
-		}
+	msr = centrino_model[cpu]->op_points[newstate].index;
+	rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 
-		msr = centrino_model[cpu]->op_points[newstate].index;
-
-		if (first_cpu) {
-			rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-			if (msr == (oldmsr & 0xffff)) {
-				dprintk("no change needed - msr was and needs "
-					"to be %x\n", oldmsr);
-				retval = 0;
-				goto migrate_end;
-			}
-
-			freqs.old = extract_clock(oldmsr, cpu, 0);
-			freqs.new = extract_clock(msr, cpu, 0);
-
-			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
-				target_freq, freqs.old, freqs.new, msr);
-
-			for_each_cpu_mask(k, online_policy_cpus) {
-				freqs.cpu = k;
-				cpufreq_notify_transition(&freqs,
-					CPUFREQ_PRECHANGE);
-			}
-
-			first_cpu = 0;
-			/* all but 16 LSB are reserved, treat them with care */
-			oldmsr &= ~0xffff;
-			msr &= 0xffff;
-			oldmsr |= msr;
-		}
+	if (msr == (oldmsr & 0xffff)) {
+		retval = 0;
+		dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
+		goto migrate_end;
+	}
 
-		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			break;
+	freqs.cpu = cpu;
+	freqs.old = extract_clock(oldmsr, cpu, 0);
+	freqs.new = extract_clock(msr, cpu, 0);
 
-		cpu_set(j, covered_cpus);
-	}
+	dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
+		target_freq, freqs.old, freqs.new, msr);
 
-	for_each_cpu_mask(k, online_policy_cpus) {
-		freqs.cpu = k;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 
-	if (unlikely(retval)) {
-		/*
-		 * We have failed halfway through the frequency change.
-		 * We have sent callbacks to policy->cpus and
-		 * MSRs have already been written on coverd_cpus.
-		 * Best effort undo..
-		 */
+	/* all but 16 LSB are "reserved", so treat them with
+	   care */
+	oldmsr &= ~0xffff;
+	msr &= 0xffff;
+	oldmsr |= msr;
 
-		if (!cpus_empty(covered_cpus)) {
-			for_each_cpu_mask(j, covered_cpus) {
-				set_cpus_allowed(current, cpumask_of_cpu(j));
-				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-			}
-		}
+	wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 
-		tmp = freqs.new;
-		freqs.new = freqs.old;
-		freqs.old = tmp;
-		for_each_cpu_mask(j, online_policy_cpus) {
-			freqs.cpu = j;
-			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-		}
-	}
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
+	retval = 0;
 migrate_end:
 	set_cpus_allowed(current, saved_mask);
-	return 0;
+	return (retval);
 }
 
 static struct freq_attr* centrino_attr[] = {
@@ -796,25 +692,12 @@ static int __init centrino_init(void)
 	if (!cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
-	centrino_cpu_early_init_acpi();
-
 	return cpufreq_register_driver(&centrino_driver);
 }
 
 static void __exit centrino_exit(void)
 {
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-	unsigned int j;
-#endif
-
-	cpufreq_unregister_driver(&centrino_driver);
-
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-	for_each_possible_cpu(j) {
-		kfree(acpi_perf_data[j]);
-		acpi_perf_data[j] = NULL;
-	}
-#endif
+	cpufreq_unregister_driver(&centrino_driver);
 }
 
 MODULE_AUTHOR ("Jeremy Fitzhardinge ");