#include "speedstep-lib.h"
#define PFX "p4-clockmod: "
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg)
/*
* Duty Cycle (3bits), note DC_DISABLE is not specified in
return -EINVAL;
rdmsr(MSR_IA32_THERM_STATUS, l, h);
-
+#if 0
if (l & 0x01)
- dprintk("CPU#%d currently thermal throttled\n", cpu);
-
+ printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu);
+#endif
if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
rdmsr(MSR_IA32_THERM_CONTROL, l, h);
if (newstate == DC_DISABLE) {
- dprintk("CPU#%d disabling modulation\n", cpu);
+ /* printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); */
wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
} else {
- dprintk("CPU#%d setting duty cycle to %d%%\n",
- cpu, ((125 * newstate) / 10));
+ /* printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n",
+ cpu, ((125 * newstate) / 10)); */
/* bits 63 - 5 : reserved
* bit 4 : enable/disable
* bits 3-1 : duty cycle
{
unsigned int newstate = DC_RESV;
struct cpufreq_freqs freqs;
- cpumask_t cpus_allowed;
+ cpumask_t cpus_allowed, affected_cpu_map;
int i;
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
if (freqs.new == freqs.old)
return 0;
+ /* switch to physical CPU where state is to be changed */
+ cpus_allowed = current->cpus_allowed;
+
+ /* only run on CPU to be set, or on its sibling */
+#ifdef CONFIG_SMP
+ affected_cpu_map = cpu_sibling_map[policy->cpu];
+#else
+ affected_cpu_map = cpumask_of_cpu(policy->cpu);
+#endif
+
/* notifiers */
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask(i, affected_cpu_map) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
- cpus_allowed = current->cpus_allowed;
-
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask(i, affected_cpu_map) {
cpumask_t this_cpu = cpumask_of_cpu(i);
set_cpus_allowed(current, this_cpu);
set_cpus_allowed(current, cpus_allowed);
/* notifiers */
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask(i, affected_cpu_map) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
int cpuid = 0;
unsigned int i;
-#ifdef CONFIG_SMP
- policy->cpus = cpu_sibling_map[policy->cpu];
-#endif
-
/* Errata workaround */
cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
switch (cpuid) {
case 0x0f11:
case 0x0f12:
has_N44_O17_errata[policy->cpu] = 1;
- dprintk("has errata -- disabling low frequencies\n");
}
/* get max frequency */
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
- cpumask_t cpus_allowed;
+ cpumask_t cpus_allowed, affected_cpu_map;
u32 l, h;
cpus_allowed = current->cpus_allowed;
+ affected_cpu_map = cpumask_of_cpu(cpu);
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- BUG_ON(smp_processor_id() != cpu);
+ set_cpus_allowed(current, affected_cpu_map);
+ BUG_ON(!cpu_isset(smp_processor_id(), affected_cpu_map));
rdmsr(MSR_IA32_THERM_CONTROL, l, h);
static int __init cpufreq_p4_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
- int ret;
/*
* THERM_CONTROL is architectural for IA32 now, so
!test_bit(X86_FEATURE_ACC, c->x86_capability))
return -ENODEV;
- ret = cpufreq_register_driver(&p4clockmod_driver);
- if (!ret)
- printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
+ printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
- return (ret);
+ return cpufreq_register_driver(&p4clockmod_driver);
}
late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);
+