linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index ce9d5bb..be87c5e 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
  *  Mikael Pettersson  : PM converted to driver model. Disable/enable API.
  */
 
+#include <linux/config.h>
+#include <linux/mm.h>
 #include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
-#include <linux/percpu.h>
 
 #include <asm/smp.h>
+#include <asm/div64.h>
 #include <asm/nmi.h>
-#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -95,9 +100,6 @@ int nmi_active;
        (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|     \
         P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
-#define ARCH_PERFMON_NMI_EVENT_SEL     ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK   ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -106,7 +108,7 @@ int nmi_active;
 static __init void nmi_cpu_busy(void *data)
 {
        volatile int *endflag = data;
-       local_irq_enable_in_hardirq();
+       local_irq_enable();
        /* Intentionally don't use cpu_relax here. This is
           to make sure that the performance counter really ticks,
           even if there is a simulator or similar that catches the
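The comment above explains the trick: a waiting CPU must stay genuinely busy, or its cycle counter stops and the check below would falsely flag it. A minimal sketch of the flag-and-spin pattern, with illustrative names (demo_endflag, demo_cpu_busy) that are not part of this file:

static volatile int demo_endflag;

static void demo_cpu_busy(void *unused)
{
        local_irq_enable();
        /* Deliberately no cpu_relax(): a pause instruction could keep
         * the performance counter from ticking on some hardware. */
        while (demo_endflag == 0)
                mb();
}

The caller fires this on every other CPU via smp_call_function(), samples the NMI counts, and finally sets demo_endflag to 1 to release the spinners.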
@@ -136,12 +138,12 @@ static int __init check_nmi_watchdog(void)
        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-       for_each_possible_cpu(cpu)
+       for_each_cpu(cpu)
                prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); /* wait 10 ticks */
 
-       for_each_possible_cpu(cpu) {
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
 #ifdef CONFIG_SMP
                /* Check cpu_callin_map here because that is set
                   after the timer is started. */
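After the mdelay() the function compares each CPU's NMI count against the snapshot taken above. A sketch of that verdict, reconstructed from the surrounding lines (treat the exact message and the "at least 5 NMIs in 10 ticks" margin as this file's convention, not a spec):

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                unsigned int delta = per_cpu(irq_stat, cpu).__nmi_count
                                   - prev_nmi_count[cpu];
                if (delta <= 5)
                        printk("CPU#%d: NMI count stuck (%u in 10 ticks)\n",
                               cpu, delta);
        }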
@@ -210,8 +212,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
        if (nmi_active <= 0)
@@ -221,10 +221,6 @@ static void disable_lapic_nmi_watchdog(void)
                wrmsr(MSR_K7_EVNTSEL0, 0, 0);
                break;
        case X86_VENDOR_INTEL:
-               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                       disable_intel_arch_watchdog();
-                       break;
-               }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
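Disabling a perfctr-based watchdog only requires clearing the event-select MSR: with no event programmed the counter stops counting, and with it stop the overflow NMIs that drive the watchdog. A generic sketch (the helper name is illustrative):

static void demo_disable_perfctr_watchdog(unsigned int evntsel_msr)
{
        /* Zeroing the event select halts the counter and thus
         * the watchdog NMIs it generates. */
        wrmsr(evntsel_msr, 0, 0);
}

The AMD case above does exactly this with MSR_K7_EVNTSEL0; the Intel cases below pick the MSR by family and model.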
@@ -453,53 +449,6 @@ static int setup_p4_watchdog(void)
        return 1;
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-       unsigned ebx;
-
-       /*
-        * Check whether the Architectural PerfMon supports
-        * Unhalted Core Cycles Event or not.
-        * NOTE: Corresponding bit = 0 in ebx indicates event present.
-        */
-       ebx = cpuid_ebx(10);
-       if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-               wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-       unsigned int evntsel;
-       unsigned ebx;
-
-       /*
-        * Check whether the Architectural PerfMon supports
-        * Unhalted Core Cycles Event or not.
-        * NOTE: Corresponding bit = 0 in ebx indicates event present.
-        */
-       ebx = cpuid_ebx(10);
-       if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-               return 0;
-
-       nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-       clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-       clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-       evntsel = ARCH_PERFMON_EVENTSEL_INT
-               | ARCH_PERFMON_EVENTSEL_OS
-               | ARCH_PERFMON_EVENTSEL_USR
-               | ARCH_PERFMON_NMI_EVENT_SEL
-               | ARCH_PERFMON_NMI_EVENT_UMASK;
-
-       wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-       write_watchdog_counter("INTEL_ARCH_PERFCTR0");
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-       wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-       return 1;
-}
-
 void setup_apic_nmi_watchdog (void)
 {
        switch (boot_cpu_data.x86_vendor) {
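The setup_intel_arch_watchdog() removed above keys off CPUID leaf 0xA (10): EBX carries one bit per architectural event, and a zero bit means the event is available, an inverted sense that is easy to get wrong. A hedged sketch of that availability test, reusing the constant from the removed code (it is gone from this tree along with the intel_arch_perfmon.h include):

static int demo_arch_perfmon_has_core_cycles(void)
{
        unsigned int ebx = cpuid_ebx(10);

        /* Bit clear == Unhalted Core Cycles event is supported. */
        return !(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
}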
@@ -509,11 +458,6 @@ void setup_apic_nmi_watchdog (void)
                setup_k7_watchdog();
                break;
        case X86_VENDOR_INTEL:
-               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                       if (!setup_intel_arch_watchdog())
-                               return;
-                       break;
-               }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
@@ -566,7 +510,7 @@ void touch_nmi_watchdog (void)
         * Just reset the alert counters, (other CPUs might be
         * spinning on locks we hold):
         */
-       for_each_possible_cpu(i)
+       for (i = 0; i < NR_CPUS; i++)
                alert_counter[i] = 0;
 
        /*
@@ -574,7 +518,6 @@ void touch_nmi_watchdog (void)
         */
        touch_softlockup_watchdog();
 }
-EXPORT_SYMBOL(touch_nmi_watchdog);
 
 extern void die_nmi(struct pt_regs *, const char *msg);
 
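touch_nmi_watchdog() exists for code that legitimately keeps a CPU busy with interrupts off for longer than the watchdog tolerates. A typical caller pattern (device_ready() is a hypothetical stand-in for whatever condition is being polled):

        while (!device_ready()) {
                touch_nmi_watchdog();   /* keep alert_counter[] at zero */
                udelay(100);
        }

With the EXPORT_SYMBOL dropped in this tree, only built-in code can pet the watchdog this way; modules cannot.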
@@ -586,8 +529,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
         * always switch the stack NMI-atomically, it's safe to use
         * smp_processor_id().
         */
-       unsigned int sum;
-       int cpu = smp_processor_id();
+       int sum, cpu = smp_processor_id();
 
        sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
 
@@ -601,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
                        /*
                         * die_nmi will return ONLY if NOTIFY_STOP happens..
                         */
-                       die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
+                       die_nmi(regs, "NMI Watchdog detected LOCKUP");
        } else {
                last_irq_sums[cpu] = sum;
                alert_counter[cpu] = 0;
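The lockup test itself sits between these two hunks; reconstructed from the surrounding lines it reads, as a sketch:

                alert_counter[cpu]++;
                if (alert_counter[cpu] == 5 * nmi_hz)
                        die_nmi(regs, "NMI Watchdog detected LOCKUP");

Since alert_counter advances once per watchdog NMI and nmi_hz NMIs arrive per second, the threshold amounts to roughly five seconds during which apic_timer_irqs has not moved.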
@@ -618,8 +560,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
                        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
                        apic_write(APIC_LVTPC, APIC_DM_NMI);
                }
-               else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
-                        nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+               else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
                        /* Only P6 based Pentium M needs to re-unmask
                         * the apic vector, but it doesn't hurt
                         * other P6 variants */
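On P4 an overflow NMI sets the OVF bit in the CCCR and masks the LVTPC entry, so both must be rewritten, as the first branch above does, before another watchdog NMI can be delivered. The full re-arm sequence, sketched with the names this file uses (treat the exact ordering as illustrative):

                wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); /* clear OVF */
                apic_write(APIC_LVTPC, APIC_DM_NMI);        /* unmask LVTPC */
                write_watchdog_counter(NULL); /* reload -(cpu_khz*1000/nmi_hz) */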