/*
 *  linux/arch/i386/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson : Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
 *  Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>

#include <asm/smp.h>
#include <asm/div64.h>
#include <asm/nmi.h>
#include <asm/intel_arch_perfmon.h>

#include "mach_traps.h"

unsigned int nmi_watchdog = NMI_NONE;
extern int unknown_nmi_panic;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;    /* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;
extern void show_registers(struct pt_regs *regs);

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG      (1<<0)
#define LAPIC_NMI_RESERVED      (1<<1)

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;

#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED

#define MSR_P4_MISC_ENABLE      0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL   (1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
#define MSR_P4_PERFCTR0         0x300
#define MSR_P4_CCCR0            0x360
#define P4_ESCR_EVENT_SELECT(N) ((N)<<25)
#define P4_ESCR_OS              (1<<3)
#define P4_ESCR_USR             (1<<2)
#define P4_CCCR_OVF_PMI0        (1<<26)
#define P4_CCCR_OVF_PMI1        (1<<27)
#define P4_CCCR_THRESHOLD(N)    ((N)<<20)
#define P4_CCCR_COMPLEMENT      (1<<19)
#define P4_CCCR_COMPARE         (1<<18)
#define P4_CCCR_REQUIRED        (3<<16)
#define P4_CCCR_ESCR_SELECT(N)  ((N)<<13)
#define P4_CCCR_ENABLE          (1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0      0x30C
#define P4_NMI_CRU_ESCR0        (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0 \
        (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
         P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)

#define ARCH_PERFMON_NMI_EVENT_SEL      ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
        volatile int *endflag = data;

        local_irq_enable_in_hardirq();
        /* Intentionally don't use cpu_relax here. This is
           to make sure that the performance counter really ticks,
           even if there is a simulator or similar that catches the
           pause instruction. On a real HT machine this is fine because
           all other CPUs are busy with "useless" delay loops and don't
           care if they get somewhat less cycles. */
        while (*endflag == 0)
                barrier();
}

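/*
 * Boot-time sanity check: make all CPUs busy, wait roughly ten
 * watchdog periods, and verify that each CPU's NMI count advanced.
 * A CPU whose count moved by five or fewer ticks is reported as
 * stuck and the watchdog is deactivated.
 */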
static int __init check_nmi_watchdog(void)
{
        volatile int endflag = 0;
        unsigned int *prev_nmi_count;
        int cpu;

        if (nmi_watchdog == NMI_NONE)
                return 0;

        prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
        if (!prev_nmi_count)
                return -1;

        printk(KERN_INFO "Testing NMI watchdog ... ");

        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

        for_each_possible_cpu(cpu)
                prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); // wait 10 ticks

        for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
                /* Check cpu_callin_map here because that is set
                   after the timer is started. */
                if (!cpu_isset(cpu, cpu_callin_map))
                        continue;
#endif
                if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
                        endflag = 1;
                        printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
                                cpu,
                                prev_nmi_count[cpu],
                                nmi_count(cpu));
                        nmi_active = 0;
                        lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
                        kfree(prev_nmi_count);
                        return -1;
                }
        }
        endflag = 1;
        printk("OK.\n");

        /* now that we know it works we can reduce NMI frequency to
           something more reasonable; makes a difference in some configs */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = 1;

        kfree(prev_nmi_count);
        return 0;
}
/* This needs to happen later in boot so counters are working */
late_initcall(check_nmi_watchdog);

static int __init setup_nmi_watchdog(char *str)
{
        int nmi;

        get_option(&str, &nmi);

        if (nmi >= NMI_INVALID)
                return 0;
        if (nmi == NMI_NONE)
                nmi_watchdog = nmi;
        /*
         * If any other x86 CPU has a local APIC, then
         * please test the NMI stuff there and send me the
         * missing bits. Right now Intel P6/P4 and AMD K7 only.
         */
        if ((nmi == NMI_LOCAL_APIC) &&
                        (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
                        (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15))
                nmi_watchdog = nmi;
        if ((nmi == NMI_LOCAL_APIC) &&
                        (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
                        (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15))
                nmi_watchdog = nmi;
        /*
         * We can enable the IO-APIC watchdog
         * unconditionally.
         */
        if (nmi == NMI_IO_APIC) {
                nmi_active = 1;
                nmi_watchdog = nmi;
        }
        return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_intel_arch_watchdog(void);

static void disable_lapic_nmi_watchdog(void)
{
        if (nmi_active <= 0)
                return;
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                wrmsr(MSR_K7_EVNTSEL0, 0, 0);
                break;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        disable_intel_arch_watchdog();
                        break;
                }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
                                break;
                        wrmsr(MSR_P6_EVNTSEL0, 0, 0);
                        break;
                case 15:
                        if (boot_cpu_data.x86_model > 0x4)
                                break;
                        wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
                        wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
                        break;
                }
                break;
        }
        nmi_active = -1;
        /* tell do_nmi() and others that we're not active any more */
        nmi_watchdog = NMI_NONE;
}

static void enable_lapic_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_LOCAL_APIC;
                setup_apic_nmi_watchdog();
        }
}

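/*
 * Reservation API: another user of the lapic NMI hardware (a
 * profiling driver, for instance) can take the perfctr away from
 * the watchdog with reserve_lapic_nmi() and hand it back with
 * release_lapic_nmi(), which re-enables the watchdog if it owned
 * the hardware before.
 */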
int reserve_lapic_nmi(void)
{
        unsigned int old_owner;

        spin_lock(&lapic_nmi_owner_lock);
        old_owner = lapic_nmi_owner;
        lapic_nmi_owner |= LAPIC_NMI_RESERVED;
        spin_unlock(&lapic_nmi_owner_lock);
        if (old_owner & LAPIC_NMI_RESERVED)
                return -EBUSY;
        if (old_owner & LAPIC_NMI_WATCHDOG)
                disable_lapic_nmi_watchdog();
        return 0;
}

void release_lapic_nmi(void)
{
        unsigned int new_owner;

        spin_lock(&lapic_nmi_owner_lock);
        new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
        lapic_nmi_owner = new_owner;
        spin_unlock(&lapic_nmi_owner_lock);
        if (new_owner & LAPIC_NMI_WATCHDOG)
                enable_lapic_nmi_watchdog();
}

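/*
 * The IO-APIC timer based watchdog needs none of the perfctr state
 * above: disabling it just drops the NMI callback and marks the
 * watchdog inactive.
 */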
void disable_timer_nmi_watchdog(void)
{
        if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
                return;

        unset_nmi_callback();
        nmi_active = -1;
        nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_IO_APIC;
                touch_nmi_watchdog();
                nmi_active = 1;
        }
}

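/*
 * Power management: the perfctr and LVTPC setup is lost across a
 * suspend, so remember whether the lapic watchdog was active and
 * reprogram it on resume.
 */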
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        nmi_pm_active = nmi_active;
        disable_lapic_nmi_watchdog();
        return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
        if (nmi_pm_active > 0)
                enable_lapic_nmi_watchdog();
        return 0;
}

static struct sysdev_class nmi_sysclass = {
        set_kset_name("lapic_nmi"),
        .resume         = lapic_nmi_resume,
        .suspend        = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .id     = 0,
        .cls    = &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif  /* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */
static void clear_msr_range(unsigned int base, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; ++i)
                wrmsr(base+i, 0, 0);
}

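/*
 * Arm the perfctr for one watchdog period: write the negated number
 * of CPU cycles in 1/nmi_hz seconds, so the counter overflows (and
 * raises its interrupt) after that many cycles have elapsed.
 */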
static void write_watchdog_counter(const char *descr)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(nmi_perfctr_msr, 0 - count);
}

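/*
 * The per-family setup routines below share one pattern: clear the
 * relevant MSR range, program the event selector with its ENABLE
 * bit still clear, load the counter, route LVTPC to NMI delivery,
 * and only then set ENABLE so counting starts.
 */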
static void setup_k7_watchdog(void)
{
        unsigned int evntsel;

        nmi_perfctr_msr = MSR_K7_PERFCTR0;

        clear_msr_range(MSR_K7_EVNTSEL0, 4);
        clear_msr_range(MSR_K7_PERFCTR0, 4);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
        write_watchdog_counter("K7_PERFCTR0");
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}

static void setup_p6_watchdog(void)
{
        unsigned int evntsel;

        nmi_perfctr_msr = MSR_P6_PERFCTR0;

        clear_msr_range(MSR_P6_EVNTSEL0, 2);
        clear_msr_range(MSR_P6_PERFCTR0, 2);

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
                | P6_EVNTSEL_USR
                | P6_NMI_EVENT;

        wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
        write_watchdog_counter("P6_PERFCTR0");
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= P6_EVNTSEL0_ENABLE;
        wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
}

static int setup_p4_watchdog(void)
{
        unsigned int misc_enable, dummy;

        rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

        nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
        nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
        if (smp_num_siblings == 2)
                nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

        if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
                clear_msr_range(0x3F1, 2);
        /* MSR 0x3F0 seems to have a default value of 0xFC00, but current
           docs don't fully define it, so leave it alone for now. */
        if (boot_cpu_data.x86_model >= 0x3) {
                /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
                clear_msr_range(0x3A0, 26);
                clear_msr_range(0x3BC, 3);
        } else {
                clear_msr_range(0x3A0, 31);
        }
        clear_msr_range(0x3C0, 6);
        clear_msr_range(0x3C8, 6);
        clear_msr_range(0x3E0, 2);
        clear_msr_range(MSR_P4_CCCR0, 18);
        clear_msr_range(MSR_P4_PERFCTR0, 18);

        wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
        wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
        write_watchdog_counter("P4_IQ_COUNTER0");
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
        return 1;
}

static void disable_intel_arch_watchdog(void)
{
        unsigned ebx;

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        ebx = cpuid_ebx(10);
        if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}

static int setup_intel_arch_watchdog(void)
{
        unsigned int evntsel;
        unsigned ebx;

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        ebx = cpuid_ebx(10);
        if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;

        nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;

        clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
        clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);

        evntsel = ARCH_PERFMON_EVENTSEL_INT
                | ARCH_PERFMON_EVENTSEL_OS
                | ARCH_PERFMON_EVENTSEL_USR
                | ARCH_PERFMON_NMI_EVENT_SEL
                | ARCH_PERFMON_NMI_EVENT_UMASK;

        wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
        write_watchdog_counter("INTEL_ARCH_PERFCTR0");
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
        return 1;
}

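/*
 * Dispatch to the per-family setup routine for the boot CPU and,
 * on success, claim the lapic NMI hardware for the watchdog.
 */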
void setup_apic_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
                        return;
                setup_k7_watchdog();
                break;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        if (!setup_intel_arch_watchdog())
                                return;
                        break;
                }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
                                return;
                        setup_p6_watchdog();
                        break;
                case 15:
                        if (boot_cpu_data.x86_model > 0x4)
                                return;
                        if (!setup_p4_watchdog())
                                return;
                        break;
                default:
                        return;
                }
                break;
        default:
                return;
        }
        lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
        nmi_active = 1;
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [if more tty-related locks are added, break them up here too!]
 */

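/*
 * Per-CPU lockup detection state: the APIC timer IRQ count seen at
 * the previous watchdog tick, and the number of consecutive ticks
 * for which it has not changed.
 */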
static unsigned int
        last_irq_sums [NR_CPUS],
        alert_counter [NR_CPUS];

void touch_nmi_watchdog(void)
{
        int i;

        /*
         * Just reset the alert counters (other CPUs might be
         * spinning on locks we hold):
         */
        for_each_possible_cpu(i)
                alert_counter[i] = 0;

        /*
         * Tickle the softlockup detector too:
         */
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

extern void die_nmi(struct pt_regs *, const char *msg);

void nmi_watchdog_tick(struct pt_regs *regs)
{
        /*
         * Since current_thread_info()-> is always on the stack, and we
         * always switch the stack NMI-atomically, it's safe to use
         * smp_processor_id().
         */
        unsigned int sum;
        int cpu = smp_processor_id();

        sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

        if (last_irq_sums[cpu] == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                alert_counter[cpu]++;
                if (alert_counter[cpu] == 5*nmi_hz)
                        /*
                         * die_nmi will return ONLY if NOTIFY_STOP happens..
                         */
                        die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
        } else {
                last_irq_sums[cpu] = sum;
                alert_counter[cpu] = 0;
        }
        if (nmi_perfctr_msr) {
                if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
                        /*
                         * P4 quirks:
                         * - An overflown perfctr will assert its interrupt
                         *   until the OVF flag in its CCCR is cleared.
                         * - LVTPC is masked on interrupt and must be
                         *   unmasked by the LVTPC handler.
                         */
                        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
                        apic_write(APIC_LVTPC, APIC_DM_NMI);
                } else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
                           nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
                        /* Only the P6-based Pentium M needs to re-unmask
                         * the APIC vector, but it doesn't hurt the
                         * other P6 variants. */
                        apic_write(APIC_LVTPC, APIC_DM_NMI);
                }
                write_watchdog_counter(NULL);
        }
}

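/*
 * Panic support for unexplained NMIs: bits 6 and 7 of the NMI
 * reason port report IOCHK and PCI SERR errors; when neither is
 * set, the NMI has no identified hardware source.
 */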
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();
        char buf[64];

        if (!(reason & 0xc0)) {
                sprintf(buf, "NMI received for unknown reason %02x\n", reason);
                die_nmi(regs, buf);
        }
        return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        int old_state;

        old_state = unknown_nmi_panic;
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (!!old_state == !!unknown_nmi_panic)
                return 0;

        if (unknown_nmi_panic) {
                if (reserve_lapic_nmi() < 0) {
                        unknown_nmi_panic = 0;
                        return -EBUSY;
                }
                set_nmi_callback(unknown_nmi_panic_callback);
        } else {
                unset_nmi_callback();
                release_lapic_nmi();
        }
        return 0;
}

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);