/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
38 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
39 * - it may be reserved by some other driver, or not
40 * - when not reserved by some other driver, it may be used for
41 * the NMI watchdog, or not
43 * This is maintained separately from nmi_active because the NMI
44 * watchdog may also be driven from the I/O APIC timer.
46 static spinlock_t lapic_nmi_owner_lock = SPIN_LOCK_UNLOCKED;
47 static unsigned int lapic_nmi_owner;
48 #define LAPIC_NMI_WATCHDOG (1<<0)
49 #define LAPIC_NMI_RESERVED (1<<1)
52 * +1: the lapic NMI watchdog is active, but can be disabled
53 * 0: the lapic NMI watchdog has not been set up, and cannot
55 * -1: the lapic NMI watchdog is disabled, but can be enabled
57 int nmi_active; /* oprofile uses this */
60 unsigned int nmi_watchdog = NMI_DEFAULT;
61 static unsigned int nmi_hz = HZ;
62 unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

/* AMD K7 performance-counter event-select bits and the cycle-count event
   used to drive the watchdog NMI. */
#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

/* Intel P6-family equivalents (currently unused by setup code below). */
#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
81 /* Run after command line and cpu_init init, but before all other checks */
82 void __init nmi_watchdog_default(void)
84 if (nmi_watchdog != NMI_DEFAULT)
87 /* For some reason the IO APIC watchdog doesn't work on the AMD
88 8111 chipset. For now switch to local APIC mode using
89 perfctr0 there. On Intel CPUs we don't have code to handle
90 the perfctr and the IO-APIC seems to work, so use that. */
92 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
93 nmi_watchdog = NMI_LOCAL_APIC;
95 "Using local APIC NMI watchdog using perfctr0\n");
97 printk(KERN_INFO "Using IO APIC NMI watchdog\n");
98 nmi_watchdog = NMI_IO_APIC;
102 /* Why is there no CPUID flag for this? */
103 static __init int cpu_has_lapic(void)
105 switch (boot_cpu_data.x86_vendor) {
106 case X86_VENDOR_INTEL:
108 return boot_cpu_data.x86 >= 6;
109 /* .... add more cpus here or find a different way to figure this out. */
115 int __init check_nmi_watchdog (void)
120 if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) {
121 nmi_watchdog = NMI_NONE;
125 printk(KERN_INFO "testing NMI watchdog ... ");
127 for (cpu = 0; cpu < NR_CPUS; cpu++)
128 counts[cpu] = cpu_pda[cpu].__nmi_count;
130 mdelay((10*1000)/nmi_hz); // wait 10 ticks
132 for (cpu = 0; cpu < NR_CPUS; cpu++) {
133 if (!cpu_online(cpu))
135 if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
136 printk("CPU#%d: NMI appears to be stuck (%d)!\n",
138 cpu_pda[cpu].__nmi_count);
140 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
146 /* now that we know it works we can reduce NMI frequency to
147 something more reasonable; makes a difference in some configs */
148 if (nmi_watchdog == NMI_LOCAL_APIC)
154 int __init setup_nmi_watchdog(char *str)
158 if (!strncmp(str,"panic",5)) {
159 panic_on_timeout = 1;
160 str = strchr(str, ',');
166 get_option(&str, &nmi);
168 if (nmi >= NMI_INVALID)
174 __setup("nmi_watchdog=", setup_nmi_watchdog);
176 static void disable_lapic_nmi_watchdog(void)
180 switch (boot_cpu_data.x86_vendor) {
182 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
184 case X86_VENDOR_INTEL:
185 wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
189 /* tell do_nmi() and others that we're not active any more */
193 static void enable_lapic_nmi_watchdog(void)
195 if (nmi_active < 0) {
196 nmi_watchdog = NMI_LOCAL_APIC;
197 setup_apic_nmi_watchdog();
201 int reserve_lapic_nmi(void)
203 unsigned int old_owner;
205 spin_lock(&lapic_nmi_owner_lock);
206 old_owner = lapic_nmi_owner;
207 lapic_nmi_owner |= LAPIC_NMI_RESERVED;
208 spin_unlock(&lapic_nmi_owner_lock);
209 if (old_owner & LAPIC_NMI_RESERVED)
211 if (old_owner & LAPIC_NMI_WATCHDOG)
212 disable_lapic_nmi_watchdog();
216 void release_lapic_nmi(void)
218 unsigned int new_owner;
220 spin_lock(&lapic_nmi_owner_lock);
221 new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
222 lapic_nmi_owner = new_owner;
223 spin_unlock(&lapic_nmi_owner_lock);
224 if (new_owner & LAPIC_NMI_WATCHDOG)
225 enable_lapic_nmi_watchdog();
228 void disable_timer_nmi_watchdog(void)
230 if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
234 unset_nmi_callback();
236 nmi_watchdog = NMI_NONE;
239 void enable_timer_nmi_watchdog(void)
241 if (nmi_active < 0) {
242 nmi_watchdog = NMI_IO_APIC;
243 touch_nmi_watchdog();
251 static int nmi_pm_active; /* nmi_active before suspend */
253 static int lapic_nmi_suspend(struct sys_device *dev, u32 state)
255 nmi_pm_active = nmi_active;
256 disable_lapic_nmi_watchdog();
260 static int lapic_nmi_resume(struct sys_device *dev)
262 if (nmi_pm_active > 0)
263 enable_lapic_nmi_watchdog();
267 static struct sysdev_class nmi_sysclass = {
268 set_kset_name("lapic_nmi"),
269 .resume = lapic_nmi_resume,
270 .suspend = lapic_nmi_suspend,
273 static struct sys_device device_lapic_nmi = {
275 .cls = &nmi_sysclass,
278 static int __init init_lapic_nmi_sysfs(void)
282 if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
285 error = sysdev_class_register(&nmi_sysclass);
287 error = sysdev_register(&device_lapic_nmi);
290 /* must come after the local APIC's device_initcall() */
291 late_initcall(init_lapic_nmi_sysfs);
293 #endif /* CONFIG_PM */
296 * Activate the NMI watchdog via the local APIC.
297 * Original code written by Keith Owens.
300 static void setup_k7_watchdog(void)
303 unsigned int evntsel;
305 /* No check, so can start with slow frequency */
308 /* XXX should check these in EFER */
310 nmi_perfctr_msr = MSR_K7_PERFCTR0;
312 for(i = 0; i < 4; ++i) {
313 /* Simulator may not support it */
314 if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
316 wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
319 evntsel = K7_EVNTSEL_INT
324 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
325 wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
326 apic_write(APIC_LVTPC, APIC_DM_NMI);
327 evntsel |= K7_EVNTSEL_ENABLE;
328 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
331 void setup_apic_nmi_watchdog(void)
333 switch (boot_cpu_data.x86_vendor) {
335 if (boot_cpu_data.x86 < 6)
337 if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
344 lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check it's local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up
 *  here too!]
 */
364 last_irq_sums [NR_CPUS],
365 alert_counter [NR_CPUS];
367 void touch_nmi_watchdog (void)
372 * Just reset the alert counters, (other CPUs might be
373 * spinning on locks we hold):
375 for (i = 0; i < NR_CPUS; i++)
376 alert_counter[i] = 0;
379 void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
383 cpu = safe_smp_processor_id();
384 sum = read_pda(apic_timer_irqs);
385 if (last_irq_sums[cpu] == sum) {
387 * Ayiee, looks like this CPU is stuck ...
388 * wait a few IRQs (5 seconds) before doing the oops ...
390 alert_counter[cpu]++;
391 if (alert_counter[cpu] == 5*nmi_hz) {
392 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
394 alert_counter[cpu] = 0;
397 die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
400 last_irq_sums[cpu] = sum;
401 alert_counter[cpu] = 0;
404 wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
407 static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
412 static nmi_callback_t nmi_callback = dummy_nmi_callback;
414 asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
416 int cpu = safe_smp_processor_id();
419 add_pda(__nmi_count,1);
420 if (!nmi_callback(regs, cpu))
421 default_do_nmi(regs);
425 void set_nmi_callback(nmi_callback_t callback)
427 nmi_callback = callback;
430 void unset_nmi_callback(void)
432 nmi_callback = dummy_nmi_callback;
/*
 * NMI callback used when unknown_nmi_panic is set: any NMI whose reason
 * byte shows neither memory-parity (0x80) nor I/O-check (0x40) is treated
 * as fatal via die_nmi().
 */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}
450 * proc handler for /proc/sys/kernel/unknown_nmi_panic
452 int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
453 void __user *buffer, size_t *length, loff_t *ppos)
457 old_state = unknown_nmi_panic;
458 proc_dointvec(table, write, file, buffer, length, ppos);
459 if (!!old_state == !!unknown_nmi_panic)
462 if (unknown_nmi_panic) {
463 if (reserve_lapic_nmi() < 0) {
464 unknown_nmi_panic = 0;
467 set_nmi_callback(unknown_nmi_panic_callback);
471 unset_nmi_callback();
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);