X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fpanic.c;h=6a1bc03c7e2c49e6523ce90e2c216b4f2e897790;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=081f7465fc8dec17d3e69a5c9ff7c29f8165ad5b;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/kernel/panic.c b/kernel/panic.c
index 081f7465f..6a1bc03c7 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -18,14 +18,17 @@
 #include 
 #include 
 #include 
+#include 
 
-int panic_timeout;
 int panic_on_oops;
 int tainted;
+static int pause_on_oops;
+static int pause_on_oops_flag;
+static DEFINE_SPINLOCK(pause_on_oops_lock);
 
-EXPORT_SYMBOL(panic_timeout);
+int panic_timeout;
 
-struct notifier_block *panic_notifier_list;
+ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
@@ -59,10 +62,17 @@ NORET_TYPE void panic(const char * fmt, ...)
 	long i;
 	static char buf[1024];
 	va_list args;
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
 	unsigned long caller = (unsigned long) __builtin_return_address(0);
 #endif
 
+	/*
+	 * It's possible to come here directly from a panic-assertion and not
+	 * have preempt disabled. Some functions called from here want
+	 * preempt to be disabled. No point enabling it later though...
+	 */
+	preempt_disable();
+
 	bust_spinlocks(1);
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
@@ -70,17 +80,28 @@ NORET_TYPE void panic(const char * fmt, ...)
 	printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
 	bust_spinlocks(0);
 
+	/*
+	 * If we have crashed and we have a crash kernel loaded let it handle
+	 * everything else.
+	 * Do we want to call this before we try to display a message?
+	 */
+	crash_kexec(NULL);
+
 #ifdef CONFIG_SMP
+	/*
+	 * Note smp_send_stop is the usual smp shutdown function, which
+	 * unfortunately means it may not be hardened to work in a panic
+	 * situation.
+	 */
 	smp_send_stop();
 #endif
 
-	notifier_call_chain(&panic_notifier_list, 0, buf);
+	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	if (!panic_blink)
 		panic_blink = no_blink;
 
-	if (panic_timeout > 0)
-	{
+	if (panic_timeout > 0) {
 		/*
 		 * Delay timeout seconds before rebooting the machine.
 		 * We can't use the "normal" timers since we just panicked..
@@ -92,12 +113,11 @@ NORET_TYPE void panic(const char * fmt, ...)
 			mdelay(1);
 			i++;
 		}
-		/*
-		 *	Should we run the reboot notifier. For the moment Im
-		 *	choosing not too. It might crash, be corrupt or do
-		 *	more harm than good for other reasons.
+		/*	This will not be a clean reboot, with everything
+		 *	shutting down.  But if there is a chance of
+		 *	rebooting the system it will be rebooted.
 		 */
-		machine_restart(NULL);
+		emergency_restart();
 	}
 #ifdef __sparc__
 	{
@@ -107,11 +127,12 @@ NORET_TYPE void panic(const char * fmt, ...)
 		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
 	}
 #endif
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
 	disabled_wait(caller);
 #endif
 	local_irq_enable();
 	for (i = 0;;) {
+		touch_softlockup_watchdog();
 		i += panic_blink(i);
 		mdelay(1);
 		i++;
@@ -149,9 +170,102 @@ const char *print_tainted(void)
 		snprintf(buf, sizeof(buf), "Not tainted");
 	return(buf);
 }
+EXPORT_SYMBOL(print_tainted);
 
 void add_taint(unsigned flag)
 {
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
+
+static int __init pause_on_oops_setup(char *str)
+{
+	pause_on_oops = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("pause_on_oops=", pause_on_oops_setup);
+
+static void spin_msec(int msecs)
+{
+	int i;
+
+	for (i = 0; i < msecs; i++) {
+		touch_nmi_watchdog();
+		mdelay(1);
+	}
+}
+
+/*
+ * It just happens that oops_enter() and oops_exit() are identically
+ * implemented...
+ */
+static void do_oops_enter_exit(void)
+{
+	unsigned long flags;
+	static int spin_counter;
+
+	if (!pause_on_oops)
+		return;
+
+	spin_lock_irqsave(&pause_on_oops_lock, flags);
+	if (pause_on_oops_flag == 0) {
+		/* This CPU may now print the oops message */
+		pause_on_oops_flag = 1;
+	} else {
+		/* We need to stall this CPU */
+		if (!spin_counter) {
+			/* This CPU gets to do the counting */
+			spin_counter = pause_on_oops;
+			do {
+				spin_unlock(&pause_on_oops_lock);
+				spin_msec(MSEC_PER_SEC);
+				spin_lock(&pause_on_oops_lock);
+			} while (--spin_counter);
+			pause_on_oops_flag = 0;
+		} else {
+			/* This CPU waits for a different one */
+			while (spin_counter) {
+				spin_unlock(&pause_on_oops_lock);
+				spin_msec(1);
+				spin_lock(&pause_on_oops_lock);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
+}
+
+/*
+ * Return true if the calling CPU is allowed to print oops-related info. This
+ * is a bit racy..
+ */
+int oops_may_print(void)
+{
+	return pause_on_oops_flag == 0;
+}
+
+/*
+ * Called when the architecture enters its oops handler, before it prints
+ * anything. If this is the first CPU to oops, and it's oopsing the first time
+ * then let it proceed.
+ *
+ * This is all enabled by the pause_on_oops kernel boot option. We do all this
+ * to ensure that oopses don't scroll off the screen. It has the side-effect
+ * of preventing later-oopsing CPUs from mucking up the display, too.
+ *
+ * It turns out that the CPU which is allowed to print ends up pausing for the
+ * right duration, whereas all the other CPUs pause for twice as long: once in
+ * oops_enter(), once in oops_exit().
+ */
+void oops_enter(void)
+{
+	do_oops_enter_exit();
+}
+
+/*
+ * Called when the architecture exits its oops handler, after printing
+ * everything.
+ */
+void oops_exit(void)
+{
+	do_oops_enter_exit();
+}