asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
-ATOMIC_NOTIFIER_HEAD(i386die_chain);
-
-extern char last_sysfs_file[];
+struct notifier_block *i386die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
int register_die_notifier(struct notifier_block *nb)
{
- vmalloc_sync_all();
- return atomic_notifier_chain_register(&i386die_chain, nb);
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&i386die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
}
EXPORT_SYMBOL(register_die_notifier);
-int unregister_die_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&i386die_chain, nb);
-}
-EXPORT_SYMBOL(unregister_die_notifier);
-
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
return p > (void *)tinfo &&
p < (void *)tinfo + THREAD_SIZE - 3;
}
-/*
- * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
- */
-static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
- int printed)
+static void print_addr_and_symbol(unsigned long addr, char *log_lvl)
{
- if (!printed)
- printk(log_lvl);
-
-#if CONFIG_STACK_BACKTRACE_COLS == 1
+ printk(log_lvl);
printk(" [<%08lx>] ", addr);
-#else
- printk(" <%08lx> ", addr);
-#endif
print_symbol("%s", addr);
-
- printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
- if (printed)
- printk(" ");
- else
- printk("\n");
-
- return printed;
+ printk("\n");
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
char *log_lvl)
{
unsigned long addr;
- int printed = 0; /* nr of entries already printed on current line */
#ifdef CONFIG_FRAME_POINTER
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
- printed = print_addr_and_symbol(addr, log_lvl, printed);
+ print_addr_and_symbol(addr, log_lvl);
ebp = *(unsigned long *)ebp;
}
#else
while (valid_stack_ptr(tinfo, stack)) {
addr = *stack++;
if (__kernel_text_address(addr))
- printed = print_addr_and_symbol(addr, log_lvl, printed);
+ print_addr_and_symbol(addr, log_lvl);
}
#endif
- if (printed)
- printk("\n");
-
return ebp;
}
stack = (unsigned long*)context->previous_esp;
if (!stack)
break;
- printk("%s =======================\n", log_lvl);
+ printk(log_lvl);
+ printk(" =======================\n");
}
}
}
stack = esp;
+ printk(log_lvl);
for(i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
- if (i && ((i % 8) == 0))
- printk("\n%s ", log_lvl);
+ if (i && ((i % 8) == 0)) {
+ printk("\n");
+ printk(log_lvl);
+ printk(" ");
+ }
printk("%08lx ", *stack++);
}
- printk("\n%sCall Trace:\n", log_lvl);
+ printk("\n");
+ printk(log_lvl);
+ printk("Call Trace:\n");
show_trace_log_lvl(task, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
- printk(" ");
show_stack_log_lvl(task, esp, "");
}
esp = (unsigned long) (&regs->esp);
savesegment(ss, ss);
- if (user_mode_vm(regs)) {
+ if (user_mode(regs)) {
in_kernel = 0;
esp = regs->esp;
ss = regs->xss & 0xffff;
static int die_counter;
unsigned long flags;
- oops_enter();
-
vxh_throw_oops();
if (die.lock_owner != raw_smp_processor_id()) {
if (++die.lock_owner_depth < 3) {
int nl = 0;
- unsigned long esp;
- unsigned short ss;
-
handle_BUG(regs);
printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
#endif
if (nl)
printk("\n");
-#ifdef CONFIG_SYSFS
- printk(KERN_ALERT "last sysfs file: %s\n", last_sysfs_file);
-#endif
- if (notify_die(DIE_OOPS, str, regs, err,
- current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
- show_registers(regs);
- vxh_dump_history();
- /* Executive summary in case the oops scrolled away */
- esp = (unsigned long) (&regs->esp);
- savesegment(ss, ss);
- if (user_mode(regs)) {
- esp = regs->esp;
- ss = regs->xss & 0xffff;
- }
- printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
- print_symbol("%s", regs->eip);
- printk(" SS:ESP %04x:%08lx\n", ss, esp);
- }
- else
- regs = NULL;
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
+ vxh_dump_history();
} else
printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
die.lock_owner = -1;
spin_unlock_irqrestore(&die.lock, flags);
- if (!regs)
- return;
-
if (kexec_should_crash(current))
crash_kexec(regs);
ssleep(5);
panic("Fatal exception");
}
- oops_exit();
do_exit(SIGSEGV);
}
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-
-
-/*
- * lazy-check for CS validity on exec-shield binaries:
- *
- * the original non-exec stack patch was written by
- * Solar Designer <solar at openwall.com>. Thanks!
- */
-static int
-check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
-{
- struct desc_struct *desc1, *desc2;
- struct vm_area_struct *vma;
- unsigned long limit;
-
- if (current->mm == NULL)
- return 0;
-
- limit = -1UL;
- if (current->mm->context.exec_limit != -1UL) {
- limit = PAGE_SIZE;
- spin_lock(&current->mm->page_table_lock);
- for (vma = current->mm->mmap; vma; vma = vma->vm_next)
- if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
- limit = vma->vm_end;
- spin_unlock(&current->mm->page_table_lock);
- if (limit >= TASK_SIZE)
- limit = -1UL;
- current->mm->context.exec_limit = limit;
- }
- set_user_cs(&current->mm->context.user_cs, limit);
-
- desc1 = &current->mm->context.user_cs;
- desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
-
- if (desc1->a != desc2->a || desc1->b != desc2->b) {
- /*
- * The CS was not in sync - reload it and retry the
- * instruction. If the instruction still faults then
- * we won't hit this branch next time around.
- */
- if (print_fatal_signals >= 2) {
- printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
- printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, desc1->a, desc1->b, desc2->a, desc2->b);
- }
- load_user_cs_desc(cpu, current->mm);
- return 1;
- }
-
- return 0;
-}
-
-/*
- * The fixup code for errors in iret jumps to here (iret_exc). It loses
- * the original trap number and error code. The bogus trap 32 and error
- * code 0 are what the vanilla kernel delivers via:
- * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
- *
- * In case of a general protection fault in the iret instruction, we
- * need to check for a lazy CS update for exec-shield.
- */
-fastcall void do_iret_error(struct pt_regs *regs, long error_code)
-{
- int ok = check_lazy_exec_limit(get_cpu(), regs, error_code);
- put_cpu();
- if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
- error_code, 32, SIGSEGV) != NOTIFY_STOP) {
- siginfo_t info;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = ILL_BADSTK;
- info.si_addr = 0;
- do_trap(32, SIGSEGV, "iret exception", 0, regs, error_code,
- &info);
- }
-}
+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
long error_code)
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread;
- int ok;
/*
* Perform the lazy TSS's I/O bitmap copy. If the TSS has an
put_cpu();
return;
}
+ put_cpu();
current->thread.error_code = error_code;
current->thread.trap_no = 13;
if (!user_mode(regs))
goto gp_in_kernel;
- ok = check_lazy_exec_limit(cpu, regs, error_code);
-
- put_cpu();
-
- if (ok)
- return;
-
- if (print_fatal_signals) {
- printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
- printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, current->mm->context.user_cs.a, current->mm->context.user_cs.b);
- }
-
current->thread.error_code = error_code;
current->thread.trap_no = 13;
force_sig(SIGSEGV, current);
return;
gp_in_vm86:
- put_cpu();
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
return;
gp_in_kernel:
- put_cpu();
if (!fixup_exception(regs)) {
if (notify_die(DIE_GPF, "general protection fault", regs,
error_code, 13, SIGSEGV) == NOTIFY_STOP)
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
+ unsigned long i;
+
printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
/* Re-enable the IOCK line, wait for a few seconds */
- clear_io_check_error(reason);
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+ i = 2000;
+ while (--i) udelay(1000);
+ reason &= ~8;
+ outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
void die_nmi (struct pt_regs *regs, const char *msg)
{
- if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) ==
NOTIFY_STOP)
return;
/* If we are in kernel we are probably nested up pretty bad
* and might aswell get out now while we still can.
*/
- if (!user_mode_vm(regs)) {
+ if (!user_mode(regs)) {
current->thread.trap_no = 2;
crash_kexec(regs);
}
reason = get_nmi_reason();
if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
== NOTIFY_STOP)
return;
#ifdef CONFIG_X86_LOCAL_APIC
unknown_nmi_error(reason, regs);
return;
}
- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
return;
if (reason & 0x80)
mem_parity_error(reason, regs);
void set_nmi_callback(nmi_callback_t callback)
{
- vmalloc_sync_all();
rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);
static int __init kstack_setup(char *s)
{
kstack_depth_to_print = simple_strtoul(s, NULL, 0);
- return 1;
+ return 0;
}
__setup("kstack=", kstack_setup);