[linux-2.6.git] arch/i386/kernel/crash_dump.c
/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

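/*
 * When the kernel crashes, the crashing CPU saves its own register
 * state and then stops the other CPUs.  The others are brought down
 * with an NMI IPI, since they may be spinning with normal interrupts
 * disabled; each of them saves its registers and current task pointer
 * into the per-CPU arrays below before halting.
 */
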
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/crash_dump.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>

/* Register state and current task pointer of each CPU, saved at crash time. */
struct pt_regs crash_smp_regs[NR_CPUS];
long crash_smp_current_task[NR_CPUS];

#ifdef CONFIG_SMP
/* Number of CPUs that still have to respond to the crash dump NMI. */
static atomic_t waiting_for_dump_ipi;
/* Which CPUs are expected to answer the NMI with a register save. */
static int crash_dump_expect_ipi[NR_CPUS];
extern void crash_dump_send_ipi(void);
extern void stop_this_cpu(void *);

/*
 * NMI callback run on the non-crashing CPUs.  NMIs that were not
 * triggered by crash_dump_send_ipi() are ignored; otherwise save this
 * CPU's state, acknowledge the IPI and halt.
 */
static int crash_dump_nmi_callback(struct pt_regs *regs, int cpu)
{
        if (!crash_dump_expect_ipi[cpu])
                return 0;

        crash_dump_expect_ipi[cpu] = 0;
        crash_dump_save_this_cpu(regs, cpu);
        atomic_dec(&waiting_for_dump_ipi);

        stop_this_cpu(NULL);

        return 1;
}

/*
 * Stop all the other CPUs and have each of them save its register
 * state.  An NMI IPI is used so that even CPUs spinning with
 * interrupts disabled get stopped.
 */
void __crash_dump_stop_cpus(void)
{
        int i, cpu, other_cpus;

        preempt_disable();
        cpu = smp_processor_id();
        other_cpus = num_online_cpus() - 1;

        if (other_cpus > 0) {
                atomic_set(&waiting_for_dump_ipi, other_cpus);

                for (i = 0; i < NR_CPUS; i++)
                        crash_dump_expect_ipi[i] = (i != cpu && cpu_online(i));

                set_nmi_callback(crash_dump_nmi_callback);
                /*
                 * Ensure the new callback function is set before sending
                 * out the IPI.
                 */
                wmb();

                crash_dump_send_ipi();

                /* Wait until every other online CPU has checked in. */
                while (atomic_read(&waiting_for_dump_ipi) > 0)
                        cpu_relax();

                unset_nmi_callback();
        } else {
                /* No other CPUs online; just shut down the local APIC. */
                local_irq_disable();
                disable_local_APIC();
                local_irq_enable();
        }
        preempt_enable();
}
#else
void __crash_dump_stop_cpus(void) {}
#endif

/*
 * Capture an approximation of the calling CPU's register state.  The
 * general purpose registers and flags are read directly; eip is set
 * to the current text address, so the snapshot points at this
 * function rather than at the original fault site.
 */
void crash_get_current_regs(struct pt_regs *regs)
{
        __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
        __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
        __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
        __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
        __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
        __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
        __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
        __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
        /*
         * Read the segment registers with 32-bit moves; a 16-bit movw
         * into %ax would leave the upper half of each saved long
         * uninitialized.
         */
        __asm__ __volatile__("movl %%ss, %%eax;" : "=a"(regs->xss));
        __asm__ __volatile__("movl %%cs, %%eax;" : "=a"(regs->xcs));
        __asm__ __volatile__("movl %%ds, %%eax;" : "=a"(regs->xds));
        __asm__ __volatile__("movl %%es, %%eax;" : "=a"(regs->xes));
        __asm__ __volatile__("pushfl; popl %0" : "=m"(regs->eflags));

        regs->eip = (unsigned long)current_text_addr();
}

/* Record the register state and current task pointer for one CPU. */
void crash_dump_save_this_cpu(struct pt_regs *regs, int cpu)
{
        crash_smp_current_task[cpu] = (long)current;
        crash_smp_regs[cpu] = *regs;
}
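
/*
 * A minimal sketch of how a crash path might drive the helpers above.
 * crash_dump_start() is hypothetical and not part of this file; it
 * only illustrates the intended calling sequence:
 *
 *	void crash_dump_start(struct pt_regs *regs)
 *	{
 *		struct pt_regs fixed_regs;
 *
 *		if (!regs) {
 *			crash_get_current_regs(&fixed_regs);
 *			regs = &fixed_regs;
 *		}
 *		crash_dump_save_this_cpu(regs, smp_processor_id());
 *		__crash_dump_stop_cpus();
 *		...
 *	}
 */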