/*
 * Architecture specific (i386) functions for Linux crash dumps.
 *
 * Created by: Matt Robinson (yakker@sgi.com)
 *
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 *
 * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
 * Copyright 2000 TurboLinux, Inc. All rights reserved.
 *
 * This code is released under version 2 of the GNU GPL.
 *
 * The hooks for dumping the kernel virtual memory to disk are in this
 * file. Any time a modification is made to the virtual memory mechanism,
 * these routines must be changed to use the new mechanisms.
 */
19 #include <linux/init.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/smp.h>
24 #include <linux/vmalloc.h>
26 #include <linux/dump.h>
27 #include "dump_methods.h"
28 #include <linux/irq.h>
30 #include <asm/processor.h>
32 #include <asm/hardirq.h>
35 static __s32 saved_irq_count; /* saved preempt_count() flags */
/*
 * Fragment of the per-CPU dump-stack allocator (presumably
 * alloc_dha_stack() — the enclosing signature and several interior
 * lines are missing from this extract; the leading numerals on each
 * line are original file line numbers left over from extraction).
 */
/* Slot 0 non-zero means the stacks were already allocated — skip. */
43 if (dump_header_asm.dha_stack[0])
/* One THREAD_SIZE save area per online CPU, in a single vmalloc chunk. */
46 ptr = vmalloc(THREAD_SIZE * num_online_cpus());
48 printk("vmalloc for dha_stacks failed\n");
/* Carve the chunk into per-CPU slices recorded as 32-bit values. */
52 for (i = 0; i < num_online_cpus(); i++) {
53 dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
/*
 * Fragment of the dump-stack release path (presumably free_dha_stack()
 * — enclosing signature not visible in this extract).
 * Slot 0 holds the base of the single vmalloc chunk; free it once and
 * clear the slot so a later allocation starts fresh.
 */
62 if (dump_header_asm.dha_stack[0]) {
63 vfree((void *)dump_header_asm.dha_stack[0]);
64 dump_header_asm.dha_stack[0] = 0;
/*
 * __dump_save_regs - fix up the saved register snapshot for the dump.
 *
 * On i386 a same-privilege trap does not push ss:esp, so for a trap
 * taken in kernel mode the esp/xss fields of the snapshot must be
 * reconstructed: esp is the address where esp *would* have been pushed
 * (L87 below), and xss is read live from %ss (L88-89).
 * NOTE(review): several interior lines (return type, else-branch,
 * closing brace) are missing from this extract.
 */
71 __dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
75 /* In case of panic dumps, we collects regs on entry to panic.
76 * so, we shouldn't 'fix' ssesp here again. But it is hard to
77 * tell just looking at regs whether ssesp need fixing. We make
78 * this decision by looking at xss in regs. If we have better
79 * means to determine that ssesp are valid (by some flag which
80 * tells that we are here due to panic dump), then we can use
81 * that instead of this kludge.
83 if (!user_mode(regs)) {
84 if ((0xffff & regs->xss) == __KERNEL_DS)
85 /* already fixed up */
87 dest_regs->esp = (unsigned long)&(regs->esp);
88 __asm__ __volatile__ ("movw %%ss, %%ax;"
89 :"=a"(dest_regs->xss));
95 extern cpumask_t irq_affinity[];
96 extern irq_desc_t irq_desc[];
97 extern void dump_send_ipi(void);
99 static int dump_expect_ipi[NR_CPUS];
100 static atomic_t waiting_for_dump_ipi;
101 static cpumask_t saved_affinity[NR_IRQS];
103 extern void stop_this_cpu(void *); /* exported by i386 kernel */
/*
 * dump_nmi_callback - per-CPU NMI handler installed while dumping.
 *
 * Ignores NMIs not flagged in dump_expect_ipi[], saves this CPU's
 * register state, decrements the rendezvous counter, then parks the
 * CPU according to dump_silence_level: hard-spin, halt, or soft-spin
 * (forces a reschedule flag so the task spins in schedule()).
 * NOTE(review): interior lines (returns, loop braces, halt call) are
 * missing from this extract.
 */
106 dump_nmi_callback(struct pt_regs *regs, int cpu)
108 if (!dump_expect_ipi[cpu])
111 dump_expect_ipi[cpu] = 0;
113 dump_save_this_cpu(regs);
114 atomic_dec(&waiting_for_dump_ipi);
117 switch (dump_silence_level) {
118 case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
120 barrier(); /* paranoia */
121 if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
124 cpu_relax(); /* kill time nicely */
128 case DUMP_HALT_CPUS: /* Execute halt */
132 case DUMP_SOFT_SPIN_CPUS:
133 /* Mark the task so it spins in schedule */
134 set_tsk_thread_flag(current, TIF_NEED_RESCHED);
141 /* save registers on other processors */
/*
 * __dump_save_other_cpus - rendezvous all other online CPUs via NMI
 * and capture their register state.
 *
 * Marks every other online CPU in dump_expect_ipi[], installs
 * dump_nmi_callback as the temporary NMI handler, then (via
 * dump_send_ipi, not visible here — presumably on one of the missing
 * lines) NMIs them and waits for the counter to reach zero before
 * restoring normal NMI handling.
 * NOTE(review): interior lines are missing from this extract; the wait
 * loop at L165 has no visible timeout — confirm one exists upstream.
 */
143 __dump_save_other_cpus(void)
145 int i, cpu = smp_processor_id();
146 int other_cpus = num_online_cpus()-1;
148 if (other_cpus > 0) {
149 atomic_set(&waiting_for_dump_ipi, other_cpus);
151 for (i = 0; i < NR_CPUS; i++) {
152 dump_expect_ipi[i] = (i != cpu && cpu_online(i));
155 /* short circuit normal NMI handling temporarily */
156 set_nmi_callback(dump_nmi_callback);
160 /* may be we dont need to wait for NMI to be processed.
161 just write out the header at the end of dumping, if
162 this IPI is not processed until then, there probably
163 is a problem and we just fail to capture state of
165 while(atomic_read(&waiting_for_dump_ipi) > 0) {
169 unset_nmi_callback();
174 * Routine to save the old irq affinities and change affinities of all irqs to
/*
 * set_irq_affinity - steer every IRQ to the dumping CPU.
 *
 * Saves the current affinities into saved_affinity[], then points each
 * wired-up IRQ at the CPU executing the dump.
 * NOTE(review): the memcpy copies NR_IRQS * sizeof(unsigned long)
 * bytes while the arrays hold cpumask_t entries — this is only correct
 * when cpumask_t is a single unsigned long (NR_CPUS <= BITS_PER_LONG);
 * verify against this tree's cpumask definition.
 */
178 set_irq_affinity(void)
181 cpumask_t cpu = CPU_MASK_NONE;
183 cpu_set(smp_processor_id(), cpu);
184 memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
185 for (i = 0; i < NR_IRQS; i++) {
/* Skip IRQ lines with no controller behind them. */
186 if (irq_desc[i].handler == NULL)
188 irq_affinity[i] = cpu;
189 if (irq_desc[i].handler->set_affinity != NULL)
190 irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
195 * Restore old irq affinities.
/*
 * reset_irq_affinity - undo set_irq_affinity() using saved_affinity[].
 * NOTE(review): same memcpy-size caveat as set_irq_affinity() — the
 * sizeof(unsigned long) per-entry size only matches cpumask_t when
 * NR_CPUS <= BITS_PER_LONG.
 */
198 reset_irq_affinity(void)
202 memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
203 for (i = 0; i < NR_IRQS; i++) {
204 if (irq_desc[i].handler == NULL)
206 if (irq_desc[i].handler->set_affinity != NULL)
207 irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
211 #else /* !CONFIG_SMP */
212 #define set_irq_affinity() do { } while (0)
213 #define reset_irq_affinity() do { } while (0)
214 #define save_other_cpu_states() do { } while (0)
215 #endif /* !CONFIG_SMP */
218 * Kludge - dump from interrupt context is unreliable (Fixme)
220 * We do this so that softirqs initiated for dump i/o
221 * get processed and we don't hang while waiting for i/o
222 * to complete or in any irq synchronization attempt.
224 * This is not quite legal of course, as it has the side
225 * effect of making all interrupts & softirqs triggered
226 * while dump is in progress complete before currently
227 * pending softirqs and the currently executing interrupt
/*
 * Headless fragments (enclosing signatures not visible in this
 * extract): lines 233-234 presumably belong to the irq-context save
 * path — stash irq_count() and clear the hardirq/softirq nesting bits
 * of preempt_count() so softirqs issued for dump i/o can run; line 240
 * presumably belongs to the matching restore path, OR-ing the saved
 * nesting bits back in.
 */
233 saved_irq_count = irq_count();
234 preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
240 preempt_count() |= saved_irq_count;
244 * Name: __dump_irq_enable
245 * Func: Reset system so interrupts are enabled.
246 * This is used for dump methods that require interrupts
247 * Eventually, all methods will have interrupts disabled
248 * and this code can be removed.
250 * Change irq affinities
251 * Re-enable interrupts
/* Body of __dump_irq_enable is not visible in this extract; per the
 * comment above it should change irq affinities and re-enable
 * interrupts. */
254 __dump_irq_enable(void)
263 * Name: __dump_irq_restore
264 * Func: Resume the system state in an architecture-specific way.
/* Visible part of the restore path: put IRQ affinities back. */
268 __dump_irq_restore(void)
271 reset_irq_affinity();
276 * Name: __dump_configure_header()
277 * Func: Meant to fill in arch specific header fields except per-cpu state
278 * already captured via __dump_save_context for all CPUs.
281 __dump_configure_header(const struct pt_regs *regs)
287 * Name: __dump_init()
288 * Func: Initialize the dumping routine process.
291 __dump_init(uint64_t local_memory_start)
297 * Name: __dump_open()
298 * Func: Open the dump device (architecture specific).
307 * Name: __dump_cleanup()
308 * Func: Free any architecture specific data structures. This is called
309 * when the dump module is being removed.
317 extern int pfn_is_ram(unsigned long);
320 * Name: __dump_page_valid()
321 * Func: Check if page is valid to dump.
/*
 * __dump_page_valid - decide whether page frame `index` should be
 * dumped: reject invalid pfns (the early-return on the missing line
 * after L326, presumably), then defer to pfn_is_ram() so only real
 * RAM pages are written.
 */
324 __dump_page_valid(unsigned long index)
326 if (!pfn_valid(index))
329 return pfn_is_ram(index);
333 * Name: manual_handle_crashdump()
334 * Func: Interface for the lkcd dump command. Calls dump_execute()
337 manual_handle_crashdump(void) {
341 get_current_regs(®s);
342 dump_execute("manual", ®s);