/*
- * Architecture specific (i386) functions for kexec based crash dumps.
- *
- * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
- *
- * Copyright (C) IBM Corporation, 2004. All rights reserved.
+ * kernel/crash_dump.c - Memory preserving reboot related code.
*
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
*/
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-
-#include <asm/crash_dump.h>
-#include <asm/processor.h>
-#include <asm/hardirq.h>
-#include <asm/nmi.h>
-#include <asm/hw_irq.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/crash_dump.h>
-struct pt_regs crash_smp_regs[NR_CPUS];
-long crash_smp_current_task[NR_CPUS];
+#include <asm/uaccess.h>
-#ifdef CONFIG_SMP
-static atomic_t waiting_for_dump_ipi;
-static int crash_dump_expect_ipi[NR_CPUS];
-extern void crash_dump_send_ipi(void);
-extern void stop_this_cpu(void *);
-
-static int crash_dump_nmi_callback(struct pt_regs *regs, int cpu)
-{
- if (!crash_dump_expect_ipi[cpu])
- return 0;
/*
 * Bounce page for staging "oldmem" data: copy_to_user() must not be
 * called in atomic (kmap) context, so page contents are first copied
 * here and then copied out to user space in non-atomic context.
 * Allocated at boot by kdump_buf_page_init() via arch_initcall().
 */
static void *kdump_buf_page;
- crash_dump_expect_ipi[cpu] = 0;
- crash_dump_save_this_cpu(regs, cpu);
- atomic_dec(&waiting_for_dump_ipi);
-
- stop_this_cpu(NULL);
-
- return 1;
-}
-
-void __crash_dump_stop_cpus(void)
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
{
- int i, cpu, other_cpus;
-
- preempt_disable();
- cpu = smp_processor_id();
- other_cpus = num_online_cpus()-1;
+ void *vaddr;
- if (other_cpus > 0) {
- atomic_set(&waiting_for_dump_ipi, other_cpus);
-
- for (i = 0; i < NR_CPUS; i++)
- crash_dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-
- set_nmi_callback(crash_dump_nmi_callback);
- /* Ensure the new callback function is set before sending
- * out the IPI
- */
- wmb();
+ if (!csize)
+ return 0;
- crash_dump_send_ipi();
- while (atomic_read(&waiting_for_dump_ipi) > 0)
- cpu_relax();
+ vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
- unset_nmi_callback();
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr, KM_PTE0);
} else {
- local_irq_disable();
- disable_local_APIC();
- local_irq_enable();
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Kdump buffer page not"
+ " allocated\n");
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr, KM_PTE0);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
}
- preempt_enable();
+
+ return csize;
}
-#else
-void __crash_dump_stop_cpus(void) {}
-#endif
-void crash_get_current_regs(struct pt_regs *regs)
+static int __init kdump_buf_page_init(void)
{
- __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
- __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
- __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
- __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
- __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
- __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
- __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
- __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
- __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
- __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
- __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
- __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
- __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
+ int ret = 0;
- regs->eip = (unsigned long)current_text_addr();
-}
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+ " page\n");
+ ret = -ENOMEM;
+ }
-void crash_dump_save_this_cpu(struct pt_regs *regs, int cpu)
-{
- crash_smp_current_task[cpu] = (long)current;
- crash_smp_regs[cpu] = *regs;
+ return ret;
}
-
+arch_initcall(kdump_buf_page_init);