This commit was manufactured by cvs2svn to create tag
[linux-2.6.git] / arch / i386 / kernel / smp.c
index d575f4b..5c4f178 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/dump.h>
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -142,6 +143,13 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector)
         */
        cfg = __prepare_ICR(shortcut, vector);
 
+       if (vector == DUMP_VECTOR) {
+               /*
+                * Setup DUMP IPI to be delivered as an NMI
+                */
+               cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+       }
+
        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
@@ -219,7 +227,13 @@ inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
                         * program the ICR 
                         */
                        cfg = __prepare_ICR(0, vector);
-                       
+
+                       if (vector == DUMP_VECTOR) {
+                               /*
+                                * Setup DUMP IPI to be delivered as an NMI
+                                */
+                               cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+                       }
                        /*
                         * Send the IPI. The write to APIC_ICR fires this off.
                         */
@@ -308,7 +322,7 @@ static inline void leave_mm (unsigned long cpu)
  * 2) Leave the mm if we are in the lazy tlb mode.
  */
 
-fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
+asmlinkage void smp_invalidate_interrupt (void)
 {
        unsigned long cpu;
 
@@ -467,6 +481,11 @@ void flush_tlb_all(void)
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 }
 
+void dump_send_ipi(void)
+{
+       send_IPI_allbutself(DUMP_VECTOR);
+}
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
@@ -538,17 +557,17 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 
        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
-               cpu_relax();
+               barrier();
 
        if (wait)
                while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
+                       barrier();
        spin_unlock(&call_lock);
 
        return 0;
 }
 
-static void stop_this_cpu (void * dummy)
+void stop_this_cpu (void * dummy)
 {
        /*
         * Remove this CPU:
@@ -574,17 +593,19 @@ void smp_send_stop(void)
        local_irq_enable();
 }
 
+EXPORT_SYMBOL(smp_send_stop);
+
 /*
  * Reschedule call back. Nothing to do,
  * all the work is done automatically when
  * we return from the interrupt.
  */
-fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
+asmlinkage void smp_reschedule_interrupt(void)
 {
        ack_APIC_irq();
 }
 
-fastcall void smp_call_function_interrupt(struct pt_regs *regs)
+asmlinkage void smp_call_function_interrupt(void)
 {
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
@@ -609,4 +630,3 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
                atomic_inc(&call_data->finished);
        }
 }
-