This commit was manufactured by cvs2svn to create branch
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index bb59601..2772f18 100644
@@ -19,6 +19,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/dump.h>
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -143,6 +144,13 @@ inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
         */
        cfg = __prepare_ICR(shortcut, vector);
 
+       if (vector == DUMP_VECTOR) {
+               /*
+                * Set up the DUMP IPI to be delivered as an NMI
+                */
+               cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
+       }
+
        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
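
For reference, the cfg value rewritten above is the low word of the local APIC Interrupt Command Register: the bottom eight bits hold the vector and bits 8-10 select the delivery mode, so clearing the vector field and OR-ing in NMI delivery makes the dump IPI arrive via the target CPU's NMI path even when it has interrupts disabled. A minimal sketch of the same bit manipulation, assuming the mask and mode values defined in <asm/apicdef.h>:

/*
 * Illustration only (not part of the patch).  The constants mirror
 * APIC_VECTOR_MASK and APIC_DM_NMI from <asm/apicdef.h>.
 */
#define EXAMPLE_APIC_VECTOR_MASK  0x000FFUL   /* bits 0-7: vector     */
#define EXAMPLE_APIC_DM_NMI       0x00400UL   /* bits 8-10: NMI mode  */

static unsigned long example_icr_as_nmi(unsigned long cfg)
{
        /* drop the vector field and request NMI delivery instead */
        return (cfg & ~EXAMPLE_APIC_VECTOR_MASK) | EXAMPLE_APIC_DM_NMI;
}
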
@@ -220,7 +228,13 @@ inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
                         * program the ICR 
                         */
                        cfg = __prepare_ICR(0, vector);
-                       
+               
+                       if (vector == DUMP_VECTOR) {
+                               /*
+                                * Set up the DUMP IPI to be delivered as an NMI
+                                */
+                               cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
+                       }
                        /*
                         * Send the IPI. The write to APIC_ICR fires this off.
                         */
@@ -326,10 +340,12 @@ asmlinkage void smp_invalidate_interrupt (void)
                 
        if (flush_mm == cpu_tlbstate[cpu].active_mm) {
                if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+#ifndef CONFIG_X86_SWITCH_PAGETABLES
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
+#endif
                } else
                        leave_mm(cpu);
        }
@@ -395,21 +411,6 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        spin_unlock(&tlbstate_lock);
 }
        
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-       preempt_enable();
-}
-
 void flush_tlb_mm (struct mm_struct * mm)
 {
        cpumask_t cpu_mask;
@@ -441,7 +442,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
        if (current->active_mm == mm) {
                if(current->mm)
-                       __flush_tlb_one(va);
+#ifndef CONFIG_X86_SWITCH_PAGETABLES
+                       __flush_tlb_one(va)
+#endif
+                               ;
                 else
                        leave_mm(smp_processor_id());
        }
@@ -466,6 +470,11 @@ void flush_tlb_all(void)
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 }
 
+void dump_send_ipi(void)
+{
+       send_IPI_allbutself(DUMP_VECTOR);
+}
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
@@ -504,7 +513,10 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
  * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * <wait> If 1, wait (atomically) until function has completed on other CPUs.
+ *        If 0, wait for the IPI to be received by other CPUs, but do not wait
+ *        for the completion of the function on each CPU.
+ *        If -1, do not wait for other CPUs to receive the IPI.
  * [RETURNS] 0 on success, else a negative status code. Does not return until
  * remote CPUs are nearly ready to execute <<func>> or are or have executed.
  *
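
The extended <wait> convention documented above is easiest to see from the caller's side. A hypothetical caller in the style of a crash-dump path (illustrative only, not taken from this patch) that must not block on unresponsive CPUs might look like:

/*
 * Illustrative only: the callback and caller below are not part of
 * the patch; they just exercise the new wait == -1 convention.
 */
static void dump_save_this_cpu(void *info)
{
        /* ...record per-CPU state for the dump... */
}

static void example_trigger_remote_save(void)
{
        /*
         * wait == -1: fire the IPI and return immediately, without
         * waiting for the other CPUs to acknowledge it - safe even
         * if some of them are wedged with interrupts disabled.
         */
        smp_call_function(dump_save_this_cpu, NULL, 0, -1);
}
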
@@ -519,13 +531,14 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                return 0;
 
        /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
+       /* Only if we are waiting for other CPUs to ack */
+       WARN_ON(irqs_disabled() && wait >= 0);
 
        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
+       data.wait = wait > 0 ? wait : 0;
+       if (wait > 0)
                atomic_set(&data.finished, 0);
 
        spin_lock(&call_lock);
@@ -536,10 +549,11 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 
        /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               barrier();
+       if (wait >= 0)
+               while (atomic_read(&data.started) != cpus)
+                       barrier();
 
-       if (wait)
+       if (wait > 0)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock(&call_lock);
@@ -547,7 +561,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
        return 0;
 }
 
-static void stop_this_cpu (void * dummy)
+void stop_this_cpu (void * dummy)
 {
        /*
         * Remove this CPU:
@@ -573,6 +587,8 @@ void smp_send_stop(void)
        local_irq_enable();
 }
 
+EXPORT_SYMBOL(smp_send_stop);
+
 /*
  * Reschedule call back. Nothing to do,
  * all the work is done automatically when
@@ -608,4 +624,3 @@ asmlinkage void smp_call_function_interrupt(void)
                atomic_inc(&call_data->finished);
        }
 }
-
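
Taken together, the hooks added here give a hypothetical crash-dump module what it needs to quiesce the machine: dump_send_ipi() raises DUMP_VECTOR as an NMI on the other CPUs, stop_this_cpu() can be called from an NMI handler now that it is no longer static, and smp_send_stop() is usable from module context thanks to the new export. A rough usage sketch, with illustrative function names that are not part of this patch:

/*
 * Sketch only - these wrappers are illustrative, not interfaces
 * added by the patch.
 */
static void example_dump_quiesce(void)
{
        dump_send_ipi();        /* NMI the other CPUs into the dump path */
}

static void example_dump_nmi_park(void)
{
        /* called from the dump NMI handler on each remote CPU */
        stop_this_cpu(NULL);    /* take the CPU offline and halt it */
}

static void example_module_shutdown(void)
{
        smp_send_stop();        /* callable from modules via the new export */
}
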