This commit was manufactured by cvs2svn to create tag
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 7c64adb..5c4f178 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/dump.h>
 
 #include <asm/mtrr.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-#include <mach_ipi.h>
 #include <mach_apic.h>
 
 /*
  *     about nothing of note with C stepping upwards.
  */
 
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
 
 /*
  * the following functions deal with sending IPIs between CPUs.
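
The hunk above converts cpu_tlbstate from an NR_CPUS-sized array into a
proper per-CPU variable, so each CPU's TLB state lives in that CPU's own
per-CPU area instead of a shared cacheline-padded array. A minimal sketch
of the access pattern this enables, assuming the 2.6-era <linux/percpu.h>
interface (example() is illustrative only):

    #include <linux/percpu.h>

    /* one instance of the structure per possible CPU */
    DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned
            = { &init_mm, 0, };

    static void example(unsigned long cpu)
    {
            /* by-index access to another CPU's copy */
            if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                    leave_mm(cpu);

            /* fast path for the current CPU's own copy */
            __get_cpu_var(cpu_tlbstate).state = TLBSTATE_OK;
    }
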
@@ -123,7 +122,7 @@ static inline int __prepare_ICR2 (unsigned int mask)
        return SET_APIC_DEST_FIELD(mask);
 }
 
-inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
 {
        /*
         * Subtle. In the case of the 'never do double writes' workaround
@@ -144,6 +143,13 @@ inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
         */
        cfg = __prepare_ICR(shortcut, vector);
 
+       if (vector == DUMP_VECTOR) {
+               /*
+                * Set up the DUMP IPI to be delivered as an NMI
+                */
+               cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+       }
+
        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
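
The DUMP_VECTOR branch added above rewrites the ICR value so the dump IPI
goes out as an NMI rather than a fixed-vector interrupt. Roughly, using
the local-APIC field layout from <asm/apicdef.h>:

    unsigned long cfg = __prepare_ICR(shortcut, vector);
    /*
     * ICR bits 0-7 hold the vector (APIC_VECTOR_MASK == 0x000FF);
     * bits 8-10 hold the delivery mode (APIC_DM_NMI == 0x00400).
     * With delivery mode NMI the target APIC ignores the vector
     * field, so the IPI arrives on the NMI line even on CPUs that
     * are running with interrupts disabled.
     */
    cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
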
@@ -158,9 +164,9 @@ void fastcall send_IPI_self(int vector)
 /*
  * This is only used on smaller machines.
  */
-inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 {
-       unsigned long mask = cpus_coerce(cpumask);
+       unsigned long mask = cpus_addr(cpumask)[0];
        unsigned long cfg;
        unsigned long flags;
 
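
The cpus_coerce() accessor was replaced by cpus_addr() during the cpumask
rework; the new call returns a pointer to the mask's underlying bitmap
rather than a value. A sketch of what the accessor expands to, per 2.6's
<linux/cpumask.h>:

    /* a cpumask is just a fixed-size bitmap of unsigned longs */
    typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
    #define cpus_addr(src) ((src).bits)

    /*
     * Word 0 covers CPUs 0..BITS_PER_LONG-1, which is every CPU on
     * the "smaller machines" this bitmask variant is limited to.
     */
    unsigned long mask = cpus_addr(cpumask)[0];
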
@@ -221,7 +227,13 @@ inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
                         * program the ICR 
                         */
                        cfg = __prepare_ICR(0, vector);
-                       
+               
+                       if (vector == DUMP_VECTOR) {
+                               /*
+                                * Set up the DUMP IPI to be delivered as an NMI
+                                */
+                               cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+                       }
                        /*
                         * Send the IPI. The write to APIC_ICR fires this off.
                         */
@@ -231,6 +243,8 @@ inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
        local_irq_restore(flags);
 }
 
+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
+
 /*
  *     Smarter SMP flushing macros. 
  *             c/o Linus Torvalds.
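
Moving the <mach_ipi.h> include below the function bodies matters because
the subarch header wraps those functions in static inlines; with the
definitions already in scope, the compiler can inline the calls. A rough
sketch of the default subarch's wrappers (include/asm-i386/mach-default/
mach_ipi.h at the time):

    static inline void send_IPI_mask(cpumask_t mask, int vector)
    {
            send_IPI_mask_bitmask(mask, vector);
    }

    static inline void send_IPI_allbutself(int vector)
    {
            /* avoid an APIC send error on uniprocessor systems */
            if (num_online_cpus() > 1)
                    __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
    }
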
@@ -256,9 +270,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
  */
 static inline void leave_mm (unsigned long cpu)
 {
-       if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
-       cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 
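
leave_mm() is the exit path from lazy TLB mode: the CPU switches to the
kernel's swapper_pg_dir and removes itself from the mm's cpu_vm_mask so it
receives no further flush IPIs for that mm. The two states involved, with
values as in 2.6's <asm/tlbflush.h>:

    #define TLBSTATE_OK     1       /* mm is live here; honour flush IPIs  */
    #define TLBSTATE_LAZY   2       /* kernel thread on a borrowed mm;     */
                                    /* first flush IPI triggers leave_mm() */

    /* typical caller, as in do_flush_tlb_all() below */
    if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
            leave_mm(cpu);
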
@@ -325,8 +339,8 @@ asmlinkage void smp_invalidate_interrupt (void)
                 * BUG();
                 */
                 
-       if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-               if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
@@ -458,13 +472,18 @@ static void do_flush_tlb_all(void* info)
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
 
 void flush_tlb_all(void)
 {
-       on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+       on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
+void dump_send_ipi(void)
+{
+       send_IPI_allbutself(DUMP_VECTOR);
 }
 
 /*
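
dump_send_ipi() is the new entry point the crash-dump code uses to stop
the other processors: combined with the DUMP_VECTOR-to-NMI rewrite in
__send_IPI_shortcut(), the IPI reaches CPUs even when they have interrupts
disabled. A hedged sketch of a caller; dump_begin() is hypothetical, only
dump_send_ipi() comes from this patch:

    static void dump_begin(void)        /* hypothetical dump path */
    {
            local_irq_disable();
            dump_send_ipi();            /* NMI the other CPUs so they park, */
                                        /* even if they run with IRQs off   */
            /* ... walk memory and write out the dump image ... */
    }
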
@@ -548,7 +567,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
        return 0;
 }
 
-static void stop_this_cpu (void * dummy)
+void stop_this_cpu (void * dummy)
 {
        /*
         * Remove this CPU:
@@ -574,6 +593,8 @@ void smp_send_stop(void)
        local_irq_enable();
 }
 
+EXPORT_SYMBOL(smp_send_stop);
+
 /*
  * Reschedule call back. Nothing to do,
  * all the work is done automatically when
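
Two visibility changes ride along here: stop_this_cpu() loses its static
qualifier so other core code (such as the dump machinery) can call it, and
smp_send_stop() gains an export so modules can halt the other CPUs. A
sketch of module-side use; emergency_halt() is hypothetical:

    #include <linux/smp.h>

    static void emergency_halt(void)    /* hypothetical module code */
    {
            smp_send_stop();            /* park every other CPU */
            /* ... module-specific last-gasp work ... */
    }
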
@@ -609,4 +630,3 @@ asmlinkage void smp_call_function_interrupt(void)
                atomic_inc(&call_data->finished);
        }
 }
-