#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/dump.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
*/
cfg = __prepare_ICR(shortcut, vector);
+ if (vector == DUMP_VECTOR) {
+ /*
+ * Set up the DUMP IPI to be delivered as an NMI
+ */
+ cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+ }
+
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
* program the ICR
*/
cfg = __prepare_ICR(0, vector);
-
+
+ if (vector == DUMP_VECTOR) {
+ /*
+ * Set up the DUMP IPI to be delivered as an NMI
+ */
+ cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+ }
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
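Both hunks above make the same change in the two low-level IPI send
paths: when DUMP_VECTOR is being sent, the ICR value is rewritten so the
IPI is delivered as an NMI rather than a fixed-vector interrupt, which
lets it reach a CPU that is spinning with interrupts disabled. A sketch
of the bit manipulation, with the constant values from <asm/apicdef.h>
shown for illustration:

    /*
     * ICR low-word fields (values as found in <asm/apicdef.h>):
     *   APIC_VECTOR_MASK  0x000FF  - bits 0-7: interrupt vector
     *   APIC_DM_NMI       0x00400  - bits 8-10: delivery mode = NMI
     */
    cfg &= ~APIC_VECTOR_MASK;   /* vector field is ignored for NMI delivery */
    cfg |= APIC_DM_NMI;         /* deliver as NMI */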
if (flush_mm == cpu_tlbstate[cpu].active_mm) {
if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+#ifndef CONFIG_X86_SWITCH_PAGETABLES
if (flush_va == FLUSH_ALL)
local_flush_tlb();
else
__flush_tlb_one(flush_va);
+#endif
} else
leave_mm(cpu);
}
spin_unlock(&tlbstate_lock);
}
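The #ifndef above comes from the 4G/4G split (CONFIG_X86_SWITCH_PAGETABLES):
with that option, user space runs on its own page tables, and the CR3
switch performed on the next return to user mode flushes the stale user
mappings anyway, so the cross-CPU flush handler only has to keep the
lazy-TLB bookkeeping straight. The handler's effective shape with the
option enabled (a sketch, assuming the surrounding stock 2.6 code):

    if (flush_mm == cpu_tlbstate[cpu].active_mm) {
        if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
            ;               /* nothing to do: the pagetable switch on
                               kernel exit flushes user entries */
        else
            leave_mm(cpu);  /* lazy-TLB CPU: detach from the mm */
    }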
-void flush_tlb_current_task(void)
-{
- struct mm_struct *mm = current->mm;
- cpumask_t cpu_mask;
-
- preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
-
- local_flush_tlb();
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
- preempt_enable();
-}
-
void flush_tlb_mm (struct mm_struct * mm)
{
cpumask_t cpu_mask;
if (current->active_mm == mm) {
if(current->mm)
- __flush_tlb_one(va);
+#ifndef CONFIG_X86_SWITCH_PAGETABLES
+ __flush_tlb_one(va)
+#endif
+ ;
else
leave_mm(smp_processor_id());
}
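The lone ';' left behind in flush_tlb_page() is deliberate: the flush
itself is compiled out under CONFIG_X86_SWITCH_PAGETABLES, but the if
statement still needs a body. Resolved, the construct reads:

    if (current->mm)
#ifndef CONFIG_X86_SWITCH_PAGETABLES
        __flush_tlb_one(va)
#endif
        ;       /* the bare ';' keeps the if well-formed when the
                   flush is configured away */
    else
        leave_mm(smp_processor_id());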
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
+void dump_send_ipi(void)
+{
+ send_IPI_allbutself(DUMP_VECTOR);
+}
+
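dump_send_ipi() is the new entry point the crash-dump code uses to pull
all other CPUs into the dump path; because DUMP_VECTOR is rewritten to
NMI delivery in the send paths above, it reaches CPUs even if they have
interrupts disabled. A hypothetical caller, to show the intent (the
function name and the delay are assumptions, not part of this patch):

    static void freeze_other_cpus(void)
    {
        dump_send_ipi();    /* NMI every other CPU */
        mdelay(10);         /* crude grace period for them to park */
    }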
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * <wait> If 1, wait (atomically) until <func> has completed on all other CPUs.
+ * If 0, wait until the IPI has been acknowledged by the other CPUs, but do
+ * not wait for <func> to complete on each CPU.
+ * If -1, do not wait for the other CPUs to acknowledge the IPI at all.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <func> or have already executed it.
*
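The new wait == -1 mode exists for contexts where spinning on the ack
counters could deadlock, e.g. a crash path where another CPU may never
respond. A sketch of such a caller (dump_save_regs is a hypothetical
callback, not defined by this patch):

    /* wait == -1: fire and forget, do not spin on any counter */
    smp_call_function(dump_save_regs, NULL, 0, -1);

Note that with -1 the on-stack call data may go out of scope while
remote CPUs are still reading it through call_data, which is presumably
tolerable only because the caller is already crashing.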
return 0;
/* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ /* Only if we are waiting for the other CPUs to ack */
+ WARN_ON(irqs_disabled() && wait >= 0);
data.func = func;
data.info = info;
atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
+ data.wait = wait > 0 ? wait : 0;
+ if (wait > 0)
atomic_set(&data.finished, 0);
spin_lock(&call_lock);
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
/* Wait for response */
- while (atomic_read(&data.started) != cpus)
- barrier();
+ if (wait >= 0)
+ while (atomic_read(&data.started) != cpus)
+ barrier();
- if (wait)
+ if (wait > 0)
while (atomic_read(&data.finished) != cpus)
barrier();
spin_unlock(&call_lock);
return 0;
}
-static void stop_this_cpu (void * dummy)
+void stop_this_cpu (void * dummy)
{
/*
* Remove this CPU:
local_irq_enable();
}
+EXPORT_SYMBOL(smp_send_stop);
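stop_this_cpu() loses its static qualifier so other dump code can reach
it, and smp_send_stop() is exported so that a modular dump driver can
quiesce the machine before writing the image out. Usage is the obvious
one-liner (the module context is an assumption):

    smp_send_stop();    /* IPIs every other CPU into stop_this_cpu() */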
+
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
atomic_inc(&call_data->finished);
}
}
-
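The atomic_inc() above is the handler-side half of the handshake: the
started counter answers the wait >= 0 spin in smp_call_function(), and
finished answers the wait > 0 spin. For reference, the handler's rough
shape in the stock 2.6 source (a sketch reconstructed from memory; the
patch leaves it alone, relying on data.wait being stored as 0 or 1):

    asmlinkage void smp_call_function_interrupt(void)
    {
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Ack that this CPU has grabbed the call data and is about to
         * run the function; ack again once it has completed, but only
         * if the initiator is waiting for that.
         */
        mb();
        atomic_inc(&call_data->started);
        (*func)(info);
        if (wait) {
            mb();
            atomic_inc(&call_data->finished);
        }
    }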