#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/dump.h>
#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#include <mach_ipi.h>
+#include <asm/desc.h>
#include <mach_apic.h>
/*
* about nothing of note with C stepping upwards.
*/
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
/*
* the following functions deal with sending IPIs between CPUs.
return SET_APIC_DEST_FIELD(mask);
}
-inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
*/
cfg = __prepare_ICR(shortcut, vector);
+ if (vector == DUMP_VECTOR) {
+ /*
+ * Set up the DUMP IPI to be delivered as an NMI
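+ * so that it is taken even by CPUs that are spinning with
+ * interrupts disabled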
+ */
+ cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+ }
+
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
/*
* This is only used on smaller machines.
*/
-inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
- unsigned long mask = cpus_coerce(cpumask);
+ unsigned long mask = cpus_addr(cpumask)[0];
unsigned long cfg;
unsigned long flags;
* program the ICR
*/
cfg = __prepare_ICR(0, vector);
-
+
+ if (vector == DUMP_VECTOR) {
+ /*
+ * Set up the DUMP IPI to be delivered as an NMI
+ */
+ cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
+ }
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
local_irq_restore(flags);
}
+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
+
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
*/
static inline void leave_mm (unsigned long cpu)
{
- if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
BUG();
- cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
* 2) Leave the mm if we are in the lazy tlb mode.
*/
-asmlinkage void smp_invalidate_interrupt (void)
+fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
unsigned long cpu;
cpu = get_cpu();
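+ /* Refresh this CPU's user code segment descriptor for the current mm (exec-shield). */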
+ if (current->active_mm)
+ load_user_cs_desc(cpu, current->active_mm);
if (!cpu_isset(cpu, flush_cpumask))
goto out;
* BUG();
*/
- if (flush_mm == cpu_tlbstate[cpu].active_mm) {
- if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
if (flush_va == FLUSH_ALL)
local_flush_tlb();
else
unsigned long cpu = smp_processor_id();
__flush_tlb_all();
- if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
leave_mm(cpu);
}
void flush_tlb_all(void)
{
- on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
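+/*
+ * Broadcast the dump IPI to every other online CPU. DUMP_VECTOR is
+ * rewritten to NMI delivery in the send_IPI paths above, so the
+ * target CPUs take it even with interrupts disabled.
+ */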
+void dump_send_ipi(void)
+{
+ send_IPI_allbutself(DUMP_VECTOR);
}
/*
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * <wait> If 1, wait (atomically) until function has completed on other CPUs.
+ * If 0, wait for the IPI to be received by other CPUs, but do not wait
+ * for the completion of the function on each CPU.
+ * If -1, do not wait for other CPUs to receive the IPI.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <func> or have already executed it.
*
* hardware interrupt handler or from a bottom half handler.
*/
{
- struct call_data_struct data;
+ static struct call_data_struct dumpdata;
+ struct call_data_struct normaldata;
+ struct call_data_struct *data;
int cpus = num_online_cpus()-1;
if (!cpus)
return 0;
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
+ /* Can deadlock when called with interrupts disabled */
+ /* Only if we are waiting for other CPUs to ack */
+ WARN_ON(irqs_disabled() && wait >= 0);
spin_lock(&call_lock);
- call_data = &data;
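+ /*
+ * A wait == -1 caller returns without waiting for the other CPUs to
+ * read call_data, so its call data cannot live on the stack; the
+ * static dumpdata is used instead.
+ */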
+ if (wait == -1) {
+ /* if another cpu beat us, they win! */
+ if (dumpdata.func) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+ data = &dumpdata;
+ } else
+ data = &normaldata;
+
+ data->func = func;
+ data->info = info;
+ atomic_set(&data->started, 0);
+ data->wait = wait > 0 ? wait : 0;
+ if (wait > 0)
+ atomic_set(&data->finished, 0);
+
+ call_data = data;
mb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
/* Wait for response */
- while (atomic_read(&data.started) != cpus)
- barrier();
+ if (wait >= 0)
+ while (atomic_read(&data->started) != cpus)
+ cpu_relax();
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
+ if (wait > 0)
+ while (atomic_read(&data->finished) != cpus)
+ cpu_relax();
spin_unlock(&call_lock);
return 0;
}
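+
+/*
+ * Usage sketch (illustrative only): a crash-dump handler could capture
+ * remote CPU state without risking a hang on an unresponsive CPU by
+ * passing wait == -1, e.g.
+ *
+ * smp_call_function(dump_save_this_cpu, NULL, 0, -1);
+ *
+ * where dump_save_this_cpu() is a hypothetical per-CPU state-saving
+ * function; with wait == -1 the caller does not even wait for the IPI
+ * to be acknowledged.
+ */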
-static void stop_this_cpu (void * dummy)
+void stop_this_cpu (void * dummy)
{
/*
* Remove this CPU:
local_irq_enable();
}
+EXPORT_SYMBOL(smp_send_stop);
+
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
* we return from the interrupt.
*/
-asmlinkage void smp_reschedule_interrupt(void)
+fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
}
-asmlinkage void smp_call_function_interrupt(void)
+fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
atomic_inc(&call_data->finished);
}
}
-