#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
-#include <linux/dump.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
/*
 * No need to touch the target chip field
 */
cfg = __prepare_ICR(shortcut, vector);
- if (vector == DUMP_VECTOR) {
- /*
- * Setup DUMP IPI to be delivered as an NMI
- */
- cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
- }
-
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
unsigned long flags;
local_irq_save(flags);
-
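+ /* Warn if the mask names a CPU that is not currently online. */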
+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
/*
* Wait for idle.
*/
local_irq_restore(flags);
}
-inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
unsigned long cfg, flags;
unsigned int query_cpu;
/*
 * program the ICR
 */
cfg = __prepare_ICR(0, vector);
-
- if (vector == DUMP_VECTOR) {
- /*
- * Setup DUMP IPI to be delivered as an NMI
- */
- cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
- }
+
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
-static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff
/*
* 2) Leave the mm if we are in the lazy tlb mode.
*/
-asmlinkage void smp_invalidate_interrupt (void)
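+/*
+ * TLB-flush IPI entry point. fastcall lets the low-level stub pass
+ * pt_regs in a register; regs is unused here.
+ */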
+fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
unsigned long cpu;
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va)
{
- cpumask_t tmp;
/*
* A couple of (to be removed) sanity checks:
*
- * - we do not send IPIs to not-yet booted CPUs.
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpus_empty(cpumask));
-
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
BUG_ON(!mm);
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
/*
* I'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is.
*/
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
__flush_tlb_all();
}
void flush_tlb_all(void)
{
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
-void dump_send_ipi(void)
-{
- send_IPI_allbutself(DUMP_VECTOR);
-}
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing anything.
*/
void smp_send_reschedule(int cpu)
{
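+ /* An offline target here means the caller raced with CPU hotplug. */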
+ WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
* Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
*/
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
struct call_data_struct {
void (*func) (void *info);
void *info;
atomic_t started;
atomic_t finished;
int wait;
};
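+/*
+ * Held by the CPU bring-up path as well, so that a CPU cannot be
+ * added to cpu_online_map in the middle of an IPI broadcast.
+ */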
+void lock_ipi_call_lock(void)
+{
+ spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+ spin_unlock_irq(&call_lock);
+}
+
static struct call_data_struct * call_data;
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
struct call_data_struct data;
- int cpus = num_online_cpus()-1;
+ int cpus;
- if (!cpus)
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ cpus = num_online_cpus() - 1;
+ if (!cpus) {
+ spin_unlock(&call_lock);
return 0;
+ }
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
call_data = &data;
mb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
/* Wait for response */
while (atomic_read(&data.started) != cpus)
- barrier();
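+ /* cpu_relax() executes pause, easing off SMT siblings while we spin. */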
+ cpu_relax();
if (wait)
while (atomic_read(&data.finished) != cpus)
- barrier();
+ cpu_relax();
spin_unlock(&call_lock);
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
-void stop_this_cpu (void * dummy)
+static void stop_this_cpu (void * dummy)
{
/*
* Remove this CPU:
*/
local_irq_disable();
disable_local_APIC();
if (cpu_data[smp_processor_id()].hlt_works_ok)
- for(;;) __asm__("hlt");
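+ /* halt() is the arch wrapper around the hlt instruction. */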
+ for(;;) halt();
for (;;);
}
local_irq_enable();
}
-EXPORT_SYMBOL(smp_send_stop);
-
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
* we return from the interrupt.
*/
-asmlinkage void smp_reschedule_interrupt(void)
+fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
}
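+/*
+ * Handler for the call-function IPI: bump 'started', run the
+ * callback, and bump 'finished' when the caller asked to wait.
+ */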
-asmlinkage void smp_call_function_interrupt(void)
+fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
atomic_inc(&call_data->finished);
}
}