X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Fsmp.c;h=4149a0d49c4ff0fd9010f96c67053b5ea5ced483;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=7f56a498664d7c1e4d448cba6cb4a212904c1b18;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 7f56a4986..4149a0d49 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -28,11 +28,10 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
-#include
 #include
 #include
 #include
@@ -53,7 +52,7 @@
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
  */
-static spinlock_t call_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
 
 struct call_data_struct {
         void (*func) (void *info);
@@ -71,10 +70,23 @@ static volatile struct call_data_struct *call_data;
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
 
+extern void cpu_halt (void);
+
+void
+lock_ipi_calllock(void)
+{
+        spin_lock_irq(&call_lock);
+}
+
+void
+unlock_ipi_calllock(void)
+{
+        spin_unlock_irq(&call_lock);
+}
+
 static void
 stop_this_cpu (void)
 {
-        extern void cpu_halt (void);
         /*
          * Remove this CPU:
          */
@@ -84,6 +96,17 @@ stop_this_cpu (void)
         cpu_halt();
 }
 
+void
+cpu_die(void)
+{
+        max_xtp();
+        local_irq_disable();
+        cpu_halt();
+        /* Should never be here */
+        BUG();
+        for (;;);
+}
+
 irqreturn_t
 handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -202,7 +225,7 @@ smp_send_reschedule (int cpu)
 void
 smp_flush_tlb_all (void)
 {
-        on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
+        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
 }
 EXPORT_SYMBOL(smp_flush_tlb_all);
 
@@ -267,11 +290,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 
         /* Wait for response */
         while (atomic_read(&data.started) != cpus)
-                barrier();
+                cpu_relax();
 
         if (wait)
                 while (atomic_read(&data.finished) != cpus)
-                        barrier();
+                        cpu_relax();
         call_data = NULL;
 
         spin_unlock_bh(&call_lock);
@@ -308,6 +331,9 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
         if (!cpus)
                 return 0;
 
+        /* Can deadlock when called with interrupts disabled */
+        WARN_ON(irqs_disabled());
+
         data.func = func;
         data.info = info;
         atomic_set(&data.started, 0);
@@ -323,11 +349,11 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
 
         /* Wait for response */
         while (atomic_read(&data.started) != cpus)
-                barrier();
+                cpu_relax();
 
         if (wait)
                 while (atomic_read(&data.finished) != cpus)
-                        barrier();
+                        cpu_relax();
         call_data = NULL;
 
         spin_unlock(&call_lock);
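
The recurring change in the two smp_call_function paths above swaps barrier() for cpu_relax() in the IPI wait loops. As a rough userspace illustration (not part of the patch, and using C11 atomics plus sched_yield() as stand-ins for the kernel's atomic_read() and cpu_relax()), the same busy-wait shape looks like this:

/* Hypothetical userspace sketch, not kernel code: one waiter spinning on a
 * counter that worker threads increment, mirroring the data.started /
 * data.finished wait loops in smp_call_function(). */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int finished;             /* plays the role of data.finished */

static void *worker(void *arg)
{
        (void)arg;
        /* ... do the per-CPU work here ... */
        atomic_fetch_add(&finished, 1); /* like the IPI handler bumping data->finished */
        return NULL;
}

int main(void)
{
        pthread_t t[NWORKERS];

        for (int i = 0; i < NWORKERS; i++)
                pthread_create(&t[i], NULL, worker, NULL);

        /* Wait for response: spin until every worker has checked in.
         * sched_yield() is only a crude stand-in for cpu_relax(). */
        while (atomic_load(&finished) != NWORKERS)
                sched_yield();

        for (int i = 0; i < NWORKERS; i++)
                pthread_join(t[i], NULL);

        printf("all %d workers finished\n", NWORKERS);
        return 0;
}

In the kernel itself cpu_relax() expands to an architecture-specific pause hint rather than a scheduler call, so the waiting CPU signals that it is spinning without giving up the processor; barrier() only prevented the compiler from caching the atomic counter across iterations.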