#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
+#include <linux/bitops.h>
#include <asm/atomic.h>
-#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
* requirements. It also looks cleaner.
*/
-static spinlock_t call_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
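/* Aside: DEFINE_SPINLOCK() is preferred because the initializer carries the
 * lock's name. Roughly, from <linux/spinlock_types.h> of this era:
 *
 *   #define DEFINE_SPINLOCK(x)  spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
 *
 * __SPIN_LOCK_UNLOCKED(x) lets lockdep identify the lock by name, which the
 * anonymous SPIN_LOCK_UNLOCKED constant cannot do (and which is why that
 * constant was eventually removed).
 */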
struct call_data_struct {
void (*func) (void *info);
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i) && i != smp_processor_id())
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
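/* Aside: a rough sketch of the equivalence, using the cpumask helpers of
 * this era from <linux/cpumask.h>:
 *
 *   #define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
 *
 * The iterator only visits online CPU ids, so the explicit cpu_online(i)
 * test is redundant and the loop no longer walks all NR_CPUS slots.
 */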
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- send_IPI_single(i, op);
+ for_each_online_cpu(i)
+ send_IPI_single(i, op);
}
/*
{
on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}
-EXPORT_SYMBOL(smp_flush_tlb_all);
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
+ preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
{
local_finish_flush_tlb_mm(mm);
+ preempt_enable();
return;
}
+ preempt_enable();
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* have been running in the address space. It's not clear that this is worth the
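/* Aside: the race the preempt_disable()/preempt_enable() pair closes
 * (CPU numbers purely illustrative):
 *
 *   if (mm == current->active_mm && ...)    <- evaluated on CPU 0
 *       <task preempted here, migrates to CPU 1>
 *       local_finish_flush_tlb_mm(mm);      <- flushes CPU 1's TLB,
 *                                              leaving CPU 0 stale
 *
 * With preemption off, the check and the local flush are guaranteed to
 * run on the same CPU.
 */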
int me = get_cpu(); /* prevent preemption and rescheduling on another processor */
if (cpuid == me) {
- printk("%s: trying to call self\n", __FUNCTION__);
+ printk(KERN_INFO "%s: trying to call self\n", __func__);
put_cpu();
return -EBUSY;
}
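/* Aside: get_cpu()/put_cpu() are roughly, from <linux/smp.h>:
 *
 *   #define get_cpu()  ({ preempt_disable(); smp_processor_id(); })
 *   #define put_cpu()  preempt_enable()
 *
 * so "me" cannot change underneath us between the self-call check above
 * and the IPI that follows.
 */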
/* Wait for response */
while (atomic_read(&data.started) != cpus)
- barrier();
+ cpu_relax();
if (wait)
while (atomic_read(&data.finished) != cpus)
- barrier();
+ cpu_relax();
call_data = NULL;
spin_unlock_bh(&call_lock);
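/* Aside: cpu_relax() is still a compiler barrier, so data.started and
 * data.finished are re-read on every pass, but it additionally tells the
 * CPU we are busy-waiting. On ia64 it is roughly
 *
 *   #define cpu_relax()  ia64_hint(ia64_hint_pause)
 *
 * i.e. the "hint @pause" instruction, which saves power and can yield
 * pipeline resources to an SMT sibling; a bare barrier() only forces the
 * reload and provides no such hint.
 */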
/* Wait for response */
while (atomic_read(&data.started) != cpus)
- barrier();
+ cpu_relax();
if (wait)
while (atomic_read(&data.finished) != cpus)
- barrier();
+ cpu_relax();
call_data = NULL;
spin_unlock(&call_lock);
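/* Aside: note the two paths use different lock flavours. Roughly,
 *
 *   spin_lock_bh(&call_lock)    == local_bh_disable() + spin_lock()
 *   spin_unlock_bh(&call_lock)  == spin_unlock() + local_bh_enable()
 *
 * so the _bh variant additionally keeps softirqs off while call_lock is
 * held, for the caller that may also be entered from bottom-half context.
 */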