#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/atomic.h>
#include <asm/current.h>
atomic_t finished;
};
-static struct call_data_struct * call_data;
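+/* set by smp_call_function() and read by remote CPUs in handle_IPI() */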
+static volatile struct call_data_struct *call_data;
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_KDUMP_CPU_STOP 3
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
}
irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
{
int this_cpu = get_cpu();
unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
case IPI_CPU_STOP:
stop_this_cpu();
break;
-
+#ifdef CONFIG_KEXEC
+ case IPI_KDUMP_CPU_STOP:
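+ /* build an unwind frame; kdump_cpu_freeze() then saves this CPU's state and parks it for the dump */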
+ unw_init_running(kdump_cpu_freeze, NULL);
+ break;
+#endif
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i) && i != smp_processor_id())
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- send_IPI_single(i, op);
+ for_each_online_cpu(i) {
+ send_IPI_single(i, op);
+ }
}
/*
send_IPI_single(smp_processor_id(), op);
}
+#ifdef CONFIG_KEXEC
+void
+kdump_smp_send_stop(void)
+{
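+ /* ask every other CPU to take the kdump freeze path instead of the normal stop_this_cpu() */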
+ send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init(void)
+{
+ unsigned int cpu, self_cpu;
+ self_cpu = smp_processor_id();
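+ /* CPUs that never checked in via kdump_cpu_freeze() (kdump_status still 0) are forced to stop with an INIT */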
+ for_each_online_cpu(cpu) {
+ if (cpu != self_cpu) {
+ if (kdump_status[cpu] == 0)
+ platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+ }
+ }
+}
+#endif
/*
* Called with preemption disabled.
*/
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
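+ /* disable preemption so the single-thread fast path and the local flush stay on one CPU */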
+ preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
{
local_finish_flush_tlb_mm(mm);
+ preempt_enable();
return;
}
+ preempt_enable();
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* have been running in the address space. It's not clear that this is worth the
int me = get_cpu(); /* prevent preemption and reschedule on another processor */
if (cpuid == me) {
- printk("%s: trying to call self\n", __FUNCTION__);
+ printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
put_cpu();
return -EBUSY;
}
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <nonatomic> currently unused.
- * <wait> If 1, wait (atomically) until function has complete on other
- * CPUs. If 0, wait for the IPI to be received by other CPUs, but
- * do not wait for the completion of the IPI on each CPU. If -1,
- * do not wait for other CPUs to receive IPI.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code.
*
* Does not return until remote CPUs are nearly ready to execute <func> or are or have
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
- static struct call_data_struct dumpdata;
- struct call_data_struct normaldata;
- struct call_data_struct *data;
- int cpus = num_online_cpus()-1;
+ struct call_data_struct data;
+ int cpus;
- if (!cpus)
+ spin_lock(&call_lock);
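+ /* call_lock serializes callers, so only one call_data is in flight at a time */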
+ cpus = num_online_cpus() - 1;
+ if (!cpus) {
+ spin_unlock(&call_lock);
return 0;
+ }
/* Can deadlock when called with interrupts disabled */
- /* Only if we are waiting for other CPU to ack */
- WARN_ON(irqs_disabled() && wait >= 0);
+ WARN_ON(irqs_disabled());
- spin_lock(&call_lock);
- if (wait == -1) {
- /* if another cpu beat us, they win! */
- if (dumpdata.func) {
- spin_unlock(&call_lock);
- return 0;
- }
- data = &dumpdata;
- } else
- data = &normaldata;
-
- data->func = func;
- data->info = info;
- atomic_set(&data->started, 0);
- data->wait = wait > 0 ? wait : 0;
- if (wait > 0)
- atomic_set(&data->finished, 0);
-
- call_data = data;
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
send_IPI_allbutself(IPI_CALL_FUNC);
/* Wait for response */
- if (wait >= 0)
- while (atomic_read(&data->started) != cpus)
- cpu_relax();
+ while (atomic_read(&data.started) != cpus)
+ cpu_relax();
- if (wait > 0)
- while (atomic_read(&data->finished) != cpus)
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
cpu_relax();
-
- if (wait >= 0)
- call_data = NULL;
+ call_data = NULL;
spin_unlock(&call_lock);
return 0;