X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Fsmp.c;h=f4c7f7769cf7df617bdb7cecefd5be4802bb840e;hb=refs%2Fheads%2Fvserver;hp=953095e2ce157d93c4e9666678b4009ccf171b9c;hpb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;p=linux-2.6.git

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 953095e2c..f4c7f7769 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/efi.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/atomic.h>
 #include <asm/current.h>
@@ -66,6 +67,7 @@ static volatile struct call_data_struct *call_data;
 
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -108,7 +110,7 @@ cpu_die(void)
 }
 
 irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
 {
 	int this_cpu = get_cpu();
 	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -155,7 +157,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 	      case IPI_CPU_STOP:
 		stop_this_cpu();
 		break;
-
+#ifdef CONFIG_KEXEC
+	      case IPI_KDUMP_CPU_STOP:
+		unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+#endif
 	      default:
 		printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
 		break;
@@ -185,8 +191,8 @@ send_IPI_allbutself (int op)
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -199,9 +205,9 @@ send_IPI_all (int op)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			send_IPI_single(i, op);
+	for_each_online_cpu(i) {
+		send_IPI_single(i, op);
+	}
 }
 
 /*
@@ -213,6 +219,26 @@ send_IPI_self (int op)
 	send_IPI_single(smp_processor_id(), op);
 }
 
+#ifdef CONFIG_KEXEC
+void
+kdump_smp_send_stop()
+{
+	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init()
+{
+	unsigned int cpu, self_cpu;
+	self_cpu = smp_processor_id();
+	for_each_online_cpu(cpu) {
+		if (cpu != self_cpu) {
+			if(kdump_status[cpu] == 0)
+				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+		}
+	}
+}
+#endif
 /*
  * Called with preeemption disabled.
  */
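A note on the kdump helpers added in the hunk above: kdump_smp_send_stop() freezes the other CPUs with an ordinary IPI (IPI_KDUMP_CPU_STOP, handled via unw_init_running(kdump_cpu_freeze, NULL) in handle_IPI()), while kdump_smp_send_init() is the fallback for CPUs that never acknowledged the freeze, since a hardware INIT sent through platform_send_ipi() is delivered even on a CPU that is spinning with interrupts disabled. A minimal sketch of how a crash path might chain the two, assuming kdump_status[] is set by kdump_cpu_freeze() on each CPU that handled the IPI; the helper name and the settle delay below are illustrative, not part of this patch:

	#include <linux/delay.h>
	#include <linux/kexec.h>

	/* Sketch only: assumes kdump_status[] is set by kdump_cpu_freeze()
	 * on every CPU that managed to handle IPI_KDUMP_CPU_STOP. */
	static void kdump_freeze_other_cpus(void)
	{
		kdump_smp_send_stop();	/* ordinary IPI first */
		udelay(1000);		/* ~1 ms for CPUs to enter the freeze */
		kdump_smp_send_init();	/* INIT any CPU that did not respond */
	}
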
@@ -231,13 +257,16 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	preempt_disable();
 	/* this happens for the common case of a single-threaded fork():  */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
 	{
 		local_finish_flush_tlb_mm(mm);
+		preempt_enable();
 		return;
 	}
 
+	preempt_enable();
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
 	 * have been running in the address space.  It's not clear that this is worth the
@@ -269,7 +298,7 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
 	if (cpuid == me) {
-		printk("%s: trying to call self\n", __FUNCTION__);
+		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
 		put_cpu();
 		return -EBUSY;
 	}
@@ -325,10 +354,14 @@ int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
+	int cpus;
 
-	if (!cpus)
+	spin_lock(&call_lock);
+	cpus = num_online_cpus() - 1;
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -340,8 +373,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
-
 	call_data = &data;
 	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
 	send_IPI_allbutself(IPI_CALL_FUNC);
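
A note on the call_lock movement in the last two hunks: previously cpus was computed from num_online_cpus() before call_lock was taken, so the set of online CPUs could change between counting the targets and broadcasting IPI_CALL_FUNC, and the wait loop on data.started could then spin on a stale count. Taking the lock before the count keeps the two consistent. For reference, a hedged usage sketch against the four-argument smp_call_function() this file implements; the helper names and the counter are illustrative, not from this patch:

	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t probes = ATOMIC_INIT(0);

	/* Runs on every *other* online CPU, in interrupt context: must not sleep. */
	static void probe_one_cpu(void *info)
	{
		atomic_inc(&probes);
	}

	static int probe_all_cpus(void)
	{
		/* nonatomic == 0, wait == 1: return only after every CPU ran the handler */
		return smp_call_function(probe_one_cpu, NULL, 0, 1);
	}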