/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	-1ULL
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, so in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu; there are no
 * write/read ordering problems.
 */
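
/*
 * Illustrative sketch of the switch_mm() ordering described above.
 * This is NOT the real implementation (that lives in asm/mmu_context.h);
 * the old_mm/new_mm/cpu parameters are hypothetical, shown only to make
 * steps 1a1-1a4 and 1b1-1b3 concrete. Kept under #if 0 so it is never built.
 */
#if 0
static void switch_mm_sketch(struct mm_struct *old_mm,
			     struct mm_struct *new_mm, unsigned long cpu)
{
	if (old_mm != new_mm) {
		/* case 1a: switch to a different mm */
		clear_bit(cpu, &old_mm->cpu_vm_mask);	/* 1a1: stop flush ipis for old_mm */
		write_pda(mmu_state, TLBSTATE_OK);	/* 1a2: no longer lazy */
		write_pda(active_mm, new_mm);		/* 1a3: accept flushes for new_mm */
		set_bit(cpu, &new_mm->cpu_vm_mask);	/* 1a4: other cpus now send ipis */
		load_cr3(new_mm->pgd);
	} else {
		/* case 1b: same mm, possibly coming out of lazy tlb mode */
		write_pda(mmu_state, TLBSTATE_OK);	/* 1b1 */
		if (!test_and_set_bit(cpu, &new_mm->cpu_vm_mask))
			load_cr3(new_mm->pgd);		/* 1b3: leave_mm ran, reload */
	}
}
#endif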
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */
asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	cpu_clear(cpu, flush_cpumask);

out:
	put_cpu_no_resched();
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	cpus_and(tmp, cpumask, cpu_online_map);
	BUG_ON(!cpus_equal(tmp, cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/*
	 * We have to send the IPI only to the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		mb();	/* nothing; lockup detection does not belong here */

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
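
/*
 * Example (sketch, hypothetical caller): flush_tlb_all() is the
 * heavyweight hammer, for global kernel mapping changes where no single
 * mm owns the stale entries. Kept under #if 0; shown only to illustrate
 * when a full flush, rather than flush_tlb_page/mm, is warranted.
 */
#if 0
static void example_change_kernel_mapping(void)
{
	/* ... rewrite a kernel pte, e.g. to change its protections ... */
	flush_tlb_all();	/* every cpu must drop the stale entry */
}
#endif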
void smp_kdb_stop(void)
{
	send_IPI_allbutself(KDB_VECTOR);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func)(void *info), void *info,
				int nonatomic, int wait)
{
	static struct call_data_struct dumpdata;
	struct call_data_struct normaldata;
	struct call_data_struct *data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	if (wait == -1) {
		/* if another cpu beat us, they win! */
		if (dumpdata.func) {
			spin_unlock(&call_lock);
			return;
		}
		data = &dumpdata;
	} else
		data = &normaldata;

	data->func = func;
	data->info = info;
	atomic_set(&data->started, 0);
	data->wait = wait > 0 ? wait : 0;
	if (wait == 1)
		atomic_set(&data->finished, 0);

	call_data = data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data->started) != cpus)
		cpu_relax();

	if (wait != 1)
		return;

	while (atomic_read(&data->finished) != cpus)
		cpu_relax();
}
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If 1, wait (atomically) until function has completed on other CPUs.
 *	If 0, wait for the IPI to be received by other CPUs, but do not wait
 *	for the completion of the IPI on each CPU. If -1, do not wait for
 *	other CPUs to receive the IPI.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func, are executing it, or have
 * already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
		      int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}
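
/*
 * Example (sketch, hypothetical code): the calling convention for
 * smp_call_function(). The callback runs in interrupt context on each
 * remote CPU, so it must be fast and must not sleep; an atomic counter
 * is safe. With wait == 1 the on-stack info stays in scope until all
 * remote CPUs are done. Kept under #if 0 so it is never built.
 */
#if 0
static void example_count_cpu(void *info)
{
	atomic_inc((atomic_t *)info);	/* fast and non-blocking */
}

static void example_usage(void)
{
	atomic_t count = ATOMIC_INIT(0);

	/* wait == 1: do not return until every other CPU has run func */
	smp_call_function(example_count_cpu, &count, 0, 1);
	printk("%d other cpus responded\n", atomic_read(&count));
}
#endif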
void smp_stop_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		asm("hlt");
}
void smp_send_stop(void)
{
	int nolock = 0;

	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify the initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait == 1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
int safe_smp_processor_id(void)
{
	int apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}

	/* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
	 * or called too early. Either way, we must be CPU 0. */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return 0; /* Should not happen */
}