/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>

#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#endif
#ifndef CONFIG_XEN
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right per-CPU variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future, when interrupts are split into per-CPU domains, this could
 *	be fixed, at the cost of triggering multiple IPIs in some cases.
 */
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per-CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per-CPU data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
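/*
 * Illustrative sketch, not part of the original file: how a sender CPU
 * is hashed onto one of the NUM_INVALIDATE_TLB_VECTORS (8) per-vector
 * slots described above. With 8 vectors, CPU 3 and CPU 11 both hash to
 * slot 3 and therefore serialize on the same tlbstate_lock. The helper
 * name is hypothetical; flush_tlb_others() below does this inline.
 */
#if 0	/* compiled out: example only */
static union smp_flush_state *pick_flush_slot(void)
{
	int sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;

	/* The slot index doubles as the IPI vector offset the receivers decode. */
	return &per_cpu(flush_state, sender);
}
#endif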
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
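/*
 * For context: a sketch of the counterpart that puts a CPU into lazy
 * TLB mode, modeled on enter_lazy_tlb() from asm/mmu_context.h
 * (simplified here, shown for illustration only). A CPU in
 * TLBSTATE_LAZY keeps the old mm loaded; leave_mm() above is what
 * detaches it once a flush would otherwise be required.
 */
#if 0	/* compiled out: example only */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
}
#endif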
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%rsp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%rsp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *	runs in kernel space, the cpu could load tlb entries for user space
 *	pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
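/*
 * Illustrative sketch, not the kernel's switch_mm(): the 1a) steps above
 * written out in order. The function name and simplifications are
 * hypothetical; see asm/mmu_context.h for the real implementation.
 */
#if 0	/* compiled out: example only */
static void switch_mm_sketch(struct mm_struct *old_mm, struct mm_struct *next,
			     unsigned long cpu)
{
	cpu_clear(cpu, old_mm->cpu_vm_mask);	/* 1a1: stop flush ipis for old_mm */
	write_pda(mmu_state, TLBSTATE_OK);	/* 1a2: ipi won't call leave_mm() */
	write_pda(active_mm, next);		/* 1a3: accept flushes for next */
	cpu_set(cpu, next->cpu_vm_mask);	/* 1a4: others start sending ipis */
	load_cr3(next->pgd);			/* 1a5: switch page tables */
}
#endif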
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the interrupt vector - 256.
	 * Use that to determine where the sender put the data.
	 */
	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		mb();	/* nothing. lockup detection does not belong here */

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
int __cpuinit init_smp_flush(void)
{
	int i;

	/* Pre-initialize each per-CPU flush_state lock */
	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
#else /* CONFIG_XEN: TLB flushes go through hypercalls */
asmlinkage void smp_invalidate_interrupt(void)
{ return; }
void flush_tlb_current_task(void)
{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
void flush_tlb_mm(struct mm_struct *mm)
{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
void flush_tlb_all(void)
{ xen_tlb_flush_all(); }
#endif /* CONFIG_XEN */
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}
/*
 * This function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			   int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
/*
 * smp_call_function_single - Run a function on another CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	if (cpu == me) {
		put_cpu();
		return -EBUSY;	/* calling self this way is not supported */
	}

	spin_lock_bh(&call_lock);
	__smp_call_function_single(cpu, func, info, nonatomic, wait);
	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
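/*
 * Usage sketch (hypothetical caller, illustration only): run a fast,
 * non-blocking callback on CPU 1 from process context and wait for it
 * to finish. The callback runs in interrupt context on the target CPU,
 * so it must not sleep. Names below are invented for the example.
 */
#if 0	/* compiled out: example only */
static void poke_cpu(void *info)
{
	/* runs on the target CPU, in interrupt context */
}

static void example_single(void)
{
	if (smp_call_function_single(1, poke_cpu, NULL, 0, 1) != 0)
		printk(KERN_WARNING "IPI to CPU 1 failed\n");
}
#endif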
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}
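/*
 * Usage sketch (hypothetical, illustration only): broadcast a callback
 * to every other online CPU. Because @wait is 1, the on-stack 'data'
 * inside __smp_call_function() stays live until every CPU has bumped
 * the finished counter, so passing stack data via @info would also be
 * safe here. Note smp_call_function() skips the calling CPU.
 */
#if 0	/* compiled out: example only */
static void sync_state(void *info)
{
	/* runs on every other CPU, in interrupt context */
}

static void example_all(void)
{
	smp_call_function(sync_state, NULL, 0, 1);
	sync_state(NULL);	/* the calling CPU must run it itself */
}
#endif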
void smp_stop_cpu(void)
{
	unsigned long flags;

	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_save(flags);
#ifndef CONFIG_XEN
	disable_local_APIC();
#endif
	local_irq_restore(flags);
}
static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		asm("hlt");
}
void smp_send_stop(void)
{
	int nolock = 0;

	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
#ifndef CONFIG_XEN
	disable_local_APIC();
#endif
	local_irq_enable();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
#ifndef CONFIG_XEN
asmlinkage void smp_reschedule_interrupt(void)
#else
asmlinkage irqreturn_t smp_reschedule_interrupt(void)
#endif
{
#ifndef CONFIG_XEN
	ack_APIC_irq();
#else
	return IRQ_HANDLED;
#endif
}
#ifndef CONFIG_XEN
asmlinkage void smp_call_function_interrupt(void)
#else
asmlinkage irqreturn_t smp_call_function_interrupt(void)
#endif
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

#ifndef CONFIG_XEN
	ack_APIC_irq();
#endif
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
#ifdef CONFIG_XEN
	return IRQ_HANDLED;
#endif
}
int safe_smp_processor_id(void)
{
#ifdef CONFIG_XEN
	return smp_processor_id();
#else
	int apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}

	/* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
	 * or called too early. Either way, we must be CPU 0. */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return 0; /* Should not happen */
#endif
}