/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/dump.h>

#include <asm/tlbflush.h>
#include <mach_apic.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *		except occasional delivery of 'spurious interrupt' as trap #15.
 *		This is very rare and a non-problem.
 *
 *	1AP.	Linux maps the APIC as non-cacheable.
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs.
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup.
 *	9AP.	We do not use mixed mode.
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *		machines with B stepping processors can fail.  XXX should
 *		provide an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang.  There are hardware workarounds
 *		for this.  We warn about it in case your board doesn't have
 *		the workarounds.  Basically that's so I can tell anyone with
 *		a B stepping CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected.  Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation.  Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	nothing of note with C stepping upwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
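/*
 * struct tlb_state itself comes from <asm/tlbflush.h>; the sketch below
 * is only an illustration of the two fields this file relies on
 * (inferred from the initializer above and the uses further down), not
 * the authoritative definition:
 *
 *	struct tlb_state {
 *		struct mm_struct *active_mm;	// mm currently loaded in cr3
 *		int state;			// TLBSTATE_OK or TLBSTATE_LAZY
 *	};
 */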
/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
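/*
 * Illustrative example (not part of the original code): sending the
 * call-function IPI to every CPU but ourselves boils down to
 *
 *	cfg = __prepare_ICR(APIC_DEST_ALLBUT, CALL_FUNCTION_VECTOR);
 *	apic_write_around(APIC_ICR, cfg);
 *
 * i.e. a fixed-delivery, logical-destination command word with the
 * shortcut bits and the vector OR-ed in; __prepare_ICR2() only fills
 * the destination field that goes into APIC_ICR2.
 */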
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector);

	if (vector == CRASH_DUMP_VECTOR) {
		/*
		 * Setup the DUMP IPI to be delivered as an NMI.
		 */
		cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
	}

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
void fastcall send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg, flags;

	local_irq_save(flags);
	apic_wait_icr_idle();

	/* prepare target chip field */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	cfg = __prepare_ICR(0, vector);

	/* Send the IPI. The write to APIC_ICR fires this off. */
	apic_write_around(APIC_ICR, cfg);
	local_irq_restore(flags);
}
void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		if (cpu_isset(query_cpu, mask)) {
			apic_wait_icr_idle();

			/* prepare target chip field */
			cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);

			cfg = __prepare_ICR(0, vector);
			if (vector == CRASH_DUMP_VECTOR) {
				/* Setup the DUMP IPI to be delivered as an NMI */
				cfg = (cfg & ~APIC_VECTOR_MASK) | APIC_DM_NMI;
			}

			/* Send the IPI. The write to APIC_ICR fires this off. */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	local_irq_restore(flags);
}
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
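/*
 * Note (sketch, based on the usual i386 subarchitecture split): on the
 * default "flat" APIC configuration mach_ipi.h is expected to provide
 * little more than
 *
 *	static inline void send_IPI_mask(cpumask_t mask, int vector)
 *	{
 *		send_IPI_mask_bitmask(mask, vector);
 *	}
 *
 * while the clustered/bigsmp configurations route send_IPI_mask() to
 * send_IPI_mask_sequence() instead.
 */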
/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * (A sketch of the resulting flush-IPI protocol follows the
 * declarations below.)
 */

static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff
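/*
 * The flush-IPI protocol these implement, in a nutshell (an illustration
 * of the code below, not additional machinery):
 *
 *	initiating CPU				target CPU
 *	--------------				----------
 *	spin_lock(&tlbstate_lock);
 *	flush_mm = mm; flush_va = va;
 *	set target bits in flush_cpumask
 *	send INVALIDATE_TLB_VECTOR   ---->	smp_invalidate_interrupt():
 *	spin until flush_cpumask empty		  flush or leave_mm(), then
 *	spin_unlock(&tlbstate_lock);  <----	  clear own bit in flush_cpumask
 */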
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us.
 */
static inline void leave_mm (unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */
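/*
 * Illustrative sketch (not the real switch_mm(), which lives in
 * <asm/mmu_context.h>) of the ordering described above for case 1a:
 *
 *	cpu_clear(cpu, prev->cpu_vm_mask);		// 1a1
 *	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;	// 1a2
 *	per_cpu(cpu_tlbstate, cpu).active_mm = next;	// 1a3
 *	cpu_set(cpu, next->cpu_vm_mask);		// 1a4
 *	load_cr3(next->pgd);
 *
 * A flush IPI that arrives between 1a1 and 1a4 is either ignored (wrong
 * mm) or causes at worst one superfluous local flush.
 */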
/*
 * The TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();
	if (current->active_mm)
		load_user_cs_desc(cpu, current->active_mm);

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return.
		 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	cpumask_t tmp;
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	cpus_and(tmp, cpumask, cpu_online_map);
	BUG_ON(!cpus_equal(cpumask, tmp));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;

		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif
	/*
	 * We have to send the IPI only to the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm (struct mm_struct * mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);
	preempt_enable();
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
void dump_send_ipi(void)
{
	send_IPI_allbutself(CRASH_DUMP_VECTOR);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
void crash_dump_send_ipi(void)
{
	send_IPI_allbutself(CRASH_DUMP_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If 1, wait (atomically) until the function has completed on the
 *  other CPUs.
 *  If 0, wait for the IPI to be received by the other CPUs, but do not
 *  wait for the completion of the function on each CPU.
 *  If -1, do not wait for the other CPUs to receive the IPI.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * the remote CPUs are at least about to execute <func> or have already
 * executed it.
 *
 * You must not call this function with interrupts disabled, or from a
 * hardware interrupt handler, or from a bottom half handler.
 *
 * (An illustrative usage sketch follows after this function.)
 */
{
	static struct call_data_struct dumpdata;
	struct call_data_struct normaldata;
	struct call_data_struct *data;
	int cpus = num_online_cpus()-1;

	/* Can deadlock when called with interrupts disabled */
	/* Only if we are waiting for other CPUs to ack the IPI */
	WARN_ON(irqs_disabled() && wait >= 0);

	spin_lock(&call_lock);
	if (wait == -1) {
		/* if another cpu beat us, they win! */
		if (dumpdata.func) {
			spin_unlock(&call_lock);
			return 0;
		}
		data = &dumpdata;
	} else
		data = &normaldata;

	data->func = func;
	data->info = info;
	atomic_set(&data->started, 0);
	data->wait = wait > 0 ? wait : 0;
	atomic_set(&data->finished, 0);
	call_data = data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data->started) != cpus)
		cpu_relax();
	if (wait > 0)
		while (atomic_read(&data->finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
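/*
 * Illustrative usage sketch (not from the original file): a caller that
 * wants every other online CPU to run a fast, non-blocking helper and
 * then waits for completion might do
 *
 *	static void drain_local_stats(void *info)	// hypothetical helper
 *	{
 *		...
 *	}
 *
 *	smp_call_function(drain_local_stats, NULL, 0, 1);
 *	drain_local_stats(NULL);	// the IPI skips the calling CPU
 *
 * Interrupts must be enabled at the call site and the helper must not
 * sleep; drain_local_stats() is a made-up name used only for this example.
 */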
void stop_this_cpu (void * dummy)
{
	/* Remove this CPU from the online map and halt it. */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for (;;) __asm__("hlt");
	for (;;);
}
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
EXPORT_SYMBOL(smp_send_stop);
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
}
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify the initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	(*func)(info);
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}