/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>

#include <asm/tlbflush.h>
#include <mach_apic.h>

#include <asm-xen/evtchn.h>
#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *    None of the E1AP-E3AP errata are visible to the user.
 *
 * Pentium II / [Xeon]
 *    None of the A1AP-A3AP errata are visible to the user.
 *
 * Pentium Pro
 *    None of the 1AP-9AP errata are visible to the normal user,
 *    except occasional delivery of 'spurious interrupt' as trap #15.
 *    This is very rare and a non-problem.
 * 1AP.  Linux maps APIC as non-cacheable
 * 2AP.  worked around in hardware
 * 3AP.  fixed in C0 and above steppings microcode update.
 *       Linux does not use excessive STARTUP_IPIs.
 * 4AP.  worked around in hardware
 * 5AP.  symmetric IO mode (normal Linux operation) not affected.
 *       'noapic' mode has vector 0xf filled out properly.
 * 6AP.  'noapic' mode might be affected - fixed in later steppings
 * 7AP.  We do not assume writes to the LVT deassert IRQs
 * 8AP.  We do not enable low power mode (deep sleep) during MP bootup
 * 9AP.  We do not use mixed mode
 *
 * Pentium
 *    There is a marginal case where REP MOVS on 100MHz SMP
 *    machines with B stepping processors can fail. XXX should provide
 *    an L1cache=Writethrough or L1cache=off option.
 *
 *    B stepping CPUs may hang. There are hardware workarounds
 *    for this. We warn about it in case your board doesn't have the
 *    workarounds. Basically that's so I can tell anyone with a B stepping
 *    CPU and SMP problems "tough".
 *
 * Specific items [From Pentium Processor Specification Update]
 *
 * 1AP.  Linux doesn't use remote read
 * 2AP.  Linux doesn't trust APIC errors
 * 3AP.  We work around this
 * 4AP.  Linux never generates 3 interrupts of the same priority
 *       to cause a lost local interrupt.
 * 5AP.  Remote read is never used
 * 6AP.  not affected - worked around in hardware
 * 7AP.  not affected - worked around in hardware
 * 8AP.  worked around in hardware - we get explicit CS errors if not
 * 9AP.  only 'noapic' mode affected. Might generate spurious
 *       interrupts, we log only the first one and count the
 *       rest silently.
 * 10AP. not affected - worked around in hardware
 * 11AP. Linux reads the APIC between writes to avoid this, as per
 *       the documentation. Make sure you preserve this as it affects
 *       the C stepping chips too.
 * 12AP. not affected - worked around in hardware
 * 13AP. not affected - worked around in hardware
 * 14AP. we always deassert INIT during bootup
 * 15AP. not affected - worked around in hardware
 * 16AP. not affected - worked around in hardware
 * 17AP. not affected - worked around in hardware
 * 18AP. not affected - worked around in hardware
 * 19AP. not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware and there's
 * basically nothing of note with C stepping upwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
        return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}
DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
static inline void __send_IPI_one(unsigned int cpu, int vector)
{
        unsigned int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
        // printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
        if (evtchn) {
                shared_info_t *s = HYPERVISOR_shared_info;
                /* Spin until the channel is neither pending nor masked. */
                while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
                       synch_test_bit(evtchn, &s->evtchn_mask[0]))
                        ;
                notify_via_evtchn(evtchn);
        } else
                printk("send_IPI to unbound port %d/%d",
                       cpu, vector);
}
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        int cpu;

        switch (shortcut) {
        case APIC_DEST_SELF:
                __send_IPI_one(smp_processor_id(), vector);
                break;
        case APIC_DEST_ALLBUT:
                for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                        if (cpu == smp_processor_id())
                                continue;
                        if (cpu_isset(cpu, cpu_online_map))
                                __send_IPI_one(cpu, vector);
                }
                break;
        default:
                printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut, vector);
        }
}
void fastcall send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);
        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                if (cpu_isset(cpu, mask))
                        __send_IPI_one(cpu, vector);
        }
        local_irq_restore(flags);
}
inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        send_IPI_mask_bitmask(mask, vector);
}
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
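/*
 * Under Xen there is no local APIC to program: each (cpu, vector) pair was
 * bound to an event channel at boot (per_cpu(ipi_to_evtchn, cpu)[vector]),
 * and "sending an IPI" is just a notification on that channel.  As a rough,
 * illustrative sketch (assuming send_IPI_mask resolves to the bitmask
 * variant above on this subarch), a cross-CPU reschedule boils down to:
 *
 *      send_IPI_mask(cpumask_of_cpu(1), RESCHEDULE_VECTOR);
 *        -> send_IPI_mask_bitmask(mask, RESCHEDULE_VECTOR)
 *          -> __send_IPI_one(1, RESCHEDULE_VECTOR)
 *            -> notify_via_evtchn(per_cpu(ipi_to_evtchn, 1)[RESCHEDULE_VECTOR]);
 */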
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */
static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us.
 */
static inline void leave_mm (unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
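/*
 * Call-site sketch (illustrative only, mirroring smp_invalidate_interrupt()
 * and do_flush_tlb_all() below): a CPU that is only lazily borrowing an mm
 * drops out of it instead of flushing, and from then on stops receiving
 * flush IPIs for that mm:
 *
 *      if (per_cpu(cpu_tlbstate, cpu).state != TLBSTATE_OK)
 *              leave_mm(cpu);
 */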
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 *
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	while the other cpu can't possibly modify the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are no
 * write/read ordering problems.
 */
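/*
 * A compressed, purely illustrative sketch of path 1a above (this is not
 * the actual switch_mm() source; 'prev'/'next' name the old and new mm):
 *
 *      cpu_clear(cpu, prev->cpu_vm_mask);                      // 1a1
 *      per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;         // 1a2
 *      per_cpu(cpu_tlbstate, cpu).active_mm = next;            // 1a3
 *      cpu_set(cpu, next->cpu_vm_mask);                        // 1a4
 *      load_cr3(next->pgd);
 */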
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();
        if (current->active_mm)
                load_user_cs_desc(cpu, current->active_mm);

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs
                 * it's staying as a return
                 */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
        return IRQ_HANDLED;
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t tmp;
        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        cpus_and(tmp, cpumask, cpu_online_map);
        BUG_ON(!cpus_equal(cpumask, tmp));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
        atomic_set_mask(cpumask, &flush_cpumask);
#else
        {
                int k;
                unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
                unsigned long *cpu_mask = (unsigned long *)&cpumask;
                for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
                        atomic_set_mask(cpu_mask[k], &flush_mask[k]);
        }
#endif
        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                mb();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
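/*
 * Caller-side pattern (illustrative; it is essentially what the flush_tlb_*
 * helpers below do): compute the set of other CPUs that may hold stale
 * entries for 'mm', and only then pay for the IPI round trip:
 *
 *      cpumask_t others = mm->cpu_vm_mask;
 *      cpu_clear(smp_processor_id(), others);
 *      if (!cpus_empty(others))
 *              flush_tlb_others(others, mm, FLUSH_ALL);
 */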
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);
        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}
void flush_tlb_mm (struct mm_struct * mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);
        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);
        preempt_enable();
}
static void do_flush_tlb_all(void* info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{ return 0; }
void flush_tlb_current_task(void)
{ xen_tlb_flush_mask(current->mm->cpu_vm_mask); }
void flush_tlb_mm(struct mm_struct * mm)
{ xen_tlb_flush_mask(mm->cpu_vm_mask); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{ xen_invlpg_mask(vma->vm_mm->cpu_vm_mask, va); }
void flush_tlb_all(void)
{ xen_tlb_flush_all(); }
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (!cpus)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock(&call_lock);

        return 0;
}
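/*
 * Typical usage, as an illustrative sketch only (do_dump_foo() and 'foo'
 * are made-up names, not part of this file):
 *
 *      static void do_dump_foo(void *info)
 *      {
 *              printk("CPU%d: foo=%lu\n", smp_processor_id(),
 *                     *(unsigned long *)info);
 *      }
 *
 *      unsigned long foo = 42;
 *      smp_call_function(do_dump_foo, &foo, 1, 1);  // wait==1: &foo stays valid
 *      do_dump_foo(&foo);                           // run it locally too
 *
 * smp_call_function() only targets the *other* online CPUs, so the caller
 * must invoke the function locally as well if it wants the effect on every
 * CPU (cf. on_each_cpu() as used by flush_tlb_all() above).
 */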
static void stop_this_cpu (void * dummy)
{
        /* Remove this CPU: */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        xxprint("stop_this_cpu disable_local_APIC\n");
        disable_local_APIC();
        if (cpu_data[smp_processor_id()].hlt_works_ok)
                for(;;) __asm__("hlt");
        for (;;);
}
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
        local_irq_disable();
        xxprint("smp_send_stop disable_local_APIC\n");
        disable_local_APIC();
        local_irq_enable();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
        return IRQ_HANDLED;
}
#include <linux/kallsyms.h>

irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
                                        struct pt_regs *regs)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify the initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        irq_enter();
        (*func)(info);
        irq_exit();
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
        return IRQ_HANDLED;
}
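/*
 * The two counters above form the whole handshake with smp_call_function():
 * 'started' tells the initiator that every CPU has copied func/info/wait out
 * of the stack-allocated call_data_struct, and 'finished' (only used when
 * wait==1) tells it the function has completed everywhere, so 'data' may
 * safely go out of scope.  With wait==0 the object that 'info' points to may
 * already be gone by the time (*func)(info) runs here, hence the warning
 * above.  An illustrative wait==0 call that stays safe by passing no
 * caller-owned data at all (poke_cpu is a made-up name):
 *
 *      static void poke_cpu(void *unused) { }
 *      ...
 *      smp_call_function(poke_cpu, NULL, 1, 0);
 */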