/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>

extern int linux_num_cpus;
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
			"CPU%d:\t\tonline\n", i);

void smp_bogo(struct seq_file *m)
	for (i = 0; i < NR_CPUS; i++)
			"Cpu%dBogo\t: %lu.%02lu\n"
			"Cpu%dClkTck\t: %016lx\n",
			i, cpu_data(i).udelay_val / (500000/HZ),
			(cpu_data(i).udelay_val / (5000/HZ)) % 100,
			i, cpu_data(i).clock_tick);
void __init smp_store_cpu_info(int id)
	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgdcache_size = 0;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);

void __init smp_callin(void)
	int cpuid = hard_smp_processor_id();
	extern int bigkernel;
	extern unsigned long kern_locked_tte_data;

		prom_dtlb_load(sparc64_highest_locked_tlbent()-1,
			       kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
		prom_itlb_load(sparc64_highest_locked_tlbent()-1,
			       kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);

	inherit_locked_prom_mappings(0);

	smp_setup_percpu_timer();

	smp_store_cpu_info(cpuid);

	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	clear_thread_flag(TIF_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))

	cpu_set(cpuid, cpu_online_map);

	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
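/* A brief note on the handshake implemented below (a description of the
 * existing code, not an addition to it): go[SLAVE] is placed a cache
 * line away from the start of go[] so master and slave do not
 * false-share.  For each of NUM_ROUNDS rounds the master stores its
 * %tick into go[SLAVE]; the slave brackets that store with two reads of
 * its own %tick in get_delta(), keeps the iteration (out of NUM_ITERS)
 * with the smallest round trip, and applies the resulting offset with
 * tick_ops->add_tick(), damped by adjust_latency.
 */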
static inline long get_delta (long *rt, long *master)
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		membar("#StoreLoad");
		while (!(tm = go[SLAVE]))
		membar("#StoreStore");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
	return tcenter - best_tm;
void smp_synchronize_tick_client(void)
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */

	local_irq_save(flags);

	for (i = 0; i < NUM_ROUNDS; i++) {
		delta = get_delta(&rt, &master_time_stamp);
			done = 1;	/* let's lock on to this... */

			adjust_latency += -delta;
			adj = -delta + adjust_latency/4;

		tick_ops->add_tick(adj, current_tick_offset);

		t[i].master = master_time_stamp;
		t[i].lat = adjust_latency/4;

	local_irq_restore(flags);

	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
	unsigned long flags, i;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */

	/* now let the client proceed into his loop */
	membar("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			membar("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar("#StoreLoad");
	spin_unlock_irqrestore(&itc_sync_lock, flags);
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
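/* Rough flow of smp_boot_one_cpu() below: fork an idle thread for the
 * new cpu, publish its thread_info through the cpu_new_thread cookie
 * read by sparc64_cpu_startup, kick the cpu via prom_startcpu(), then
 * spin for a bounded time waiting for the new cpu's smp_callin() to
 * signal arrival (callin_flag); on timeout the cpu is reported stuck
 * and removed from cpu_callout_map again.
 */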
static int __devinit smp_boot_one_cpu(unsigned int cpu)
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	kernel_thread(NULL, NULL, CLONE_IDLETASK);

	p = prev_task(&init_task);

	cpu_new_thread = p->thread_info;
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	for (timeout = 0; timeout < 5000000; timeout++) {

		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
	cpu_new_thread = NULL;
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |

	target = (cpu << 14) | 0x70;

	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
		__asm__ __volatile__(
		"wrpr %1, %2, %%pstate\n\t"
		"stxa %4, [%0] %3\n\t"
		"stxa %5, [%0+%8] %3\n\t"
		"stxa %6, [%0+%8] %3\n\t"
		"stxa %%g0, [%7] %3\n\t"
		"ldxa [%%g1] 0x7f, %%g0\n\t"
		: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
		  "r" (data0), "r" (data1), "r" (data2), "r" (target),
		  "r" (0x10), "0" (tmp)

		/* NOTE: PSTATE_IE is still clear. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "i" (ASI_INTR_DISPATCH_STAT));

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
		} while (result & 0x1);
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), result);
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, mask)) {
			spitfire_xcall_helper(data0, data1, data2, pstate, i);
			if (cpus_empty(mask))
/* Cheetah now allows us to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
#error Fixup cheetah_xcall_deliver Dave...
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
	int nack_busy_id, is_jalapeno;

	if (cpus_empty(mask))

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
	cpumask_t work_mask = mask;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, work_mask)) {
			u64 target = (i << 14) | 0x70;

				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				: "r" (target), "i" (ASI_INTR_W));
			cpu_clear(i, work_mask);
			if (cpus_empty(work_mask))

	/* Now, poll for completion. */
	stuck = 100000 * nack_busy_id;
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dispatch_stat)
			     : "i" (ASI_INTR_DISPATCH_STAT));
	if (dispatch_stat == 0UL) {
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
	} while (dispatch_stat & 0x5555555555555555UL);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"

	if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
		/* Busy bits will not clear, continue instead
		 * of freezing up on this cpu.
		 */
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), dispatch_stat);
		cpumask_t work_mask = mask;
		int i, this_busy_nack = 0;

		/* Delay some random time with interrupts enabled
		 * to prevent deadlock.
		 */
		udelay(2 * nack_busy_id);

		/* Clear out the mask bits for cpus which did not
		 * NACK us.
		 */
		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_isset(i, work_mask)) {
					check_mask = (0x2UL << (2*i));
					check_mask = (0x2UL <<
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, work_mask);
				if (cpus_empty(work_mask))
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(smp_processor_id(), mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);

static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static struct call_data_struct *call_data;
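/* call_lock serializes smp_call_function() callers, and call_data
 * points at the caller's on-stack call_data_struct for the duration of
 * the cross call; targets read it in smp_call_function_client() and
 * bump data->finished once they have snapped the data (or, when wait
 * is set, once func has completed).
 */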
extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	atomic_set(&data.finished, 0);

	spin_lock(&call_lock);

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	while (atomic_read(&data.finished) != cpus) {
			spin_unlock(&call_lock);

	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
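/* Illustrative use only, not part of the original file: a caller in
 * process context with interrupts enabled could run a (hypothetical)
 * handler on every online cpu and wait for completion roughly like so.
 * The cross call skips the calling cpu, so the caller invokes the
 * function locally itself:
 *
 *	static void my_func(void *info) { ... }
 *	...
 *	if (smp_call_function(my_func, NULL, 0, 1) == 0)
 *		my_func(NULL);
 */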
void smp_call_function_client(int irq, struct pt_regs *regs)
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		atomic_inc(&call_data->finished);
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);

extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;

static __inline__ void __local_flush_dcache_page(struct page *page)
#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page->virtual,
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page->virtual));
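/* smp_flush_dcache_page_impl() below flushes one page from the D-cache
 * of a single cpu: locally via __local_flush_dcache_page() when cpu is
 * the current processor, otherwise through a spitfire/cheetah cross
 * call to the matching xcall_flush_dcache_page_* handler.  On spitfire,
 * bit 32 of data0 is set when the page has a mapping, mirroring the
 * extra I-cache flush done in the local path above.
 */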
void smp_flush_dcache_page_impl(struct page *page, int cpu)
	cpumask_t mask = cpumask_of_cpu(cpu);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
	if (cpu == smp_processor_id()) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		if (tlb_type == spitfire) {
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
	if (cpus_empty(mask))
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
	__local_flush_dcache_page(page);
void smp_receive_signal(int cpu)
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
			cheetah_xcall_deliver(data0, 0, 0, mask);

void smp_receive_signal_client(int irq, struct pt_regs *regs)
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);

void smp_report_regs(void)
	smp_cross_call(&xcall_report_regs, 0, 0, 0);

void smp_flush_tlb_all(void)
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
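/* Condensed decision flow of the flush entry points below (a sketch of
 * the existing logic, not new behaviour): when only the current cpu can
 * hold stale entries (e.g. mm_users == 1, or cpu_vm_mask already equals
 * just this cpu), shrink mm->cpu_vm_mask to the local cpu and flush
 * locally; otherwise issue smp_cross_call_masked() against
 * mm->cpu_vm_mask, and flush the local TLB too when this cpu is itself
 * in cpu_vm_mask.
 */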
void smp_flush_tlb_mm(struct mm_struct *mm)
	/*
	 * This code is called from two places, dup_mmap and exit_mmap.  In the
	 * former case, we really need a flush.  In the latter case, the callers
	 * are single threaded exec_mmap (really need a flush), multithreaded
	 * exec_mmap case (do not need to flush, since the caller gets a new
	 * context via activate_mm), and all other callers of mmput() whence
	 * the flush can be optimized since the associated threads are dead and
	 * the mm is being torn down (__exit_mm and other mmput callers) or the
	 * owning thread is dissociating itself from the mm.  The
	 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
	 * for single thread exec and dup_mmap cases.  An alternate check might
	 * have been (current->mm != mm).
	 */
	if (atomic_read(&mm->mm_users) == 0)

	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = smp_processor_id();

	if (atomic_read(&mm->mm_users) == 1) {
		/* See smp_flush_tlb_page for info about this. */
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;

	smp_cross_call_masked(&xcall_flush_tlb_mm,

	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = smp_processor_id();

	end = PAGE_ALIGN(end);

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;

	smp_cross_call_masked(&xcall_flush_tlb_range,

	__flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
			  end, PAGE_SIZE, (end-start));

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	end = PAGE_ALIGN(end);

	smp_cross_call(&xcall_flush_tlb_kernel_range,

	__flush_tlb_kernel_range(start, end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = smp_processor_id();

	if (mm == current->active_mm &&
	    atomic_read(&mm->mm_users) == 1) {
		/* By virtue of being the current address space, and
		 * having the only reference to it, the following
		 * operation is safe.
		 *
		 * It would not be a win to perform the xcall tlb
		 * flush in this case, because even if we switch back
		 * to one of the other processors in cpu_vm_mask it
		 * is almost certain that all TLB entries for this
		 * context will be replaced by the time that happens.
		 */
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;

		cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

		/* By virtue of running under the mm->page_table_lock,
		 * and mmu_context.h:switch_mm doing the same, the
		 * following operation is safe.
		 */
		if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
			goto local_flush_and_out;

	/* OK, we have to actually perform the cross call.  Most
	 * likely this is a cloned mm or kswapd is kicking out pages
	 * for a task which has run recently on another cpu.
	 */
	smp_cross_call_masked(&xcall_flush_tlb_page,

	if (!cpu_isset(cpu, mm->cpu_vm_mask))

	__flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
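/* Capture protocol: smp_capture() raises penguins_are_doing_time and
 * cross calls xcall_capture; every other cpu ends up in
 * smp_penguin_jailcell(), checks in via smp_capture_registry and spins
 * there.  The captor waits until all online cpus have checked in, and
 * smp_release() later drops the flag to free them.  smp_capture_depth
 * lets capture/release pairs nest.
 */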
void smp_capture(void)
	int result = __atomic_add(1, &smp_capture_depth);

	membar("#StoreStore | #LoadStore");
		int ncpus = num_online_cpus();

		printk("CPU[%d]: Sending penguins to jail...",
		penguins_are_doing_time = 1;
		membar("#StoreStore | #LoadStore");
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)

void smp_release(void)
	if (atomic_dec_and_test(&smp_capture_depth)) {
			printk("CPU[%d]: Giving pardon to "
			       "imprisoned penguins\n",
		penguins_are_doing_time = 0;
		membar("#StoreStore | #StoreLoad");
		atomic_dec(&smp_capture_registry);
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
	unsigned long global_save[24];

	clear_softint(1 << irq);

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	atomic_inc(&smp_capture_registry);
	membar("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		membar("#LoadLoad");
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);

extern unsigned long xcall_promstop;

void smp_promstop_others(void)
	smp_cross_call(&xcall_promstop, 0, 0, 0);
extern void sparc64_do_profile(struct pt_regs *regs);

#define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
#define prof_counter(__cpu)	cpu_data(__cpu).counter

void smp_percpu_timer_interrupt(struct pt_regs *regs)
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);

		clear_softint(tick_mask);

		sparc64_do_profile(regs);
		if (!--prof_counter(cpu)) {

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);

			update_process_times(user);

			prof_counter(cpu) = prof_multiplier(cpu);
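		/* Re-arm the tick compare register below; if processing
		 * took long enough that the new compare value is already
		 * in the past, the loop pushes it out again so no tick
		 * is lost.
		 */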
		/* Guarantee that the following sequences execute
		 * atomically.
		 */
		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
				     "wrpr %0, %1, %%pstate"

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
	} while (time_after_eq(tick, compare));

static void __init smp_setup_percpu_timer(void)
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * atomically.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"

void __init smp_tick_init(void)
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern unsigned long cheetah_tune_scheduling(void);

static void __init smp_tune_scheduling(void)
	unsigned long orig_flush_base, flush_base, flags, *p;
	unsigned int ecache_size, order;
	cycles_t tick1, tick2, raw;

	/* Approximate heuristic for SMP scheduling.  It is an
	 * estimation of the time it takes to flush the L2 cache
	 * on the local processor.
	 *
	 * The ia32 chooses to use the L1 cache flush time instead,
	 * and I consider this complete nonsense.  The Ultra can service
	 * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
	 * L2 misses are what create extra bus traffic (ie. the "cost"
	 * of moving a process from one cpu to another).
	 */
	printk("SMP: Calibrating ecache flush... ");
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cacheflush_time = cheetah_tune_scheduling();

	cpu_find_by_instance(0, &cpu_node, NULL);
	ecache_size = prom_getintdefault(cpu_node,
					 "ecache-size", (512 * 1024));
	if (ecache_size > (4 * 1024 * 1024))
		ecache_size = (4 * 1024 * 1024);
	orig_flush_base = flush_base =
		__get_free_pages(GFP_KERNEL, order = get_order(ecache_size));

	if (flush_base != 0UL) {
		local_irq_save(flags);

		/* Scan twice the size once just to get the TLB entries
		 * loaded and make sure the second scan measures pure misses.
		 */
		for (p = (unsigned long *)flush_base;
		     ((unsigned long)p) < (flush_base + (ecache_size<<1));
		     p += (64 / sizeof(unsigned long)))
			*((volatile unsigned long *)p);

		tick1 = tick_ops->get_tick();

		__asm__ __volatile__("1:\n\t"
				     "ldx [%0 + 0x000], %%g1\n\t"
				     "ldx [%0 + 0x040], %%g2\n\t"
				     "ldx [%0 + 0x080], %%g3\n\t"
				     "ldx [%0 + 0x0c0], %%g5\n\t"
				     "add %0, 0x100, %0\n\t"
				     "bne,pt %%xcc, 1b\n\t"
				     : "=&r" (flush_base)
				       "r" (flush_base + ecache_size)
				     : "g1", "g2", "g3", "g5");

		tick2 = tick_ops->get_tick();

		local_irq_restore(flags);

		raw = (tick2 - tick1);

		/* Dampen it a little, considering two processes
		 * sharing the cache and fitting.
		 */
		cacheflush_time = (raw - (raw >> 2));

		free_pages(orig_flush_base, order);
		cacheflush_time = ((ecache_size << 2) +
				   (ecache_size << 1));

	/* Convert ticks/sticks to jiffies. */
	cache_decay_ticks = cacheflush_time / timer_tick_offset;
	if (cache_decay_ticks < 1)
		cache_decay_ticks = 1;

	printk("Using heuristic of %ld cycles, %ld ticks.\n",
	       cacheflush_time, cache_decay_ticks);
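	/* Both numbers computed above are consumed by the scheduler's
	 * cache-affinity heuristics: cacheflush_time is the estimated
	 * cost, in cycles, of refilling the E-cache after a migration,
	 * and cache_decay_ticks is the same cost expressed in timer
	 * ticks.
	 */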
/* /proc/profile writes can call this, don't __init it please. */
static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;

int setup_profiling_timer(unsigned int multiplier)
	unsigned long flags;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);
void __init smp_prepare_cpus(unsigned int max_cpus)
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		cpu_set(mid, phys_cpu_present_map);

	if (num_possible_cpus() > max_cpus) {
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)

	smp_store_cpu_info(boot_cpu_id);

void __devinit smp_prepare_boot_cpu(void)
	if (hard_smp_processor_id() >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");

	current_thread_info()->cpu = hard_smp_processor_id();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);

int __devinit __cpu_up(unsigned int cpu)
	int ret = smp_boot_one_cpu(cpu);

		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
		if (!cpu_isset(cpu, cpu_online_map)) {
			smp_synchronize_one_tick(cpu);
void __init smp_cpus_done(unsigned int max_cpus)
	unsigned long bogosum = 0;

	for (i = 0; i < NR_CPUS; i++) {
			bogosum += cpu_data(i).udelay_val;
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

	/* We want to run this with all the other cpus spinning
	 * in the kernel.
	 */
	smp_tune_scheduling();

/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case, we detect that by just seeing
 *           if we are trying to send this to an idler or not.
 */
void smp_send_reschedule(int cpu)
	if (cpu_data(cpu).idle_volume == 0)
		smp_receive_signal(cpu);

/* This is a nop because we capture all other cpus
 * anyway when making the PROM active.
 */
void smp_send_stop(void)