/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
extern int linux_num_cpus;
extern void calibrate_delay(void);
/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i))
                        seq_printf(m,
                                   "CPU%d:\t\tonline\n", i);
        }
}
void smp_bogo(struct seq_file *m)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (cpu_online(i))
                        seq_printf(m,
                                   "Cpu%dBogo\t: %lu.%02lu\n"
                                   "Cpu%dClkTck\t: %016lx\n",
                                   i, cpu_data(i).udelay_val / (500000/HZ),
                                   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
                                   i, cpu_data(i).clock_tick);
}
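
/* Worked example of the BogoMIPS conversion above: udelay_val holds
 * loops_per_jiffy, and BogoMIPS is loops_per_jiffy * HZ / 500000, so the
 * integer part is udelay_val / (500000/HZ) and the two fractional digits
 * are (udelay_val / (5000/HZ)) % 100.  With HZ=100 and a (hypothetical)
 * udelay_val of 998400 this prints "199.68".
 */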
void __init smp_store_cpu_info(int id)
{
        int cpu_node;

        /* multiplier and counter set by
           smp_setup_percpu_timer()  */
        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);

        cpu_data(id).pgcache_size = 0;
        cpu_data(id).pte_cache[0] = NULL;
        cpu_data(id).pte_cache[1] = NULL;
        cpu_data(id).pgdcache_size = 0;
        cpu_data(id).pgd_cache = NULL;
        cpu_data(id).idle_volume = 1;
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);
void __init smp_callin(void)
{
        int cpuid = hard_smp_processor_id();
        extern int bigkernel;
        extern unsigned long kern_locked_tte_data;

        if (bigkernel) {
                prom_dtlb_load(sparc64_highest_locked_tlbent()-1,
                        kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
                prom_itlb_load(sparc64_highest_locked_tlbent()-1,
                        kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
        }

        inherit_locked_prom_mappings(0);

        __flush_tlb_all();

        smp_setup_percpu_timer();

        local_irq_enable();

        calibrate_delay();
        smp_store_cpu_info(cpuid);
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");

        /* Clear this or we will die instantly when we
         * schedule back to this idler...
         */
        clear_thread_flag(TIF_NEWCHILD);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                membar("#LoadLoad");

        cpu_set(cpuid, cpu_online_map);
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER  0
#define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS      64      /* magic value */
#define NUM_ITERS       5       /* likewise */

static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC 0
static inline long get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        unsigned long i;

        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                go[MASTER] = 1;
                membar("#StoreLoad");
                while (!(tm = go[SLAVE]))
                        membar("#LoadLoad");
                go[SLAVE] = 0;
                membar("#StoreStore");
                t1 = tick_ops->get_tick();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                tcenter++;
        return tcenter - best_tm;
}
void smp_synchronize_tick_client(void)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
        struct {
                long rt;        /* roundtrip time */
                long master;    /* master's timestamp */
                long diff;      /* difference between midpoint and master's timestamp */
                long lat;       /* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        go[MASTER] = 1;

        while (go[MASTER])
                membar("#LoadLoad");

        local_irq_save(flags);
        for (i = 0; i < NUM_ROUNDS; i++) {
                delta = get_delta(&rt, &master_time_stamp);
                if (delta == 0) {
                        done = 1;       /* let's lock on to this... */
                        bound = rt;
                }

                if (!done) {
                        if (i > 0) {
                                adjust_latency += -delta;
                                adj = -delta + adjust_latency/4;
                        } else
                                adj = -delta;

                        tick_ops->add_tick(adj, current_tick_offset);
                }
#if DEBUG_TICK_SYNC
                t[i].rt = rt;
                t[i].master = master_time_stamp;
                t[i].diff = delta;
                t[i].lat = adjust_latency/4;
#endif
        }
        local_irq_restore(flags);

#if DEBUG_TICK_SYNC
        for (i = 0; i < NUM_ROUNDS; i++)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
               "(last diff %ld cycles, maxerr %lu cycles)\n",
               smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
        unsigned long flags, i;

        go[MASTER] = 0;

        smp_start_sync_tick_client(cpu);

        /* wait for client to be ready */
        while (!go[MASTER])
                membar("#LoadLoad");

        /* now let the client proceed into his loop */
        go[MASTER] = 0;
        membar("#StoreLoad");

        spin_lock_irqsave(&itc_sync_lock, flags);
        for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                while (!go[MASTER])
                        membar("#LoadLoad");
                go[MASTER] = 0;
                membar("#StoreStore");
                go[SLAVE] = tick_ops->get_tick();
                membar("#StoreLoad");
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);
}
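
/* Summary of the go[] handshake above (master side here, client side in
 * smp_synchronize_tick_client()/get_delta()): for each sample the client
 * reads its tick and sets go[MASTER] = 1 to request a reading; the master
 * clears go[MASTER] and publishes its own tick in go[SLAVE]; the client
 * then reads go[SLAVE], clears it, and reads its tick again to bound the
 * round trip.  This is a descriptive summary of the code, not a separate
 * specification.
 */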
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
        int timeout, ret, cpu_node;

        kernel_thread(NULL, NULL, CLONE_IDLETASK);

        p = prev_task(&init_task);

        init_idle(p, cpu);

        unhash_process(p);

        callin_flag = 0;
        cpu_new_thread = p->thread_info;
        cpu_set(cpu, cpu_callout_map);

        cpu_find_by_mid(cpu, &cpu_node);
        prom_startcpu(cpu_node, entry, cookie);

        for (timeout = 0; timeout < 5000000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }
        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
                cpu_clear(cpu, cpu_callout_map);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;

        return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
        u64 result, target;
        int stuck, tmp;

        if (this_is_starfire) {
                /* map to real upaid */
                cpu = (((cpu & 0x3c) << 1) |
                       ((cpu & 0x40) >> 4) |
                       (cpu & 0x3));
        }

        target = (cpu << 14) | 0x70;
again:
        /* Ok, this is the real Spitfire Errata #54.
         * One must read back from a UDB internal register
         * after writes to the UDB interrupt dispatch, but
         * before the membar Sync for that write.
         * So we use the high UDB control register (ASI 0x7f,
         * ADDR 0x20) for the dummy read. -DaveM
         */
        tmp = 0x40;
        __asm__ __volatile__(
        "wrpr   %1, %2, %%pstate\n\t"
        "stxa   %4, [%0] %3\n\t"
        "stxa   %5, [%0+%8] %3\n\t"
        "add    %0, %8, %0\n\t"
        "stxa   %6, [%0+%8] %3\n\t"
        "membar #Sync\n\t"
        "stxa   %%g0, [%7] %3\n\t"
        "membar #Sync\n\t"
        "mov    0x20, %%g1\n\t"
        "ldxa   [%%g1] 0x7f, %%g0\n\t"
        "membar #Sync"
        : "=r" (tmp)
        : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
          "r" (data0), "r" (data1), "r" (data2), "r" (target),
          "r" (0x10), "0" (tmp)
        : "g1");

        /* NOTE: PSTATE_IE is still clear. */
        stuck = 100000;
        do {
                __asm__ __volatile__("ldxa [%%g0] %1, %0"
                        : "=r" (result)
                        : "i" (ASI_INTR_DISPATCH_STAT));
                if (result == 0) {
                        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                             : : "r" (pstate));
                        return;
                }
                stuck -= 1;
                if (stuck == 0)
                        break;
        } while (result & 0x1);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
                printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
                goto again;
        }
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate;
        int i;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        for_each_cpu_mask(i, mask)
                spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate, ver;
        int nack_busy_id, is_jalapeno;

        if (cpus_empty(mask))
                return;

        /* Unfortunately, someone at Sun had the brilliant idea to make the
         * busy/nack fields hard-coded by ITID number for this Ultra-III
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        is_jalapeno = ((ver >> 32) == 0x003e0016);

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));

        /* Setup the dispatch data registers. */
        __asm__ __volatile__("stxa %0, [%3] %6\n\t"
                             "stxa %1, [%4] %6\n\t"
                             "stxa %2, [%5] %6\n\t"
                             "membar #Sync\n\t"
                             : /* no outputs */
                             : "r" (data0), "r" (data1), "r" (data2),
                               "r" (0x40), "r" (0x50), "r" (0x60),
                               "i" (ASI_INTR_W));

        nack_busy_id = 0;
        {
                int i;

                for_each_cpu_mask(i, mask) {
                        u64 target = (i << 14) | 0x70;

                        if (!is_jalapeno)
                                target |= (nack_busy_id << 24);
                        __asm__ __volatile__(
                                "stxa %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
                }
        }

        /* Now, poll for completion.  The dispatch status register holds
         * one busy/nack bit pair per dispatched cpu: busy bits in the
         * even positions (0x5555... mask), nack bits in the odd ones.
         */
        {
                u64 dispatch_stat;
                long stuck;

                stuck = 100000 * nack_busy_id;
                do {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
                        if (dispatch_stat == 0UL) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                return;
                        }
                        if (!--stuck)
                                break;
                } while (dispatch_stat & 0x5555555555555555UL);

                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));

                if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
                        printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;

                        /* Delay some random time with interrupts enabled
                         * to prevent deadlock.
                         */
                        udelay(2 * nack_busy_id);

                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
                        for_each_cpu_mask(i, mask) {
                                u64 check_mask;

                                if (is_jalapeno)
                                        check_mask = (0x2UL << (2*i));
                                else
                                        check_mask = (0x2UL <<
                                                      this_busy_nack);
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_clear(i, mask);
                                this_busy_nack += 2;
                        }

                        goto retry;
                }
        }
}
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
        u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
        int this_cpu = get_cpu();

        cpus_and(mask, mask, cpu_online_map);
        cpu_clear(this_cpu, mask);

        if (tlb_type == spitfire)
                spitfire_xcall_deliver(data0, data1, data2, mask);
        else
                cheetah_xcall_deliver(data0, data1, data2, mask);

        /* NOTE: Caller runs local copy on master. */

        put_cpu();
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        smp_cross_call_masked(&xcall_sync_tick,
                              0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
        smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t finished;
        int wait;
};

static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;
        long timeout;

        if (!cpus)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.finished, 0);
        data.wait = wait;

        spin_lock(&call_lock);

        call_data = &data;

        smp_cross_call(&xcall_call_function, 0, 0, 0);

        /*
         * Wait for other cpus to complete function or at
         * least snap the call data.
         */
        timeout = 1000000;
        while (atomic_read(&data.finished) != cpus) {
                if (--timeout <= 0)
                        goto out_timeout;
                barrier();
                udelay(1);
        }

        spin_unlock(&call_lock);

        return 0;

out_timeout:
        spin_unlock(&call_lock);
        printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
               (long) num_online_cpus() - 1L,
               (long) atomic_read(&data.finished));
        return 0;
}
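
/* Hypothetical usage sketch (the helper below is illustrative, not part
 * of this file): broadcast a function to all other online cpus and wait
 * for each of them to finish running it:
 *
 *      static void drain_local_state(void *unused) { ... }
 *      ...
 *      smp_call_function(drain_local_state, NULL, 0, 1);
 *
 * Per the comment above, the caller must have interrupts enabled and must
 * not be in hard interrupt or bottom half context.
 */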
void smp_call_function_client(int irq, struct pt_regs *regs)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;

        clear_softint(1 << irq);
        if (call_data->wait) {
                /* let initiator proceed only after completion */
                func(info);
                atomic_inc(&call_data->finished);
        } else {
                /* let initiator proceed after getting data */
                atomic_inc(&call_data->finished);
                func(info);
        }
}
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#if (L1DCACHE_SIZE > PAGE_SIZE)
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);
        int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
                void *pg_addr = page_address(page);
                u64 data0;

                if (tlb_type == spitfire) {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_spitfire);
                        if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                        spitfire_xcall_deliver(data0,
                                               __pa(pg_addr),
                                               (u64) pg_addr,
                                               mask);
                } else {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
                        cheetah_xcall_deliver(data0,
                                              __pa(pg_addr),
                                              0, mask);
                }
#ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
#endif
        }

        put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
        void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
        int this_cpu = get_cpu();

        cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        if (cpus_empty(mask))
                goto flush_self;
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
                spitfire_xcall_deliver(data0,
                                       __pa(pg_addr),
                                       (u64) pg_addr,
                                       mask);
        } else {
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
                                      __pa(pg_addr),
                                      0, mask);
        }
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
        __local_flush_dcache_page(page);

        put_cpu();
}
void smp_receive_signal(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        if (cpu_online(cpu)) {
                u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

                if (tlb_type == spitfire)
                        spitfire_xcall_deliver(data0, 0, 0, mask);
                else
                        cheetah_xcall_deliver(data0, 0, 0, mask);
        }
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
        /* Just return, rtrap takes care of the rest. */
        clear_softint(1 << irq);
}

void smp_report_regs(void)
{
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
void smp_flush_tlb_all(void)
{
        if (tlb_type == spitfire)
                smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
        else
                smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
        __flush_tlb_all();
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on; this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to run the TLB flush on.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system; this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * This code is called from two places, dup_mmap and exit_mmap.  In
         * the former case, we really need a flush.  In the latter case, the
         * callers are single threaded exec_mmap (really need a flush),
         * multithreaded exec_mmap case (do not need to flush, since the
         * caller gets a new context via activate_mm), and all other callers
         * of mmput() whence the flush can be optimized since the associated
         * threads are dead and the mm is being torn down (__exit_mm and
         * other mmput callers) or the owning thread is dissociating itself
         * from the mm.  The (atomic_read(&mm->mm_users) == 0) check ensures
         * real work is done for the single thread exec and dup_mmap cases.
         * An alternate check might have been (current->mm != mm).
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        {
                u32 ctx = CTX_HWBITS(mm->context);
                int cpu = get_cpu();

                if (atomic_read(&mm->mm_users) == 1) {
                        /* See smp_flush_tlb_page for info about this. */
                        mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                        goto local_flush_and_out;
                }

                smp_cross_call_masked(&xcall_flush_tlb_mm,
                                      ctx, 0, 0,
                                      mm->cpu_vm_mask);

        local_flush_and_out:
                __flush_tlb_mm(ctx, SECONDARY_CONTEXT);

                put_cpu();
        }
}
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        start &= PAGE_MASK;
        end    = PAGE_ALIGN(end);

        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        }

        smp_cross_call_masked(&xcall_flush_tlb_range,
                              ctx, start, end,
                              mm->cpu_vm_mask);

local_flush_and_out:
        __flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
                          end, PAGE_SIZE, (end-start));

        put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        start &= PAGE_MASK;
        end    = PAGE_ALIGN(end);

        smp_cross_call(&xcall_flush_tlb_kernel_range,
                       0, start, end);

        __flush_tlb_kernel_range(start, end);
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        page &= PAGE_MASK;
        if (mm == current->active_mm &&
            atomic_read(&mm->mm_users) == 1) {
                /* By virtue of being the current address space, and
                 * having the only reference to it, the following
                 * operation is safe.
                 *
                 * It would not be a win to perform the xcall tlb
                 * flush in this case, because even if we switch back
                 * to one of the other processors in cpu_vm_mask it
                 * is almost certain that all TLB entries for this
                 * context will be replaced by the time that happens.
                 */
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        } else {
                cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

                /* By virtue of running under the mm->page_table_lock,
                 * and mmu_context.h:switch_mm doing the same, the
                 * following operation is safe.
                 */
                if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
                        goto local_flush_and_out;
        }

        /* OK, we have to actually perform the cross call.  Most
         * likely this is a cloned mm or kswapd is kicking out pages
         * for a task which has run recently on another cpu.
         */
        smp_cross_call_masked(&xcall_flush_tlb_page,
                              ctx, page, 0,
                              mm->cpu_vm_mask);
        if (!cpu_isset(cpu, mm->cpu_vm_mask))
                goto out;

local_flush_and_out:
        __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);

out:
        put_cpu();
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
        int result = __atomic_add(1, &smp_capture_depth);

        membar("#StoreStore | #LoadStore");
        if (result == 1) {
                int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Sending penguins to jail...",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 1;
                membar("#StoreStore | #LoadStore");
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
                        membar("#LoadLoad");
#ifdef CAPTURE_DEBUG
                printk("done\n");
#endif
        }
}

void smp_release(void)
{
        if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Giving pardon to "
                       "imprisoned penguins\n",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 0;
                membar("#StoreStore | #StoreLoad");
                atomic_dec(&smp_capture_registry);
        }
}
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
        unsigned long global_save[24];

        clear_softint(1 << irq);

        preempt_disable();

        __asm__ __volatile__("flushw");
        save_alternate_globals(global_save);
        prom_world(1);
        atomic_inc(&smp_capture_registry);
        membar("#StoreLoad | #StoreStore");
        while (penguins_are_doing_time)
                membar("#LoadLoad");
        restore_alternate_globals(global_save);
        atomic_dec(&smp_capture_registry);
        prom_world(0);

        preempt_enable();
}
extern unsigned long xcall_promstop;

void smp_promstop_others(void)
{
        smp_cross_call(&xcall_promstop, 0, 0, 0);
}
extern void sparc64_do_profile(struct pt_regs *regs);

#define prof_multiplier(__cpu)  cpu_data(__cpu).multiplier
#define prof_counter(__cpu)     cpu_data(__cpu).counter
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        unsigned long compare, tick, pstate;
        int cpu = smp_processor_id();
        int user = user_mode(regs);

        /*
         * Check for level 14 softint.
         */
        {
                unsigned long tick_mask = tick_ops->softint_mask;

                if (!(get_softint() & tick_mask)) {
                        extern void handler_irq(int, struct pt_regs *);

                        handler_irq(14, regs);
                        return;
                }
                clear_softint(tick_mask);
        }

        do {
                sparc64_do_profile(regs);
                if (!--prof_counter(cpu)) {
                        irq_enter();

                        if (cpu == boot_cpu_id) {
                                kstat_this_cpu.irqs[0]++;
                                timer_tick_interrupt(regs);
                        }

                        update_process_times(user);

                        irq_exit();

                        prof_counter(cpu) = prof_multiplier(cpu);
                }

                /* Guarantee that the following sequences execute
                 * uninterrupted.
                 */
                __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                                     "wrpr %0, %1, %%pstate"
                                     : "=r" (pstate)
                                     : "i" (PSTATE_IE));

                compare = tick_ops->add_compare(current_tick_offset);
                tick = tick_ops->get_tick();

                /* Restore PSTATE_IE. */
                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : /* no outputs */
                                     : "r" (pstate));
        } while (time_after_eq(tick, compare));
}
static void __init smp_setup_percpu_timer(void)
{
        int cpu = smp_processor_id();
        unsigned long pstate;

        prof_counter(cpu) = prof_multiplier(cpu) = 1;

        /* Guarantee that the following sequences execute
         * uninterrupted.
         */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));

        tick_ops->init_tick(current_tick_offset);

        /* Restore PSTATE_IE. */
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : /* no outputs */
                             : "r" (pstate));
}
void __init smp_tick_init(void)
{
        boot_cpu_id = hard_smp_processor_id();
        current_tick_offset = timer_tick_offset;

        cpu_set(boot_cpu_id, cpu_online_map);
        prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern unsigned long cheetah_tune_scheduling(void);
static void __init smp_tune_scheduling(void)
{
        unsigned long orig_flush_base, flush_base, flags, *p;
        unsigned int ecache_size, order;
        cycles_t tick1, tick2, raw;
        int cpu_node;

        /* Approximate heuristic for SMP scheduling.  It is an
         * estimation of the time it takes to flush the L2 cache
         * on the local processor.
         *
         * The ia32 chooses to use the L1 cache flush time instead,
         * and I consider this complete nonsense.  The Ultra can service
         * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
         * L2 misses are what create extra bus traffic (ie. the "cost"
         * of moving a process from one cpu to another).
         */
        printk("SMP: Calibrating ecache flush... ");
        if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                cacheflush_time = cheetah_tune_scheduling();
                goto report;
        }

        cpu_find_by_instance(0, &cpu_node, NULL);
        ecache_size = prom_getintdefault(cpu_node,
                                         "ecache-size", (512 * 1024));
        if (ecache_size > (4 * 1024 * 1024))
                ecache_size = (4 * 1024 * 1024);
        orig_flush_base = flush_base =
                __get_free_pages(GFP_KERNEL, order = get_order(ecache_size));

        if (flush_base != 0UL) {
                local_irq_save(flags);

                /* Scan twice the size once just to get the TLB entries
                 * loaded and make sure the second scan measures pure misses.
                 */
                for (p = (unsigned long *)flush_base;
                     ((unsigned long)p) < (flush_base + (ecache_size<<1));
                     p += (64 / sizeof(unsigned long)))
                        *((volatile unsigned long *)p);

                tick1 = tick_ops->get_tick();

                __asm__ __volatile__("1:\n\t"
                                     "ldx [%0 + 0x000], %%g1\n\t"
                                     "ldx [%0 + 0x040], %%g2\n\t"
                                     "ldx [%0 + 0x080], %%g3\n\t"
                                     "ldx [%0 + 0x0c0], %%g5\n\t"
                                     "add %0, 0x100, %0\n\t"
                                     "cmp %0, %2\n\t"
                                     "bne,pt %%xcc, 1b\n\t"
                                     " nop"
                                     : "=&r" (flush_base)
                                     : "0" (flush_base),
                                       "r" (flush_base + ecache_size)
                                     : "g1", "g2", "g3", "g5");

                tick2 = tick_ops->get_tick();

                local_irq_restore(flags);

                raw = (tick2 - tick1);

                /* Dampen it a little, considering two processes
                 * sharing the cache and fitting.
                 */
                cacheflush_time = (raw - (raw >> 2));

                free_pages(orig_flush_base, order);
        } else {
                cacheflush_time = ((ecache_size << 2) +
                                   (ecache_size << 1));
        }
report:
        /* Convert ticks/sticks to jiffies. */
        cache_decay_ticks = cacheflush_time / timer_tick_offset;
        if (cache_decay_ticks < 1)
                cache_decay_ticks = 1;

        printk("Using heuristic of %ld cycles, %ld ticks.\n",
               cacheflush_time, cache_decay_ticks);
}
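
/* Worked example of the fallback estimate above (hypothetical numbers,
 * nothing measured): with the default "ecache-size" of 512KB the fallback
 * is (512K << 2) + (512K << 1) = 3145728 cycles.  On a 400 MHz tick with
 * HZ=100, timer_tick_offset is 4000000 ticks per jiffy, so
 * cache_decay_ticks computes to 0 and is clamped to 1.
 */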
/* /proc/profile writes can call this, don't __init it please. */
static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;

int setup_profiling_timer(unsigned int multiplier)
{
        unsigned long flags;
        int i;

        if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for (i = 0; i < NR_CPUS; i++)
                prof_multiplier(i) = multiplier;
        current_tick_offset = (timer_tick_offset / multiplier);
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}
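
/* For example, setup_profiling_timer(4) sets current_tick_offset to
 * timer_tick_offset/4, so smp_percpu_timer_interrupt() fires four times
 * per jiffy and takes a profile sample each time, while prof_multiplier
 * keeps timer_tick_interrupt()/update_process_times() running at the
 * original once-per-jiffy rate.
 */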
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < max_cpus)
                        cpu_set(mid, phys_cpu_present_map);
                instance++;
        }

        if (num_possible_cpus() > max_cpus) {
                instance = 0;
                while (!cpu_find_by_instance(instance, NULL, &mid)) {
                        if (mid != boot_cpu_id) {
                                cpu_clear(mid, phys_cpu_present_map);
                                if (num_possible_cpus() <= max_cpus)
                                        break;
                        }
                        instance++;
                }
        }

        smp_store_cpu_info(boot_cpu_id);
}
void __devinit smp_prepare_boot_cpu(void)
{
        if (hard_smp_processor_id() >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }

        current_thread_info()->cpu = hard_smp_processor_id();
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
        int ret = smp_boot_one_cpu(cpu);

        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_isset(cpu, cpu_online_map))
                        mb();
                if (!cpu_isset(cpu, cpu_online_map)) {
                        ret = -ENODEV;
                } else {
                        smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i))
                        bogosum += cpu_data(i).udelay_val;
        }
        printk("Total of %ld processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               (long) num_online_cpus(),
               bogosum/(500000/HZ),
               (bogosum/(5000/HZ))%100);

        /* We want to run this with all the other cpus spinning
         * in the kernel.
         */
        smp_tune_scheduling();
}
/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case, we detect that by just seeing
 *           if we are trying to send this to an idler or not.
 */
void smp_send_reschedule(int cpu)
{
        if (cpu_data(cpu).idle_volume == 0)
                smp_receive_signal(cpu);
}
/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}