/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>

extern int linux_num_cpus;
extern void calibrate_delay(void);
/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}
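
/* A minimal sketch (ours, not in the original) of the fixed-point
 * BogoMIPS arithmetic used in smp_bogo() above: udelay_val holds
 * loops_per_jiffy, and BogoMIPS is that value rescaled to loops per
 * 500000 microseconds, printed as a whole part plus two decimal
 * digits without floating point.  The helper name is illustrative.
 */
static inline void bogomips_parts(unsigned long udelay_val,
				  unsigned long *whole, unsigned long *frac)
{
	/* e.g. udelay_val = 2000000 with HZ = 100:
	 * whole = 2000000 / 5000 = 400, frac = (2000000 / 50) % 100 = 0,
	 * printed as "400.00" BogoMIPS.
	 */
	*whole = udelay_val / (500000 / HZ);
	*frac = (udelay_val / (5000 / HZ)) % 100;
}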
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgdcache_size = 0;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	extern int bigkernel;
	extern unsigned long kern_locked_tte_data;

	if (bigkernel) {
		prom_dtlb_load(sparc64_highest_locked_tlbent()-1,
			kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
		prom_itlb_load(sparc64_highest_locked_tlbent()-1,
			kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
	}

	inherit_locked_prom_mappings(0);

	__flush_tlb_all();

	smp_setup_percpu_timer();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	clear_thread_flag(TIF_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		membar("#LoadLoad");

	cpu_set(cpuid, cpu_online_map);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar("#StoreLoad");
		while (!(tm = go[SLAVE]))
			membar("#LoadLoad");
		go[SLAVE] = 0;
		membar("#StoreStore");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
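
/* Worked example (ours, not in the original) of the estimate that
 * get_delta() returns.  Suppose the slave samples t0 = 1000, the
 * master replies with tm = 1492, and the slave then samples t1 = 1010:
 * the midpoint tcenter = 1005 is the slave's best guess of its own
 * tick at the instant the master read tm, so the returned delta is
 * 1005 - 1492 = -487 (the slave runs ~487 ticks behind), while
 * *rt = 10 bounds the measurement error of a single round.
 */
static inline long tick_delta_example(unsigned long t0, unsigned long t1,
				      unsigned long tm)
{
	/* same overflow-free midpoint computation as above */
	unsigned long tcenter = t0/2 + t1/2 + (t0 % 2 + t1 % 2 == 2);

	return (long) (tcenter - tm);
}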
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		membar("#LoadLoad");

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		membar("#LoadLoad");

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				membar("#LoadLoad");
			go[MASTER] = 0;
			membar("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
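
/* Illustrative sketch (ours) of why passing &cpu_new_thread survives
 * the truncation described above: the cell itself lives in the kernel
 * image, well below 4GB, so even a cookie chopped to 32 bits still
 * points at it, and the startup path can read the full 64-bit thread
 * pointer back through it.  The helper name is hypothetical.
 */
static struct thread_info *example_fetch_thread(u32 cookie32)
{
	struct thread_info **slot;

	/* Zero-extend the 32-bit cookie back into a kernel pointer... */
	slot = (struct thread_info **) (unsigned long) cookie32;

	/* ...and dereference it to recover the real 64-bit value. */
	return *slot;
}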
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	kernel_thread(NULL, NULL, CLONE_IDLETASK);

	p = prev_task(&init_task);

	init_idle(p, cpu);

	callin_flag = 0;
	cpu_new_thread = p->thread_info;
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jalapeno;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jalapeno)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jalapeno)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
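
/* Illustrative helper (ours, not in the original): the retry logic
 * above assumes each outstanding mondo owns a (busy, NACK) bit pair in
 * the dispatch status word, pair n sitting at bits 2n (busy) and 2n+1
 * (NACK), where n is the nack_busy_id slot (or the hard-coded ITID on
 * Jalapeno).  This decodes one slot's NACK bit with the same mask the
 * loop above builds.
 */
static inline int mondo_slot_nacked(u64 dispatch_stat, int slot)
{
	return (dispatch_stat & (0x2UL << (2 * slot))) != 0;
}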
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
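
/* Sketch (ours) of the data0 encoding used above: the hardware carries
 * three 64-bit mondo data words, and word 0 packs the TLB context into
 * its upper half and the low 32 bits of the handler's kernel address
 * into its lower half.  Nothing is lost because the handlers live in
 * the 32-bit-reachable kernel image.  The helper name is illustrative.
 */
static inline u64 example_pack_data0(u32 ctx, unsigned long *func)
{
	return (((u64) ctx) << 32) | (((u64) func) & 0xffffffff);
}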
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);
	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
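
/* Usage sketch (ours, not part of the original file): run a hook on
 * every other online cpu and wait for completion.  example_bump and
 * example_count_cpus are purely illustrative names.
 */
static void example_bump(void *info)
{
	atomic_inc((atomic_t *) info);
}

static void example_count_cpus(void)
{
	static atomic_t hits = ATOMIC_INIT(0);

	/* nonatomic=0, wait=1: block until every cpu has run the hook */
	if (smp_call_function(example_bump, &hits, 0, 1) == 0)
		printk("cross call reached %d other cpus\n",
		       atomic_read(&hits));
}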
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else {
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else {
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else
			cheetah_xcall_deliver(data0, 0, 0, mask);
	}
}
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * This code is called from two places, dup_mmap and exit_mmap. In the
	 * former case, we really need a flush. In the latter case, the
	 * callers are single threaded exec_mmap (really need a flush),
	 * multithreaded exec_mmap case (do not need to flush, since the
	 * caller gets a new context via activate_mm), and all other callers
	 * of mmput() whence the flush can be optimized since the associated
	 * threads are dead and the mm is being torn down (__exit_mm and other
	 * mmput callers) or the owning thread is dissociating itself from the
	 * mm.  The (atomic_read(&mm->mm_users) == 0) check ensures real work
	 * is done for single thread exec and dup_mmap cases. An alternate
	 * check might have been (current->mm != mm).
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = get_cpu();

		if (atomic_read(&mm->mm_users) == 1) {
			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_mm,
				      ctx, 0, 0,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

		put_cpu();
	}
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	} else {
		/* This optimization is not valid.  Normally
		 * we will be holding the page_table_lock, but
		 * there is an exception which is copy_page_range()
		 * when forking.  The lock is held during the individual
		 * page table updates in the parent, but not at the
		 * top level, which is where we are invoked.
		 */
		if (0) {
			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

			/* By virtue of running under the mm->page_table_lock,
			 * and mmu_context.h:switch_mm doing the same, the
			 * following operation is safe.
			 */
			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
				goto local_flush_and_out;
		}
	}

	smp_cross_call_masked(&xcall_flush_tlb_pending,
			      ctx, nr, (unsigned long) vaddrs,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
	int result = __atomic_add(1, &smp_capture_depth);

	membar("#StoreStore | #LoadStore");
	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar("#StoreStore | #LoadStore");
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			membar("#LoadLoad");
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar("#StoreStore | #StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
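
/* Usage sketch (ours, not in the original): a caller that must not
 * race with any other cpu, e.g. around entry into the firmware,
 * brackets the critical region with smp_capture()/smp_release().
 * The smp_capture_depth counter makes the pair safe to nest.
 */
static void example_quiesce(void)
{
	smp_capture();	/* returns once every other cpu spins in
			 * smp_penguin_jailcell() below */

	/* ... work that needs the machine to itself ... */

	smp_release();	/* lets the captured cpus resume */
}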
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		membar("#LoadLoad");
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
extern unsigned long xcall_promstop;

void smp_promstop_others(void)
{
	smp_cross_call(&xcall_promstop, 0, 0, 0);
}
extern void sparc64_do_profile(struct pt_regs *regs);

#define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
#define prof_counter(__cpu)	cpu_data(__cpu).counter

void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		sparc64_do_profile(regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
				     "wrpr %0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}
void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern unsigned long cheetah_tune_scheduling(void);

static void __init smp_tune_scheduling(void)
{
	unsigned long orig_flush_base, flush_base, flags, *p;
	unsigned int ecache_size, order;
	cycles_t tick1, tick2, raw;
	int cpu_node;

	/* Approximate heuristic for SMP scheduling.  It is an
	 * estimation of the time it takes to flush the L2 cache
	 * on the local processor.
	 *
	 * The ia32 chooses to use the L1 cache flush time instead,
	 * and I consider this complete nonsense.  The Ultra can service
	 * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
	 * L2 misses are what create extra bus traffic (ie. the "cost"
	 * of moving a process from one cpu to another).
	 */
	printk("SMP: Calibrating ecache flush... ");
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cacheflush_time = cheetah_tune_scheduling();
		goto report;
	}

	cpu_find_by_instance(0, &cpu_node, NULL);
	ecache_size = prom_getintdefault(cpu_node,
					 "ecache-size", (512 * 1024));
	if (ecache_size > (4 * 1024 * 1024))
		ecache_size = (4 * 1024 * 1024);
	orig_flush_base = flush_base =
		__get_free_pages(GFP_KERNEL, order = get_order(ecache_size));

	if (flush_base != 0UL) {
		local_irq_save(flags);

		/* Scan twice the size once just to get the TLB entries
		 * loaded and make sure the second scan measures pure misses.
		 */
		for (p = (unsigned long *)flush_base;
		     ((unsigned long)p) < (flush_base + (ecache_size<<1));
		     p += (64 / sizeof(unsigned long)))
			*((volatile unsigned long *)p);

		tick1 = tick_ops->get_tick();

		__asm__ __volatile__("1:\n\t"
				     "ldx	[%0 + 0x000], %%g1\n\t"
				     "ldx	[%0 + 0x040], %%g2\n\t"
				     "ldx	[%0 + 0x080], %%g3\n\t"
				     "ldx	[%0 + 0x0c0], %%g5\n\t"
				     "add	%0, 0x100, %0\n\t"
				     "cmp	%0, %2\n\t"
				     "bne,pt	%%xcc, 1b\n\t"
				     " nop"
				     : "=&r" (flush_base)
				     : "0" (flush_base),
				       "r" (flush_base + ecache_size)
				     : "g1", "g2", "g3", "g5");

		tick2 = tick_ops->get_tick();

		local_irq_restore(flags);

		raw = (tick2 - tick1);

		/* Dampen it a little, considering two processes
		 * sharing the cache and fitting.
		 */
		cacheflush_time = (raw - (raw >> 2));

		free_pages(orig_flush_base, order);
	} else {
		cacheflush_time = ((ecache_size << 2) +
				   (ecache_size << 1));
	}
report:
	/* Convert ticks/sticks to jiffies. */
	cache_decay_ticks = cacheflush_time / timer_tick_offset;
	if (cache_decay_ticks < 1)
		cache_decay_ticks = 1;

	printk("Using heuristic of %ld cycles, %ld ticks.\n",
	       cacheflush_time, cache_decay_ticks);
}
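
/* Worked example (ours, not in the original) of the conversion above:
 * with a 4MB ecache the no-memory fallback estimate is
 * cacheflush_time = 4MB*4 + 4MB*2 = 25165824 cycles; if
 * timer_tick_offset is ~4000000 tick cycles per jiffy, that yields
 * cache_decay_ticks = 6 jiffies.  All numbers are illustrative.
 */
static inline unsigned long example_decay_ticks(unsigned long flush_cycles,
						unsigned long tick_offset)
{
	unsigned long ticks = flush_cycles / tick_offset;

	return ticks ? ticks : 1;	/* clamped to at least one jiffy */
}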
/* /proc/profile writes can call this, don't __init it please. */
static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < max_cpus)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}

	if (num_possible_cpus() > max_cpus) {
		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}
void __devinit smp_prepare_boot_cpu(void)
{
	if (hard_smp_processor_id() >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = hard_smp_processor_id();
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			membar("#LoadLoad");
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

	/* We want to run this with all the other cpus spinning
	 * in the kernel.
	 */
	smp_tune_scheduling();
}
/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case, we detect that by just seeing
 *           if we are trying to send this to an idler or not.
 */
void smp_send_reschedule(int cpu)
{
	if (cpu_data(cpu).idle_volume == 0)
		smp_receive_signal(cpu);
}
/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}