/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/cpudata.h>

#define IRQ_RESCHEDULE		13
#define IRQ_STOP_CPU		14
#define IRQ_CROSS_CALL		15
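/* These three interrupt levels double as inter-processor interrupts on
 * sun4m: set_cpu_int() raises the corresponding soft interrupt on the
 * target CPU, and the handlers dispatch on the level.
 */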
extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile int smp_processors_ready;
extern int smp_num_cpus;
extern int smp_threads_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
extern volatile int __cpu_number_map[NR_CPUS];
extern volatile int __cpu_logical_map[NR_CPUS];
extern volatile unsigned long ipi_count;
extern volatile int smp_process_available;
extern volatile int smp_commenced;
extern int __smp4m_processor_id(void);
/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)	printk x
#else
#define SMP_PRINTK(x)
#endif
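/* Atomic exchange: store 'val' into *ptr and return the old contents,
 * all in one SPARC "swap" instruction. Used for the cpu_callin_map
 * handshake below so the update reaches the master as a single atomic
 * store.
 */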
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}
static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);
void __init smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_flush_cache_all();
	local_flush_tlb_all();

	set_irq_udt(boot_cpu_id);

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();
	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 */

	/* Allow master to continue. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);

	local_flush_cache_all();
	local_flush_tlb_all();

	cpu_probe();
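	/* On sparc32, register %g6 permanently holds the current
	 * thread_info pointer; load it from the slot the master set up
	 * in current_set[] so "current" works on this CPU from here on.
	 */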
	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);
	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while(!smp_commenced)
		barrier();

	local_flush_cache_all();
	local_flush_tlb_all();

	local_irq_enable();
}
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
extern int start_secondary(void *unused);

/*
 *	Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];
void __init smp4m_boot_cpus(void)
{
	int cpucount = 0;
	int i, mid;

	printk("Entering SMP Mode...\n");

	local_irq_enable();
	cpus_clear(cpu_present_map);

	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
		cpu_set(mid, cpu_present_map);

	for(i=0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}

	__cpu_number_map[boot_cpu_id] = 0;
	__cpu_logical_map[0] = boot_cpu_id;
	current_thread_info()->cpu = boot_cpu_id;

	smp_store_cpu_info(boot_cpu_id);
	set_irq_udt(boot_cpu_id);
	smp_setup_percpu_timer();
	local_flush_cache_all();
	if(cpu_find_by_instance(1, NULL, NULL))
		return;  /* Not an MP box. */
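	/* For every other CPU the PROM reported, cook up an idle thread,
	 * point the PROM at that CPU's trampoline slot, and busy-wait for
	 * the new CPU to flag itself in cpu_callin_map.
	 */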
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id)
			continue;

		if (cpu_isset(i, cpu_present_map)) {
			extern unsigned long sun4m_cpu_startup;
			unsigned long *entry = &sun4m_cpu_startup;
			struct task_struct *p;
			int timeout;

			/* Cook up an idler for this guy. */
			kernel_thread(start_secondary, NULL, CLONE_IDLETASK);

			cpucount++;

			p = prev_task(&init_task);

			init_idle(p, i);

			current_set[i] = p->thread_info;

			unhash_process(p);

			/* See trampoline.S for details... */
			entry += ((i-1) * 3);
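			/* Each secondary CPU owns a three-instruction slot
			 * in the sun4m_cpu_startup trampoline; CPU i enters
			 * at slot i-1 since the boot CPU needs none.
			 */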
			/*
			 * Initialize the contexts table
			 * Since the call to prom_startcpu() trashes the structure,
			 * we need to re-initialize it for each cpu
			 */
			smp_penguin_ctable.which_io = 0;
			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
			smp_penguin_ctable.reg_size = 0;

			/* whirrr, whirrr, whirrrrrrrrr... */
			printk("Starting CPU %d at %p\n", i, entry);
			local_flush_cache_all();
			prom_startcpu(cpu_data(i).prom_node,
				      &smp_penguin_ctable, 0, (char *)entry);
			/* wheee... it's going... */
			for(timeout = 0; timeout < 10000; timeout++) {
				if(cpu_callin_map[i])
					break;
				udelay(200);
			}
			if(cpu_callin_map[i]) {
				/* Another "Red Snapper". */
				__cpu_number_map[i] = i;
				__cpu_logical_map[i] = i;
			} else {
				cpucount--;
				printk("Processor %d is stuck.\n", i);
			}
		}
		if(!(cpu_callin_map[i])) {
			cpu_clear(i, cpu_present_map);
			__cpu_number_map[i] = -1;
		}
	}
	local_flush_cache_all();
	if(cpucount == 0) {
		printk("Error: only one Processor found.\n");
		cpu_present_map = cpumask_of_cpu(smp_processor_id());
	} else {
		unsigned long bogosum = 0;
		for(i = 0; i < NR_CPUS; i++) {
			if (cpu_isset(i, cpu_present_map))
				bogosum += cpu_data(i).udelay_val;
		}
		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
		       cpucount + 1,
		       bogosum/(500000/HZ),
		       (bogosum/(5000/HZ))%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}
	/* Free unneeded trap tables */
	if (!cpu_isset(1, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu1));
		set_page_count(virt_to_page(trapbase_cpu1), 1);
		free_page((unsigned long)trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(2, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu2));
		set_page_count(virt_to_page(trapbase_cpu2), 1);
		free_page((unsigned long)trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(3, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu3));
		set_page_count(virt_to_page(trapbase_cpu3), 1);
		free_page((unsigned long)trapbase_cpu3);
		totalram_pages++;
		num_physpages++;
	}
	/* Ok, they are spinning and ready to go. */
	smp_processors_ready = 1;
}
/* At each hardware IRQ, we get this called to forward IRQ reception
 * to the next processor.  The caller must disable the IRQ level being
 * serviced globally so that there are no double interrupts received.
 *
 * XXX See sparc64 irq.c.
 */
void smp4m_irq_rotate(int cpu)
{
}
/* Cross calls, in order to work efficiently and atomically do all
 * the message passing work themselves, only stopcpu and reschedule
 * messages come through here.
 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
	static unsigned long smp_cpu_in_msg[NR_CPUS];
	cpumask_t mask;
	int me = smp_processor_id();
	int irq, i;

	if(msg == MSG_RESCHEDULE) {
		irq = IRQ_RESCHEDULE;

		if(smp_cpu_in_msg[me])
			return;
	} else if(msg == MSG_STOP_CPU) {
		irq = IRQ_STOP_CPU;
	} else {
		goto barf;
	}

	smp_cpu_in_msg[me]++;
	if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
		mask = cpu_present_map;
		if(target == MSG_ALL_BUT_SELF)
			cpu_clear(me, mask);
		for(i = 0; i < 4; i++) {
			if (cpu_isset(i, mask))
				set_cpu_int(i, irq);
		}
	} else {
		set_cpu_int(target, irq);
	}
	smp_cpu_in_msg[me]--;

	return;
barf:
	printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
	panic("Bogon SMP message pass.");
}
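/* Shared cross-call state: the requesting CPU fills in the function and
 * arguments, then each target flags itself in processors_in[] on entry
 * to the IPI handler and in processors_out[] once the call has run.
 */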
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;

static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	if(smp_processors_ready) {
		register int ncpus = smp_num_cpus;
		unsigned long flags;

		spin_lock_irqsave(&cross_call_lock, flags);

		/* Init function glue. */
		ccall_info.func = func;
		ccall_info.arg1 = arg1;
		ccall_info.arg2 = arg2;
		ccall_info.arg3 = arg3;
		ccall_info.arg4 = arg4;
		ccall_info.arg5 = arg5;

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			cpumask_t mask = cpu_present_map;
			register int i;

			cpu_clear(smp_processor_id(), mask);
			for(i = 0; i < ncpus; i++) {
				if (cpu_isset(i, mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					set_cpu_int(i, IRQ_CROSS_CALL);
				} else {
					ccall_info.processors_in[i] = 1;
					ccall_info.processors_out[i] = 1;
				}
			}
		}

		{
			register int i;

			i = 0;
			do {
				while(!ccall_info.processors_in[i])
					barrier();
			} while(++i < ncpus);

			i = 0;
			do {
				while(!ccall_info.processors_out[i])
					barrier();
			} while(++i < ncpus);
		}
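		/* Both spins have fallen through: every target entered the
		 * handler and ran the function to completion, so it is now
		 * safe to release the lock and admit the next cross call.
		 */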
		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}
extern void sparc_do_profile(unsigned long pc, unsigned long o7);

void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	clear_profile_irq(cpu);

	if(!user_mode(regs))
		sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);

	if(!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
}
extern unsigned int lvl14_resolution;

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);

	if(cpu == boot_cpu_id)
		enable_pil_irq(14);
}
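/* The two "blackbox" routines below are BTFIXUP patch generators: at
 * boot they overwrite an instruction slot with a short SPARC sequence
 * that derives the CPU number from the trap base register (%tbr), whose
 * bits [13:12] hold the processor id on sun4m.
 */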
void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}
void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}
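/* load_current variant: shifting %tbr right by 0xa instead of 0xc and
 * masking with 0xc leaves the CPU id already multiplied by four, i.e. a
 * ready-made byte offset into the current_set[] pointer table; slots
 * addr[1] and addr[3] are left untouched for the patched site's own
 * instructions.
 */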
void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}