/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
10 #include <linux/threads.h>
11 #include <linux/smp.h>
12 #include <linux/smp_lock.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
18 #include <linux/swap.h>
19 #include <asm/cacheflush.h>
20 #include <asm/tlbflush.h>
22 #include <asm/ptrace.h>
23 #include <asm/atomic.h>
25 #include <asm/delay.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
30 #include <asm/oplib.h>
31 #include <asm/hardirq.h>
32 #include <asm/cpudata.h>
34 #define IRQ_RESCHEDULE 13
35 #define IRQ_STOP_CPU 14
36 #define IRQ_CROSS_CALL 15
38 extern ctxd_t *srmmu_ctx_table_phys;
40 extern void calibrate_delay(void);
42 extern volatile int smp_processors_ready;
43 extern unsigned long cpu_present_map;
44 extern int smp_num_cpus;
45 extern int smp_threads_ready;
46 extern volatile unsigned long cpu_callin_map[NR_CPUS];
47 extern unsigned char boot_cpu_id;
48 extern int smp_activated;
49 extern volatile int __cpu_number_map[NR_CPUS];
50 extern volatile int __cpu_logical_map[NR_CPUS];
51 extern volatile unsigned long ipi_count;
52 extern volatile int smp_process_available;
53 extern volatile int smp_commenced;
54 extern int __smp4m_processor_id(void);
59 #define SMP_PRINTK(x) printk x
/* Atomically exchange *ptr with val using the sparc32 "swap" instruction.
 * Returns the previous contents of *ptr.  Used below to set the callin
 * flag with a real memory barrier-ish primitive.
 */
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	/* After the swap, %0 (val) holds the old memory contents. */
	return val;
}
72 static void smp_setup_percpu_timer(void);
73 extern void cpu_probe(void);
75 void __init smp4m_callin(void)
77 int cpuid = hard_smp_processor_id();
79 local_flush_cache_all();
80 local_flush_tlb_all();
82 set_irq_udt(boot_cpu_id);
84 /* Get our local ticker going. */
85 smp_setup_percpu_timer();
88 smp_store_cpu_info(cpuid);
90 local_flush_cache_all();
91 local_flush_tlb_all();
94 * Unblock the master CPU _only_ when the scheduler state
95 * of all secondary CPUs will be up-to-date, so after
96 * the SMP initialization the master will be just allowed
97 * to call the scheduler code.
101 /* Allow master to continue. */
102 swap((unsigned long *)&cpu_callin_map[cpuid], 1);
104 local_flush_cache_all();
105 local_flush_tlb_all();
109 /* Fix idle thread fields. */
110 __asm__ __volatile__("ld [%0], %%g6\n\t"
111 : : "r" (¤t_set[cpuid])
112 : "memory" /* paranoid */);
114 /* Attach to the address space of init_task. */
115 atomic_inc(&init_mm.mm_count);
116 current->active_mm = &init_mm;
118 while(!smp_commenced)
121 local_flush_cache_all();
122 local_flush_tlb_all();
127 extern int cpu_idle(void *unused);
128 extern void init_IRQ(void);
129 extern void cpu_panic(void);
130 extern int start_secondary(void *unused);
133 * Cycle through the processors asking the PROM to start each one.
136 extern struct linux_prom_registers smp_penguin_ctable;
137 extern unsigned long trapbase_cpu1[];
138 extern unsigned long trapbase_cpu2[];
139 extern unsigned long trapbase_cpu3[];
141 void __init smp4m_boot_cpus(void)
146 printk("Entering SMP Mode...\n");
151 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
152 cpu_present_map |= (1<<mid);
154 for(i=0; i < NR_CPUS; i++) {
155 __cpu_number_map[i] = -1;
156 __cpu_logical_map[i] = -1;
159 __cpu_number_map[boot_cpu_id] = 0;
160 __cpu_logical_map[0] = boot_cpu_id;
161 current_thread_info()->cpu = boot_cpu_id;
163 smp_store_cpu_info(boot_cpu_id);
164 set_irq_udt(boot_cpu_id);
165 smp_setup_percpu_timer();
166 local_flush_cache_all();
167 if(cpu_find_by_instance(1, NULL, NULL))
168 return; /* Not an MP box. */
169 for(i = 0; i < NR_CPUS; i++) {
173 if(cpu_present_map & (1 << i)) {
174 extern unsigned long sun4m_cpu_startup;
175 unsigned long *entry = &sun4m_cpu_startup;
176 struct task_struct *p;
179 /* Cook up an idler for this guy. */
180 kernel_thread(start_secondary, NULL, CLONE_IDLETASK);
184 p = prev_task(&init_task);
188 current_set[i] = p->thread_info;
192 /* See trampoline.S for details... */
193 entry += ((i-1) * 3);
196 * Initialize the contexts table
197 * Since the call to prom_startcpu() trashes the structure,
198 * we need to re-initialize it for each cpu
200 smp_penguin_ctable.which_io = 0;
201 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
202 smp_penguin_ctable.reg_size = 0;
204 /* whirrr, whirrr, whirrrrrrrrr... */
205 printk("Starting CPU %d at %p\n", i, entry);
206 local_flush_cache_all();
207 prom_startcpu(cpu_data(i).prom_node,
208 &smp_penguin_ctable, 0, (char *)entry);
210 /* wheee... it's going... */
211 for(timeout = 0; timeout < 10000; timeout++) {
212 if(cpu_callin_map[i])
216 if(cpu_callin_map[i]) {
217 /* Another "Red Snapper". */
218 __cpu_number_map[i] = i;
219 __cpu_logical_map[i] = i;
222 printk("Processor %d is stuck.\n", i);
225 if(!(cpu_callin_map[i])) {
226 cpu_present_map &= ~(1 << i);
227 __cpu_number_map[i] = -1;
230 local_flush_cache_all();
232 printk("Error: only one Processor found.\n");
233 cpu_present_map = (1 << smp_processor_id());
235 unsigned long bogosum = 0;
236 for(i = 0; i < NR_CPUS; i++) {
237 if(cpu_present_map & (1 << i))
238 bogosum += cpu_data(i).udelay_val;
240 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
243 (bogosum/(5000/HZ))%100);
245 smp_num_cpus = cpucount + 1;
248 /* Free unneeded trap tables */
249 if (!(cpu_present_map & (1 << 1))) {
250 ClearPageReserved(virt_to_page(trapbase_cpu1));
251 set_page_count(virt_to_page(trapbase_cpu1), 1);
252 free_page((unsigned long)trapbase_cpu1);
256 if (!(cpu_present_map & (1 << 2))) {
257 ClearPageReserved(virt_to_page(trapbase_cpu2));
258 set_page_count(virt_to_page(trapbase_cpu2), 1);
259 free_page((unsigned long)trapbase_cpu2);
263 if (!(cpu_present_map & (1 << 3))) {
264 ClearPageReserved(virt_to_page(trapbase_cpu3));
265 set_page_count(virt_to_page(trapbase_cpu3), 1);
266 free_page((unsigned long)trapbase_cpu3);
271 /* Ok, they are spinning and ready to go. */
272 smp_processors_ready = 1;
275 /* At each hardware IRQ, we get this called to forward IRQ reception
276 * to the next processor. The caller must disable the IRQ level being
277 * serviced globally so that there are no double interrupts received.
279 * XXX See sparc64 irq.c.
/* At each hardware IRQ we could forward IRQ reception to the next processor.
 * Intentionally empty on sun4m — rotation is not implemented here (XXX see
 * sparc64 irq.c).
 */
void smp4m_irq_rotate(int cpu)
{
}
285 /* Cross calls, in order to work efficiently and atomically do all
286 * the message passing work themselves, only stopcpu and reschedule
287 * messages come through here.
289 void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
291 static unsigned long smp_cpu_in_msg[NR_CPUS];
293 int me = smp_processor_id();
296 if(msg == MSG_RESCHEDULE) {
297 irq = IRQ_RESCHEDULE;
299 if(smp_cpu_in_msg[me])
301 } else if(msg == MSG_STOP_CPU) {
307 smp_cpu_in_msg[me]++;
308 if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
309 mask = cpu_present_map;
310 if(target == MSG_ALL_BUT_SELF)
312 for(i = 0; i < 4; i++) {
317 set_cpu_int(target, irq);
319 smp_cpu_in_msg[me]--;
323 printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
324 panic("Bogon SMP message pass.");
327 static struct smp_funcall {
334 unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
335 unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
338 static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
340 /* Cross calls must be serialized, at least currently. */
341 void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
342 unsigned long arg3, unsigned long arg4, unsigned long arg5)
344 if(smp_processors_ready) {
345 register int ncpus = smp_num_cpus;
348 spin_lock_irqsave(&cross_call_lock, flags);
350 /* Init function glue. */
351 ccall_info.func = func;
352 ccall_info.arg1 = arg1;
353 ccall_info.arg2 = arg2;
354 ccall_info.arg3 = arg3;
355 ccall_info.arg4 = arg4;
356 ccall_info.arg5 = arg5;
358 /* Init receive/complete mapping, plus fire the IPI's off. */
360 register unsigned long mask;
363 mask = (cpu_present_map & ~(1 << smp_processor_id()));
364 for(i = 0; i < ncpus; i++) {
365 if(mask & (1 << i)) {
366 ccall_info.processors_in[i] = 0;
367 ccall_info.processors_out[i] = 0;
368 set_cpu_int(i, IRQ_CROSS_CALL);
370 ccall_info.processors_in[i] = 1;
371 ccall_info.processors_out[i] = 1;
381 while(!ccall_info.processors_in[i])
383 } while(++i < ncpus);
387 while(!ccall_info.processors_out[i])
389 } while(++i < ncpus);
392 spin_unlock_irqrestore(&cross_call_lock, flags);
396 /* Running cross calls. */
397 void smp4m_cross_call_irq(void)
399 int i = smp_processor_id();
401 ccall_info.processors_in[i] = 1;
402 ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
403 ccall_info.arg4, ccall_info.arg5);
404 ccall_info.processors_out[i] = 1;
407 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
409 void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
411 int cpu = smp_processor_id();
413 clear_profile_irq(cpu);
416 sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
418 if(!--prof_counter(cpu)) {
419 int user = user_mode(regs);
422 update_process_times(user);
425 prof_counter(cpu) = prof_multiplier(cpu);
429 extern unsigned int lvl14_resolution;
431 static void __init smp_setup_percpu_timer(void)
433 int cpu = smp_processor_id();
435 prof_counter(cpu) = prof_multiplier(cpu) = 1;
436 load_profile_irq(cpu, lvl14_resolution);
438 if(cpu == boot_cpu_id)
442 void __init smp4m_blackbox_id(unsigned *addr)
444 int rd = *addr & 0x3e000000;
447 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
448 addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
449 addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
452 void __init smp4m_blackbox_current(unsigned *addr)
454 int rd = *addr & 0x3e000000;
457 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
458 addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
459 addr[4] = 0x8008200c | rd | rs1; /* and reg, 3, reg */
462 void __init sun4m_init_smp(void)
464 BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
465 BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
466 BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
467 BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
468 BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);