/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>

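/* Soft interrupt levels used for the inter-processor messages sent
 * via set_cpu_int() below.
 */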
#define IRQ_RESCHEDULE          13
#define IRQ_STOP_CPU            14
#define IRQ_CROSS_CALL          15

extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;

extern cpumask_t smp_commenced_mask;

extern int __smp4m_processor_id(void);

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)   printk x
#else
#define SMP_PRINTK(x)
#endif

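/* Atomically exchange 'val' with *ptr using the SPARC V8 swap
 * instruction and return the old value; used for the callin handshake
 * with the master CPU below.
 */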
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);

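/* Entry point for a secondary CPU once the trampoline has it running:
 * set up the local timer, calibrate, publish our cpu data, signal the
 * master through cpu_callin_map, then spin until the master puts us
 * in smp_commenced_mask.
 */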
void __init smp4m_callin(void)
{
        int cpuid = hard_smp_processor_id();

        local_flush_cache_all();
        local_flush_tlb_all();

        /* Get our local ticker going. */
        smp_setup_percpu_timer();

        calibrate_delay();
        smp_store_cpu_info(cpuid);

        local_flush_cache_all();
        local_flush_tlb_all();

        /*
         * Unblock the master CPU _only_ once the scheduler state of
         * all secondary CPUs is up-to-date; after SMP initialization
         * the master can then safely call into the scheduler.
         */
        /* Allow master to continue. */
        swap(&cpu_callin_map[cpuid], 1);

        /* XXX: What's up with all the flushes? */
        local_flush_cache_all();
        local_flush_tlb_all();

        cpu_probe();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                mb();

        local_irq_enable();

        cpu_set(cpuid, cpu_online_map);
        /* last one in gets all the interrupts (for testing) */
        set_irq_udt(boot_cpu_id);
}

extern void init_IRQ(void);
extern void cpu_panic(void);

/*
 *      Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];

void __init smp4m_boot_cpus(void)
{
        smp_setup_percpu_timer();
        local_flush_cache_all();
}

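/* Start secondary CPU 'i': fork an idle task for it, hand the PROM
 * the per-CPU trampoline entry point, and poll for up to ~2 seconds
 * (10000 * 200us) for the CPU to check in via cpu_callin_map.
 */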
int smp4m_boot_one_cpu(int i)
{
        extern unsigned long sun4m_cpu_startup;
        unsigned long *entry = &sun4m_cpu_startup;
        struct task_struct *p;
        int timeout;
        int cpu_node;

        cpu_find_by_mid(i, &cpu_node);

        /* Cook up an idler for this guy. */
        p = fork_idle(i);
        current_set[i] = task_thread_info(p);
        /* See trampoline.S for details... */
        entry += ((i-1) * 3);

        /*
         * Initialize the context table.  Since the call to
         * prom_startcpu() trashes the structure, we need to
         * re-initialize it for each CPU.
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk("Starting CPU %d at %p\n", i, entry);
        local_flush_cache_all();
        prom_startcpu(cpu_node,
                      &smp_penguin_ctable, 0, (char *)entry);

        /* wheee... it's going... */
        for(timeout = 0; timeout < 10000; timeout++) {
                if(cpu_callin_map[i])
                        break;
                udelay(200);
        }

        if (!(cpu_callin_map[i])) {
                printk("Processor %d is stuck.\n", i);
                return -ENODEV;
        }

        local_flush_cache_all();
        return 0;
}

void __init smp4m_smp_done(void)
{
        int i, first;
        int *prev;

        /* Set up the CPU list used for IRQ rotation: each online CPU's
         * cpu_data(i).next points at the next online CPU, and the last
         * entry wraps back to the first; smp4m_irq_rotate() walks this
         * ring.
         */
        first = 0;
        prev = &first;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i)) {
                        *prev = i;
                        prev = &cpu_data(i).next;
                }
        }
        *prev = first;
        local_flush_cache_all();

        /* Free unneeded trap tables */
        if (!cpu_isset(1, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu1));
                init_page_count(virt_to_page(trapbase_cpu1));
                free_page((unsigned long)trapbase_cpu1);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(2, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu2));
                init_page_count(virt_to_page(trapbase_cpu2));
                free_page((unsigned long)trapbase_cpu2);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(3, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu3));
                init_page_count(virt_to_page(trapbase_cpu3));
                free_page((unsigned long)trapbase_cpu3);
                totalram_pages++;
                num_physpages++;
        }

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
}

/* At each hardware IRQ, we get this called to forward IRQ reception
 * to the next processor.  The caller must disable the IRQ level being
 * serviced globally so that there are no double interrupts received.
 *
 * XXX See sparc64 irq.c.
 */
void smp4m_irq_rotate(int cpu)
{
        int next = cpu_data(cpu).next;
        if (next != cpu)
                set_irq_udt(next);
}

/* Cross calls do all the message-passing work themselves in order to
 * be efficient and atomic, so only stopcpu and reschedule messages
 * come through here.
 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
        static unsigned long smp_cpu_in_msg[NR_CPUS];
        cpumask_t mask;
        int me = smp_processor_id();
        int irq, i;

        if(msg == MSG_RESCHEDULE) {
                irq = IRQ_RESCHEDULE;

                if(smp_cpu_in_msg[me])
                        return;
        } else if(msg == MSG_STOP_CPU) {
                irq = IRQ_STOP_CPU;
        } else {
                goto barf;
        }

        smp_cpu_in_msg[me]++;
        if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
                mask = cpu_online_map;
                if(target == MSG_ALL_BUT_SELF)
                        cpu_clear(me, mask);
                for(i = 0; i < 4; i++) {
                        if (cpu_isset(i, mask))
                                set_cpu_int(i, irq);
                }
        } else {
                set_cpu_int(target, irq);
        }
        smp_cpu_in_msg[me]--;

        return;
barf:
        printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
        panic("Bogon SMP message pass.");
}

static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
        unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
        register int ncpus = SUN4M_NCPUS;
        unsigned long flags;

        spin_lock_irqsave(&cross_call_lock, flags);

        /* Init function glue. */
        ccall_info.func = func;
        ccall_info.arg1 = arg1;
        ccall_info.arg2 = arg2;
        ccall_info.arg3 = arg3;
        ccall_info.arg4 = arg4;
        ccall_info.arg5 = arg5;

        /* Init receive/complete mapping, plus fire the IPIs off. */
        {
                cpumask_t mask = cpu_online_map;
                register int i;

                cpu_clear(smp_processor_id(), mask);
                for(i = 0; i < ncpus; i++) {
                        if (cpu_isset(i, mask)) {
                                ccall_info.processors_in[i] = 0;
                                ccall_info.processors_out[i] = 0;
                                set_cpu_int(i, IRQ_CROSS_CALL);
                        } else {
                                ccall_info.processors_in[i] = 1;
                                ccall_info.processors_out[i] = 1;
                        }
                }
        }

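        /* Wait in two phases: first until every target CPU has entered
         * the IPI handler, then until every target has finished, so the
         * cross call is complete everywhere before we return.
         */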
        {
                register int i;

                i = 0;
                do {
                        while(!ccall_info.processors_in[i])
                                barrier();
                } while(++i < ncpus);

                i = 0;
                do {
                        while(!ccall_info.processors_out[i])
                                barrier();
                } while(++i < ncpus);
        }

        spin_unlock_irqrestore(&cross_call_lock, flags);
}

/* Running cross calls: executed on each target CPU in response to
 * IRQ_CROSS_CALL.  Flag ourselves in, run the queued function, then
 * flag ourselves out.
 */
void smp4m_cross_call_irq(void)
{
        int i = smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

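/* Per-CPU level-14 timer tick: ack the profile interrupt, feed the
 * profiler, and once every prof_multiplier(cpu) ticks do the normal
 * per-CPU timekeeping via update_process_times().
 */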
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        clear_profile_irq(cpu);

        profile_tick(CPU_PROFILING, regs);

        if(!--prof_counter(cpu)) {
                int user = user_mode(regs);

                irq_enter();
                update_process_times(user);
                irq_exit();

                prof_counter(cpu) = prof_multiplier(cpu);
        }
}

extern unsigned int lvl14_resolution;

static void __init smp_setup_percpu_timer(void)
{
        int cpu = smp_processor_id();

        prof_counter(cpu) = prof_multiplier(cpu) = 1;
        load_profile_irq(cpu, lvl14_resolution);

        if(cpu == boot_cpu_id)
                enable_pil_irq(14);
}

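/* BTFIXUP "blackboxes": each patcher below is handed the address of a
 * template instruction whose rd field names the register to use, and
 * rewrites the code in place with the %tbr-based sequences shown in
 * the instruction comments, deriving the CPU number from the per-CPU
 * trap base register.
 */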
void __init smp4m_blackbox_id(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;              /* rd %tbr, reg */
        addr[1] = 0x8130200c | rd | rs1;        /* srl reg, 0xc, reg */
        addr[2] = 0x80082003 | rd | rs1;        /* and reg, 3, reg */
}

void __init smp4m_blackbox_current(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;              /* rd %tbr, reg */
        addr[2] = 0x8130200a | rd | rs1;        /* srl reg, 0xa, reg */
        addr[4] = 0x8008200c | rd | rs1;        /* and reg, 0xc, reg */
}

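/* Hook the sun4m implementations into the generic sparc32 SMP entry
 * points via BTFIXUP.
 */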
void __init sun4m_init_smp(void)
{
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}