/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/abs_addr.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_threads_ready;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

#ifdef CONFIG_PPC_MULTIPLATFORM
void smp_mpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

	switch (target) {
	case MSG_ALL:
		mpic_send_ipi(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		mpic_send_ipi(msg, 1 << target);
		break;
	}
}

int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpus_weight(cpu_possible_map);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}

void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}

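/*
 * For reference, a sketch (not code in this file) of the secondary CPU's
 * side of the handshake above. The actual wait loop lives in the early
 * assembly entry path, but it is roughly equivalent to:
 *
 *	while (!paca[cpu].cpu_start)
 *		barrier();
 *	... fall through to secondary bring-up (start_secondary) ...
 */
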
#endif /* CONFIG_PPC_MULTIPLATFORM */

static void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

	for_each_cpu(i) {
		if (i != boot_cpuid) {
			paca[i].next_jiffy_update_tb =
				previous_tb + offset;
			previous_tb = paca[i].next_jiffy_update_tb;
		}
	}
}

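/*
 * Worked example with made-up numbers: if tb_ticks_per_jiffy were 1200000
 * and max_cpus were 4, offset would be 300000 timebase ticks, so the three
 * secondaries would get next_jiffy_update_tb values of boot_tb + 300000,
 * + 600000 and + 900000, spreading the per-cpu jiffy updates evenly
 * across one tick interval instead of taking them all at once.
 */
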
void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#if 0
	case PPC_MSG_MIGRATE_TASK:
		/* spare */
		break;
#endif
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))

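/*
 * Sanity check on the "8 seconds" figure: 1UL << 33 is about 8.6e9, and
 * each pass of the wait loops below costs at least one cycle, so on a
 * 1GHz core the busy-wait runs for at least ~8.6 seconds before we give
 * up on an unresponsive CPU.
 */
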
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	unsigned long timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (cpus == 0) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	/* Wait for response */
	timeout = SMP_CALL_TIMEOUT;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			goto out;
		}
	}

	if (wait) {
		timeout = SMP_CALL_TIMEOUT;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				goto out;
			}
		}
	}

	ret = 0;

out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}

EXPORT_SYMBOL(smp_call_function);

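/*
 * Usage sketch (illustrative only; my_counter and bump_counter are
 * hypothetical, not part of this file). Per the constraints documented
 * above, the callback runs on every other online CPU from interrupt
 * context, so it must be fast and non-blocking, and the caller must have
 * interrupts enabled. Passing wait=1 keeps &my_counter in scope until
 * all remote CPUs have finished:
 *
 *	static atomic_t my_counter = ATOMIC_INIT(0);
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	if (smp_call_function(bump_counter, &my_counter, 0, 1) != 0)
 *		printk("bump_counter did not run on all cpus\n");
 */
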
void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}

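/*
 * Ordering notes (descriptive only): the sender publishes call_data with
 * smp_wmb() before raising the IPI, so the reads above observe a complete
 * call_data_struct. Bumping ->started tells the sender its stack-resident
 * data has been read (it may tear it down when !wait); ->finished is only
 * bumped after (*func)(info) returns, which is what the wait case polls.
 */
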
extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	paca[cpu].__current = p;
	current_set[cpu] = p->thread_info;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

#ifndef CONFIG_PPC_ISERIES
	paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

	/*
	 * Should update do_gtod.stamp_xsec.
	 * For now we leave it which means the time can be some
	 * number of msecs off until someone does a settimeofday()
	 */
	do_gtod.tb_orig_stamp = tb_last_stamp;
	systemcfg->tb_orig_stamp = tb_last_stamp;
#endif

	max_cpus = smp_ops->probe();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);

	paca[boot_cpuid].__current = current;
	current_set[boot_cpuid] = current->thread_info;
}

int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	/* At boot, don't bother with non-present cpus -JSCHOPP */
	if (system_state < SYSTEM_RUNNING && !cpu_present(cpu))
		return -ENOENT;

	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
		void *tmp;

		/* maximum of 48 CPUs on machines with a segment table */
		if (cpu >= 48)
			BUG();

		tmp = &stab_array[PAGE_SIZE * cpu];
		memset(tmp, 0, PAGE_SIZE);
		paca[cpu].stab_addr = (unsigned long)tmp;
		paca[cpu].stab_real = virt_to_abs(tmp);
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			msleep(200);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

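/*
 * Note (informational): __cpu_up() is not normally called directly; in
 * kernels of this vintage the architecture-independent cpu_up() path in
 * kernel/cpu.c invokes it, either from smp_init() at boot or, with
 * CONFIG_HOTPLUG_CPU, from the sysfs "online" attribute.
 */
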
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(paca[cpu].default_decr);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);

	/*
	 * We know at boot the maximum number of cpus we can add to
	 * a partition and set cpu_possible_map accordingly. cpu_present_map
	 * needs to match for the hotplug code to allow us to hot add
	 * any offline cpus.
	 */
	cpu_present_map = cpu_possible_map;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	if (smp_ops->cpu_disable)
		return smp_ops->cpu_disable();

	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */