/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#define Dprintk(x...)	printk(x)
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0
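/*
 * Note: the go[] indices are chosen so that go[MASTER] and go[SLAVE]
 * live in different cachelines (SLAVE is SMP_CACHE_BYTES/8 unsigned
 * longs into the array, i.e. exactly one cacheline on ia64), so the two
 * CPUs busy-waiting on their respective flags don't ping-pong a single
 * cacheline between them.
 */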
extern void __devinit calibrate_delay (void);
extern void start_ap (void);
extern unsigned long ia64_iobase;
task_t *task_for_booting_cpu;

/* state of each CPU */
DEFINE_PER_CPU(int, cpu_state);

/* bitmasks of currently online and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;
unsigned long ap_wakeup_vector = -1;	/* external interrupt vector used to wake up the APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;		/* are INT and IPI redirectable by the chipset? */
static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk("no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);
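/*
 * Usage note: booting with the "nointroute" command-line option sets
 * no_int_routing, which is consulted elsewhere when deciding whether
 * interrupts/IPIs may be redirected by the chipset (see
 * smp_int_redirect above).
 */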
void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		while (!go[MASTER]);	/* wait for the slave to raise its flag */
		go[MASTER] = 0;
		go[SLAVE] = ia64_get_itc();	/* publish our itc to the slave */
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]));	/* wait for the master's timestamp */
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		/* keep the round with the smallest roundtrip; it's the most precise: */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;	/* roundtrip time */
	*master = best_tm - best_t0;	/* master's timestamp, relative to t0 */

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
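/*
 * Worked example (illustrative numbers only): with best_t0 = 1000,
 * best_t1 = 1017 and best_tm = 1004, the overflow-safe midpoint is
 * tcenter = 1000/2 + 1017/2 = 1008 (the +1 fixup applies only when both
 * halves drop a remainder), so get_delta() returns 1008 - 1004 = +4:
 * this CPU's itc appears 4 cycles ahead of the master.  Halving before
 * adding matters because best_t0 + best_t1 could overflow an unsigned
 * long.
 */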
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps: t0 (the slave's itc before the request), tm (the master's itc at the
 * response), and t1 (the slave's itc after the response).
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
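/*
 * Worked example (hypothetical cycle counts): if the slave reads
 * t0 = 5000, the master replies tm = 5260, and the slave then reads
 * t1 = 5040, the midpoint is 5020, so the slave's itc appears
 * 5020 - 5260 = -240 cycles behind the master and gets advanced by
 * roughly that amount.  Whatever the true asymmetry, the residual error
 * is bounded by the roundtrip t1 - t0 = 40 cycles.
 */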
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
	extern void ia64_cpu_local_tick (void);
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif
	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER]);	/* wait for master to be ready */
	spin_lock_irqsave(&itc_sync_lock, flags);

	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			if (i > 0) {
				/* damp the adjustment to avoid oscillating around the master: */
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			ia64_set_itc(ia64_get_itc() + adj);
		}
#if DEBUG_ITC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}

	spin_unlock_irqrestore(&itc_sync_lock, flags);
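	/*
	 * Illustrative trace of the feedback loop above (made-up values):
	 * round 0 measures delta = -240 and applies adj = +240.  Round 1
	 * may still see delta = -8 because the itc write itself costs a
	 * few cycles, so adjust_latency becomes 8 and adj = 8 + 8/4 = 10,
	 * slightly over-correcting to absorb that write latency.  As soon
	 * as a round measures delta == 0, "done" locks the loop and no
	 * further adjustment is made.
	 */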
#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}
static void __devinit
smp_callin (void)
{
	int cpuid, phys_id;
	extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	lock_ipi_calllock();
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* setup vector on AP & enable */
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(), which
		 * bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with BP.\n");

		ia64_sync_itc(0);
	}
	calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __devinit
start_secondary (void *unused)
{
	extern int cpu_idle (void);

	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
	cpu_init();
	smp_callin();

	return cpu_idle();
}
static struct task_struct * __devinit
fork_by_hand (void)
{
	/*
	 * Don't care about the IP and regs settings since we'll never reschedule the
	 * forked task anyway.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
}
struct create_idle {
	struct task_struct *idle;
	struct completion done;
};

void
do_fork_idle (void *_c_idle)
{
	struct create_idle *c_idle = _c_idle;

	c_idle->idle = fork_by_hand();
	complete(&c_idle->done);
}
static int __devinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle;
	DECLARE_WORK(work, do_fork_idle, &c_idle);

	init_completion(&c_idle.done);
	/*
	 * We can't use kernel_thread() since we must avoid rescheduling the child.
	 */
	if (!keventd_up() || current_is_keventd())
		work.func(work.data);
	else {
		schedule_work(&work);
		wait_for_completion(&c_idle.done);
	}
	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);
	wake_up_forked_process(c_idle.idle);
	/*
	 * We remove it from the pidhash and the runqueue
	 * once we have the process:
	 */
	init_idle(c_idle.idle, cpu);

	unhash_process(c_idle.idle);

	task_for_booting_cpu = c_idle.idle;
	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);	/* 100,000 iterations x 100us = 10s total */
	}
	Dprintk("\n");
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
static int __init
decay (char *str)
{
	int ticks;

	get_option(&str, &ticks);
	cache_decay_ticks = ticks;
	return 1;
}

__setup("decay=", decay);
/*
 * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
 * are apps out there which are known to suffer significantly with values >= 4.
 */
unsigned long cache_decay_ticks = 10;	/* equal to MIN_TIMESLICE */
static void
smp_tune_scheduling (void)
{
	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
	       (cache_decay_ticks + 1) * 1000 / HZ);
}
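/*
 * For example, with the default cache_decay_ticks of 10 and HZ = 1024
 * (a typical ia64 setting), this prints (10 + 1) * 1000 / 1024 = 10 msecs.
 */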
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
#ifdef CONFIG_HOTPLUG_CPU
		cpu_set(cpu, cpu_possible_map);
#endif
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		cpu_set(cpu, cpu_present_map);
		cpu_set(cpu, cpu_possible_map);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
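/*
 * Example (hypothetical SAPIC IDs): if the BP reports physical id 0x10
 * and smp_boot_data.cpu_phys_id[] lists {0x10, 0x12, 0x14}, the result
 * is cpu 0 -> 0x10, cpu 1 -> 0x12, cpu 2 -> 0x14; the BP's own entry is
 * skipped when it reappears in the firmware list.
 */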
#ifdef CONFIG_NUMA

/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);
/* which logical CPUs are on which nodes */
cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
/*
 * Build cpu to node mapping and initialize the per node cpu masks.
 */
void __init
build_cpu_to_node_map (void)
{
	int cpu, i, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		/*
		 * All Itanium NUMA platforms I know use ACPI, so maybe we
		 * can drop this ifdef completely. [EF]
		 */
#ifdef CONFIG_ACPI_NUMA
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
#else
#	error Fixme: Dunno how to build CPU-to-node map.
#endif
		cpu_to_node_map[cpu] = node;
		if (node >= 0)
			cpu_set(cpu, node_to_cpu_mask[node]);
	}
}
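/*
 * Example (hypothetical SRAT-derived node_cpuid[] contents): with
 * {phys_id 0x10 -> nid 0, phys_id 0x12 -> nid 1} and two online CPUs,
 * this yields cpu_to_node_map = {0, 1}, node_to_cpu_mask[0] = {cpu 0}
 * and node_to_cpu_mask[1] = {cpu 1}.
 */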
#endif /* CONFIG_NUMA */
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();
	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		cpus_clear(cpu_online_map);
		cpus_clear(cpu_present_map);
		cpus_clear(cpu_possible_map);
		cpu_set(0, cpu_online_map);
		cpu_set(0, cpu_present_map);
		cpu_set(0, cpu_possible_map);
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
}
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(void);
/* must be called with cpucontrol mutex held */
static int __devinit cpu_enable(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	wmb();

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * don't permit boot processor for now
	 */
	if (cpu == 0)
		return -EBUSY;

	fixup_irqs();
	local_flush_tlb_all();
	printk("Disabled cpu %u\n", smp_processor_id());
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			/*
			 * TBD: Enable this when physical removal works,
			 * or when the processor is put in SAL_BOOT_RENDEZ mode:
			 * cpu_clear(cpu, cpu_callin_map);
			 */
			return;
		}
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* !CONFIG_HOTPLUG_CPU */
static int __devinit cpu_enable(unsigned int cpu)
{
	return 0;
}

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data(cpu)->loops_per_jiffy;

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
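/*
 * Illustrative arithmetic (made-up numbers): with HZ = 1024 and two
 * CPUs each at loops_per_jiffy = 500000, bogosum = 1000000 and the
 * integer part printed is 1000000/(500000/1024) = 1000000/488 = 2049,
 * i.e. roughly loops_per_jiffy * HZ / 500000 BogoMIPS per CPU (the
 * slight discrepancy comes from the integer division).
 */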
int __devinit
__cpu_up (unsigned int cpu)
{
	int ret, sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * Already booted: just enable it and get out of the idle loop.
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		cpu_enable(cpu);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		return 0;
	}

	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config (void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs. */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
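/*
 * Note on the fptr cast above: on ia64, a C function pointer actually
 * points to a function descriptor holding the entry point (fp) and the
 * global pointer (gp).  Both fields are converted to physical addresses
 * with ia64_tpa() because SAL expects physical addresses for the boot
 * rendezvous vector.
 */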