/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* bitmap of online cpus */
cpumask_t cpu_online_map;

static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __init setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
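/*
 * Note: the physical address returned here is what do_boot_cpu() below
 * plugs into the warm-reset vector and encodes into the STARTUP IPI, so
 * the AP begins real-mode execution inside this trampoline page.
 */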
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
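/*
 * The 0x9F000 limit keeps the trampoline below the 640K conventional
 * memory ceiling (and, typically, clear of the EBDA): the AP fetches its
 * first instructions in real mode and can only reach low memory.
 */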
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
static void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
		 * It's worth noting that some A5 stepping (662) Athlon XPs
		 * have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		tainted |= TAINT_UNSAFE_SMP;
	}

valid_k7:
	;
}
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5
/*
 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *	== (a1 + a2*(2^32)) / b
 *	== a1/b + a2*(2^32/b)
 *	== a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *		    ^---- (this multiplication can overflow)
 */
static unsigned long long __init div64(unsigned long long a, unsigned long b0)
{
	unsigned int a1, a2;
	unsigned long long res;

	a1 = ((unsigned int *)&a)[0];
	a2 = ((unsigned int *)&a)[1];

	res = a1/b0 +
		(unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
		a2 / b0 +
		(a2 * (0xffffffff % b0)) / b0;

	return res;
}
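/*
 * Worked example (illustrative): a = 2^33, b0 = 3.  Then a1 = 0, a2 = 2,
 * and res = 0 + 2*(0xffffffff/3) + 2/3 + (2*(0xffffffff%3))/3
 *         = 2*1431655765 + 0 + 0 = 2863311530, matching floor(2^33/3).
 * Because each partial quotient is truncated, the sum can fall short of
 * the exact quotient by a few counts; that error is negligible for the
 * microsecond-scale skew estimates below.
 */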
static void __init synchronize_tsc_bp(void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned long one_usec;
	int buggy = 0;

	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * the two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/* all APs synchronize but they loop on '== num_cpus' */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/* this lets the APs save their current TSC: */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/* We clear the TSC in the last loop: */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/* Wait for all APs to leave the synchronization point: */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = div64(sum, num_booting_cpus());

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/* We report bigger than 2 microseconds clock differences. */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = div64(delta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
				i, realdelta);
		}
		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}
static void __init synchronize_tsc_ap(void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
	}
}
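/*
 * The handshake above is a two-counter barrier: on each pass every AP
 * bumps tsc_count_start and spins until all num_booting_cpus() CPUs have
 * arrived, samples its TSC at (nearly) the same instant as the BP, then
 * checks out through tsc_count_stop so the BP can reset the counters for
 * the next pass.
 */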
extern void calibrate_delay(void);

static atomic_t init_deasserted;

void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/* (This works even if the APIC is not enabled.) */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/* Waiting 2s total for startup (udelay is not yet working) */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/* Has the boot CPU finished its STARTUP sequence? */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/* Save our processor parameters */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/* Allow the master to continue. */
	cpu_set(cpuid, cpu_callin_map);

	/* Synchronize the TSC with the BP */
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
}
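/*
 * Handshake summary: the BP sets cpu_callout_map to release this CPU,
 * the CPU calibrates and stores its parameters, and setting
 * cpu_callin_map is the signal back to the BP (busy-waiting in
 * do_boot_cpu()) that this processor is up and initialized.
 */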
static int cpucount;

extern int cpu_idle(void);

/*
 * Activate a secondary processor.
 */
int __init start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();
	cpu_set(smp_processor_id(), cpu_online_map);
	wmb();
	return cpu_idle();
}
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __init initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp), "r" (current->thread.eip));
}
static struct task_struct * __init fork_by_hand(void)
{
	struct pt_regs regs;
	/*
	 * don't care about the eip and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
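/*
 * The bogus pt_regs are never used: do_boot_cpu() below resets
 * thread.eip to start_secondary(), and initialize_secondary() above
 * jumps straight to the saved esp/eip pair without ever restoring this
 * register state.
 */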
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us,
 * and this won't ... remember to clear down the APIC, etc later.
 */
static int __init
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
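/*
 * Which wakeup_secondary_cpu() variant gets built is decided by the
 * subarchitecture's mach_wakecpu.h.  The NMI path exists for machines
 * whose APs cannot be started with the standard INIT/STARTUP sequence
 * (the NUMAQ subarch, for instance, wakes its CPUs this way - verify
 * against the mach-* headers).
 */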
#ifdef WAKE_SECONDARY_VIA_INIT
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/* Be paranoid about clearing APIC errors. */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/* Turn INIT on target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/* Run STARTUP IPI loop. */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* STARTUP IPI */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/* Give the other CPU some time to accept the IPI. */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI. */
		udelay(200);
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
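/*
 * On a STARTUP IPI the low byte of the ICR carries a vector VV, which
 * the woken CPU turns into the real-mode start address 000VV000h - hence
 * the (start_eip >> 12) above.  For example, a trampoline at physical
 * 0x9000 is sent as vector 0x09 and the AP starts fetching at
 * CS:IP = 0900:0000.  This is also why start_eip must be page-aligned
 * and below 1MB.
 */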
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	wake_up_forked_process(idle);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we got the process:
	 */
	init_idle(idle, cpu);

	idle->thread.eip = (unsigned long) start_secondary;

	unhash_process(idle);

	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	irq_ctx_init(cpu);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
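/*
 * The 0xA5 byte checked above is a "live" cookie: one of the first
 * things the real-mode code in trampoline.S does is stamp a 0xA5A5A5A5
 * marker at trampoline_base, so "Stuck ??" means the AP entered the
 * trampoline but never made it into smp_callin().
 */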
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling(void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		cacheflush_time = 0;
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16; /* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}

		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
	}

	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time/(cpu_khz/1000),
		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
		cache_decay_ticks);
}
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;

static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);
	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
#ifdef CONFIG_SCHED_SMT
#ifdef CONFIG_NUMA
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		struct sched_domain *node_domain = &per_cpu(node_domains, i);
		int node = cpu_to_node(i);
		cpumask_t nodemask = node_to_cpumask(node);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = nodemask;
		phys_domain->parent = node_domain;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

		*node_domain = SD_NODE_INIT;
		node_domain->span = cpu_possible_map;
		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpu->cpumask = CPU_MASK_NONE;
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	for (i = 0; i < MAX_NUMNODES; i++) {
		int j;
		cpumask_t nodemask;
		struct sched_group *node = &sched_group_nodes[i];
		cpumask_t node_cpumask = node_to_cpumask(i);

		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		first = last = NULL;
		/* Set up physical groups */
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
			struct sched_group *cpu = &sched_group_phys[j];

			if (j != first_cpu(cpu_domain->span))
				continue;

			cpu->cpumask = cpu_domain->span;
			/*
			 * Make each extra sibling increase power by 10% of
			 * the basic CPU. This is very arbitrary.
			 */
			cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
			node->cpu_power += cpu->cpu_power;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	/* Set up nodes */
	first = last = NULL;
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *cpu = &sched_group_nodes[i];
		cpumask_t nodemask;
		cpumask_t node_cpumask = node_to_cpumask(i);

		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		cpu->cpumask = nodemask;
		/* ->cpu_power already setup */

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/* See SMT+NUMA setup for comment */
		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */
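/*
 * Resulting topology, for a hypothetical box: two hyperthreaded packages
 * on one node give CPUs 0-3 with cpu_sibling_map[0] = {0,1}.  Each CPU
 * then gets a sibling domain spanning {0,1} (or {2,3}), a physical
 * domain spanning {0,1,2,3}, and - with NUMA - a node domain spanning
 * cpu_possible_map, each level load-balancing across the groups chained
 * above it.
 */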
/* These are wrappers to interface to the new boot process.  Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
}

void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}
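/*
 * The vector constants above come from the asm-i386 irq_vectors.h
 * headers.  In kernels of this vintage the IPI vectors sit just below
 * the APIC housekeeping vectors at the top of the range (e.g.
 * RESCHEDULE_VECTOR 0xfc, INVALIDATE_TLB_VECTOR 0xfd,
 * CALL_FUNCTION_VECTOR 0xfb) - quoted from memory, so treat these
 * numbers as illustrative and check the header.
 */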