 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 * This code is released under the GNU General Public License version 2 or
 * later.
 * Felix Koop : NR_CPUS used properly
 * Jose Renau : Handle single CPU case.
 * Alan Cox : By repeated request 8) - Total BogoMIP report.
 * Greg Wright : Fix for kernel stacks panic.
 * Erich Boleyn : MP v1.4 and additional changes.
 * Matthias Sattler : Changes for 2.1 kernel map.
 * Michel Lespinasse : Changes for 2.1 kernel map.
 * Michael Chastain : Change trampoline.S to gnu as.
 * Alan Cox : Dumb bug: 'B' step PPro's are fine
 * Ingo Molnar : Added APIC timers, based on code
 * Ingo Molnar : various cleanups and rewrites
 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
 * Martin J. Bligh : Added support for multi-quad systems
 * Dave Jones : Report invalid combinations of Athlon CPUs.
 * Rusty Russell : Hacked into shape for new "hotplug" boot process. */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* bitmap of online cpus */
cpumask_t cpu_online_map;

static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

 * Trampoline 80x86 program as an array.

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.

static unsigned long __init setup_trampoline(void)
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
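/*
 * Note: the physical address returned here is what do_boot_cpu() below
 * programs into the warm-reset vector and encodes into the STARTUP IPI,
 * so the freshly woken AP begins real-mode execution in this page.
 */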
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.

void __init smp_alloc_memory(void)
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	 * Has to be in very low memory so we can execute
	if (__pa(trampoline_base) >= 0x9F000)
 * The bootstrap kernel entry code has set these up. Save them for

static void __init smp_store_cpu_info(int id)
	struct cpuinfo_x86 *c = cpu_data + id;

	 * Mask B, Pentium, but not Pentium MMX
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
		 * Remember we have B step Pentia with bugs

	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))

		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
		 * It's worth noting that the A5 stepping (662) of some Athlon XPs
		 * has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||

	/* If we get here, it's not a certified SMP capable AMD system. */
	tainted |= TAINT_UNSAFE_SMP;
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];
 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *	== (a1 + a2*(2^32)) / b
 *	== a1/b + a2*(2^32/b)
 *	== a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *		    ^---- (this multiplication can overflow)

static unsigned long long __init div64 (unsigned long long a, unsigned long b0)
	unsigned long long res;

	a1 = ((unsigned int*)&a)[0];
	a2 = ((unsigned int*)&a)[1];

		(unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
		(a2 * (0xffffffff % b0)) / b0;
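	/*
	 * The truncating integer divisions above each drop a fractional part,
	 * so the result can be off by a few counts; that is accurate enough
	 * for the boot-time TSC skew reporting below.
	 */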
static void __init synchronize_tsc_bp (void)
	unsigned long long t0;
	unsigned long long sum, avg;
	unsigned long one_usec;

	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);

	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	for (i = 0; i < NR_LOOPS; i++) {
		 * all APs synchronize but they loop on '== num_cpus'
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
		atomic_set(&tsc_count_stop, 0);

		 * this lets the APs save their current TSC:
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		 * We clear the TSC in the last loop:

		 * Wait for all APs to leave the synchronization point:
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
		atomic_set(&tsc_count_start, 0);

		atomic_inc(&tsc_count_stop);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {

	avg = div64(sum, num_booting_cpus());

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
		delta = tsc_values[i] - avg;
		 * We report clock differences bigger than 2 microseconds.
		if (delta > 2*one_usec) {
			realdelta = div64(delta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n", i, realdelta);
static void __init synchronize_tsc_ap (void)
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())

		rdtscll(tsc_values[smp_processor_id()]);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
extern void calibrate_delay(void);

static atomic_t init_deasserted;
void __init smp_callin(void)
	unsigned long timeout;
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	wait_for_init_deassert(&init_deasserted);

	 * (This works even if the APIC is not enabled.)
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",

	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 * Waiting 2s total for startup (udelay is not yet working)
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		 * Has the boot CPU finished its STARTUP sequence?
		if (cpu_isset(cpuid, cpu_callout_map))
	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",

	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();

	map_cpu_to_logical_apicid();

	Dprintk("Stack at about %p\n", &cpuid);

	 * Save our processor parameters
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	 * Allow the master to continue.
	cpu_set(cpuid, cpu_callin_map);

	 * Synchronize the TSC with the BP
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
extern int cpu_idle(void);

 * Activate a secondary processor.
int __init start_secondary(void *unused)
	 * Don't put anything before smp_callin(); SMP booting is
	 * fragile enough that we want to limit the work done here
	 * to the bare minimum.
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))

	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);

	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.

	cpu_set(smp_processor_id(), cpu_online_map);

 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
void __init initialize_secondary(void)
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.

		:"r" (current->thread.esp),"r" (current->thread.eip));
static struct task_struct * __init fork_by_hand(void)
	 * don't care about the eip and regs settings since
	 * we'll never reschedule the forked task.
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
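	/*
	 * CLONE_IDLETASK is the kernel-internal "this will be an idle thread
	 * (pid 0)" flag; the pt_regs passed in are throwaway because, as noted
	 * above, the forked task is never scheduled through the normal fork
	 * return path.
	 */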
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);

#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));

void unmap_cpu_to_logical_apicid(int cpu)
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
static inline void __inquire_remote_apic(int apicid)
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
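	/*
	 * Debugging aid: APIC_DM_REMRD (remote read) asks the target APIC to
	 * put one of its registers on the APIC bus; on success the value is
	 * fetched from this CPU's Remote Read Register (APIC_RRR) below.
	 */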
	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
#ifdef WAKE_SECONDARY_VIA_NMI
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
	unsigned long send_status = 0, accept_status = 0;

	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	 * Give the other CPU some time to accept the IPI.

	 * Due to the Pentium erratum 3AP.
	maxlvt = get_maxlvt();
	apic_read_around(APIC_SPIV);
	apic_write(APIC_ESR, 0);
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	printk("APIC never delivered???\n");
	printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
#endif	/* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	 * Be paranoid about clearing APIC errors.
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);

	Dprintk("Asserting INIT.\n");

	 * Turn INIT on target chip
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT

	Dprintk("Waiting for send to finish...\n");
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	Dprintk("Deasserting INIT.\n");

	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	if (APIC_INTEGRATED(apic_version[phys_apicid]))

	 * Run STARTUP IPI loop.
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);

		Dprintk("After apic_write.\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));
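		/*
		 * The vector field of a STARTUP IPI carries the physical page
		 * number the AP starts executing at, which is why start_eip
		 * (the trampoline) must be page-aligned and sit below 1MB.
		 */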
		 * Give the other CPU some time to accept the IPI.

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		 * Give the other CPU some time to accept the IPI.

		 * Due to the Pentium erratum 3AP.
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)

	Dprintk("After Startup.\n");

	printk("APIC never delivered???\n");
	printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
#endif	/* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
	struct task_struct *idle;
	unsigned long boot_error;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;
	 * We can't use kernel_thread() since we must avoid
	 * rescheduling the child.
	idle = fork_by_hand();
		panic("failed fork for CPU %d", cpu);
	wake_up_forked_process(idle);

	 * We remove it from the pidhash and the runqueue
	 * once we got the process:
	init_idle(idle, cpu);

	idle->thread.eip = (unsigned long) start_secondary;

	unhash_process(idle);

	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	 * This grunge runs the startup process for
	 * the targeted processor.

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	 * Starting actual IPI sequence...
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	 * allow APs to start initializing.
	Dprintk("Before Callout %d.\n", cpu);
	cpu_set(cpu, cpu_callout_map);
	Dprintk("After Callout %d.\n", cpu);

	 * Wait 5s total for a response
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */

	if (cpu_isset(cpu, cpu_callin_map)) {
		/* number CPUs logically, starting from 1 (BSP is 0) */
		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data[cpu]);
		Dprintk("CPU has booted.\n");

		if (*((volatile unsigned char *)trampoline_base)
			/* trampoline started but...? */
			printk("Stuck ??\n");
			/* trampoline code not run */
			printk("Not responding.\n");
		inquire_remote_apic(apicid);

	/* Try to put things back the way they were before ... */
	unmap_cpu_to_logical_apicid(cpu);
	cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
	cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling (void)
	unsigned long cachesize;       /* kB */
	unsigned long bandwidth = 350; /* MB/s */
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on

		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.

	cachesize = boot_cpu_data.x86_cache_size;
	if (cachesize == -1) {
		cachesize = 16;	/* Pentiums, 2x8kB cache */

	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
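	/*
	 * Rough unit check: (cpu_khz>>10) is approximately cycles per usec,
	 * (cachesize<<10) is the cache size in bytes, and bandwidth is in
	 * MB/s (roughly bytes per usec), so cacheflush_time comes out in
	 * CPU cycles.
	 */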
	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time/(cpu_khz/1000),
		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",

 * Cycle through the processors sending APIC IPIs to boot each.

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */

int cpu_sibling_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	 * Setup boot CPU information
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					" Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();

	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);

	 * If we couldn't find a local APIC, then get out of here now!
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);

	 * If SMP should be disabled, then really disable it!
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);

	map_cpu_to_logical_apicid();

	setup_portio_remap();
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		 * Don't even attempt to start the boot CPU!
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))

		if (!check_apicid_present(bit))
		if (max_cpus <= cpucount+1)

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",

	 * Cleanup possible dangling ends...
	smpboot_restore_warm_reset_vector();

	 * Allow the user to impress friends.
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
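	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing the summed
	 * loops_per_jiffy by (500000/HZ) yields the integer part and the
	 * (5000/HZ) division supplies the two fractional digits.
	 */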
	Dprintk("Before bogocount - setting activated=1.\n");

		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	 * Don't taint if we are running SMP kernel on a single non-MP
	if (tainted & TAINT_UNSAFE_SMP) {
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
			tainted &= ~TAINT_UNSAFE_SMP;

	Dprintk("Boot done.\n");
	 * If Hyper-Threading is available, construct cpu_sibling_map[], so
	 * that we can look up a CPU's sibling efficiently.
	if (cpu_has_ht && smp_num_siblings > 1) {
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			cpu_sibling_map[cpu] = NO_PROC_ID;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!cpu_isset(cpu, cpu_callout_map))

			for (i = 0; i < NR_CPUS; i++) {
				if (i == cpu || !cpu_isset(i, cpu_callout_map))
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					cpu_sibling_map[cpu] = i;
					printk("cpu_sibling_map[%d] = %d\n", cpu, cpu_sibling_map[cpu]);

			if (cpu_sibling_map[cpu] == NO_PROC_ID) {
				smp_num_siblings = 1;
				printk(KERN_WARNING "WARNING: No sibling found for CPU %d.\n", cpu);

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	 * Synchronize the TSC with the AP
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
/* These are wrappers to interface to the new boot process.  Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
	smp_boot_cpus(max_cpus);

void __devinit smp_prepare_boot_cpu(void)
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
int __devinit __cpu_up(unsigned int cpu)
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {

	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))

void __init smp_cpus_done(unsigned int max_cpus)
#ifdef CONFIG_X86_IO_APIC
	cpumask_t targets = CPU_MASK_ALL;
	setup_ioapic_dest(targets);
void __init smp_intr_init(void)
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);