/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	: NR_CPUS used properly
 *		Jose Renau	: Handle single CPU case.
 *		Alan Cox	: By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	: Fix for kernel stacks panic.
 *		Erich Boleyn	: MP v1.4 and additional changes.
 *		Matthias Sattler	: Changes for 2.1 kernel map.
 *		Michel Lespinasse	: Changes for 2.1 kernel map.
 *		Michael Chastain	: Change trampoline.S to gnu as.
 *		Alan Cox	: Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	: Added APIC timers, based on code
 *		Ingo Molnar	: various cleanups and rewrites
 *		Tigran Aivazian	: fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki	: Bits for genuine 82489DX APICs
 *		Martin J. Bligh	: Added support for multi-quad systems
 *		Dave Jones	: Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	: Hacked into shape for new "hotplug" boot process.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
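/*
 * The <mach_*> and <smpboot_hooks.h> headers come from the i386
 * subarchitecture include directories (mach-default, mach-numaq, ...),
 * so NUMA-Q and friends can override the APIC and CPU-wakeup details
 * used below (wait_for_init_deassert(), wakeup_secondary_cpu(), ...).
 */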
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* bitmap of online cpus */
cpumask_t cpu_online_map;

static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
int smp_threads_ready;

static int cpucount;
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __init setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
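/*
 * Note on the 0x9F000 check above: the AP starts executing in real mode,
 * so the trampoline must sit in the first megabyte, and the top of
 * conventional memory (0x9F000 and up, typically the EBDA) is commonly
 * reserved by the BIOS; hence we insist on a page strictly below it.
 */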
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
		 * It's worth noting that the A5 stepping (662) of some Athlon XPs
		 * has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		tainted |= TAINT_UNSAFE_SMP;
	}

valid_k7:
	;
}
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSCs synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5
/*
 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *	== (a1 + a2*(2^32)) / b
 *	== a1/b + a2*(2^32/b)
 *	== a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *		    ^---- (this multiplication can overflow)
 */
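/*
 * A worked example of the decomposition above: a = 10*2^32 (a1 = 0,
 * a2 = 10), b = 5.  0xffffffff/5 = 858993459 with remainder 0, so the
 * four terms are 0 + 10*858993459 + 2 + 0 = 8589934592 = 2*2^32, which
 * is exact here.  In general each term is truncated separately, so the
 * result can come out slightly low; that is fine for the boot-time
 * averaging this helper is used for.
 */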
static unsigned long long __init div64 (unsigned long long a, unsigned long b0)
{
	unsigned int a1, a2;
	unsigned long long res;

	a1 = ((unsigned int*)&a)[0];
	a2 = ((unsigned int*)&a)[1];

	res = a1/b0 +
		(unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
		a2 / b0 +
		(a2 * (0xffffffff % b0)) / b0;

	return res;
}
static void __init synchronize_tsc_bp (void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned long one_usec;

	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * the CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_stop, 0);
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_start, 0);
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = div64(sum, num_booting_cpus());
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;

		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report clock differences bigger than 2 microseconds.
		 */
		if (delta > 2*one_usec) {
			long realdelta;

			realdelta = div64(delta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n", i, realdelta);
		}
	}
}
static void __init synchronize_tsc_ap (void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
	}
}
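/*
 * The two counters implement a simple rendezvous: in each pass every AP
 * increments tsc_count_start and spins until it reaches
 * num_booting_cpus(); the BP waits for the N-1 APs, resets
 * tsc_count_stop, and then contributes the final increment, releasing
 * everybody to read (and, on the last pass, zero) their TSC at nearly
 * the same moment.  The same handshake on tsc_count_stop closes the
 * iteration.
 */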
extern void calibrate_delay(void);

static atomic_t init_deasserted;
void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic, so we keep the APIC
	 * bus completely silent for one second; that overestimates,
	 * by a factor of two, the time the boot CPU spends sending
	 * its (at most two) STARTUP IPIs. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n",&cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
}
extern int cpu_idle(void);

/*
 * Activate a secondary processor.
 */
int __init start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(), SMP
	 * booting is so fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();
	cpu_set(smp_processor_id(), cpu_online_map);
	wmb();
	return cpu_idle();
}
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __init initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp),"r" (current->thread.eip));
}
static struct task_struct * __init fork_by_hand(void)
{
	struct pt_regs regs;
	/*
	 * don't care about the eip and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}

#ifdef CONFIG_NUMA
/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);
/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node ++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})

#endif /* CONFIG_NUMA */

u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc. later.
 */
static int __init
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
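/*
 * The sequence below is the classic MP-spec wakeup dance: assert the
 * (level-triggered) INIT IPI to reset the target CPU, deassert it, and
 * then, on integrated APICs only, send up to two STARTUP IPIs whose
 * vector field encodes the page number of the real-mode entry point.
 */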
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n",j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		Dprintk("After apic_write.\n");

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));
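		/*
		 * The 8-bit vector field of a STARTUP IPI holds
		 * start_eip >> 12: the woken CPU starts executing in real
		 * mode at CS = vector << 8, IP = 0, i.e. at physical
		 * address vector * 4096.  That is why setup_trampoline()
		 * must hand back a page-aligned address below 1 MB.
		 */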
		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	wake_up_forked_process(idle);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we got the process:
	 */
	init_idle(idle, cpu);

	idle->thread.eip = (unsigned long) start_secondary;

	unhash_process(idle);

	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
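			/*
			 * The trampoline writes 0xA5A5A5A5 over its own
			 * first bytes once it runs (see trampoline.S), so
			 * "Stuck ??" means the trampoline was entered but
			 * the CPU never made it to smp_callin(), while
			 * "Not responding." means it never ran at all.
			 */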
		}
	}
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
static void smp_tune_scheduling (void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling; this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		cacheflush_time = 0;
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16; /* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}

		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
	}

	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;
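	/*
	 * For instance: a 500 MHz CPU (cpu_khz == 500000) with a 512 kB
	 * cache and the assumed 350 MB/s flush bandwidth gives
	 * (500000>>10) * (512<<10) / 350 ~= 731000 cycles, i.e. roughly
	 * 1.5 ms, and cache_decay_ticks then works out to 2.
	 */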
925 printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
926 (long)cacheflush_time/(cpu_khz/1000),
927 ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
928 printk("task migration cache decay timeout: %ld msecs.\n",
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}
	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();
	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();
	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
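	/*
	 * A BogoMIPS value is loops_per_jiffy*HZ/500000, so
	 * bogosum/(500000/HZ) is the integer part of the machine-wide
	 * total and bogosum/(5000/HZ) % 100 supplies the two decimal
	 * places printed above.
	 */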
1068 Dprintk("Before bogocount - setting activated=1.\n");
1071 printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
1074 * Don't taint if we are running SMP kernel on a single non-MP
1077 if (tainted & TAINT_UNSAFE_SMP) {
1079 printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
1081 tainted &= ~TAINT_UNSAFE_SMP;
1084 Dprintk("Boot done.\n");
	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
	}
	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
#ifdef CONFIG_SCHED_SMT
#ifdef CONFIG_NUMA
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;
	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		struct sched_domain *node_domain = &per_cpu(node_domains, i);
		int node = cpu_to_node(i);
		cpumask_t nodemask = node_to_cpumask(node);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = nodemask;
		phys_domain->parent = node_domain;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

		*node_domain = SD_NODE_INIT;
		node_domain->span = cpu_possible_map;
		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
	}
	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpu->cpumask = CPU_MASK_NONE;
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		int j;
		cpumask_t nodemask;
		struct sched_group *node = &sched_group_nodes[i];
		cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		first = last = NULL;
		/* Set up physical groups */
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
			struct sched_group *cpu = &sched_group_phys[j];
			if (j != first_cpu(cpu_domain->span))
				continue;

			cpu->cpumask = cpu_domain->span;
			/*
			 * Make each extra sibling increase power by 10% of
			 * the basic CPU. This is very arbitrary.
			 */
			cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
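			/*
			 * E.g. a two-sibling (hyperthreaded) package gets
			 * cpu_power = 1.1 * SCHED_LOAD_SCALE: two siblings
			 * are worth only slightly more than one full CPU,
			 * so the balancer prefers an idle package over an
			 * idle sibling.
			 */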
			node->cpu_power += cpu->cpu_power;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	first = last = NULL;
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *cpu = &sched_group_nodes[i];
		cpumask_t nodemask;
		cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		cpu->cpumask = nodemask;
		/* ->cpu_power already setup */

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;
	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}
	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/* See SMT+NUMA setup for comment */
		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86. See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
}
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}