X-Git-Url: http://git.onelab.eu/?p=linux-2.6.git;a=blobdiff_plain;f=arch%2Fx86_64%2Fkernel%2Fsmpboot.c;h=f1ec0f34594124f73feb76dcf0f270dc420dfca5;hp=a7e2c3e95ea5940bcab4345bb500782875ba17af;hb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;hpb=e3f6fb6212a7102bdb56ba38fa1e98fe72950475

diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index a7e2c3e95..f1ec0f345 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -12,8 +12,7 @@
  *	Pentium Pro and Pentium-II/Xeon MP machines.
  *	Original development of Linux SMP code supported by Caldera.
  *
- *	This code is released under the GNU General Public License version 2 or
- *	later.
+ *	This code is released under the GNU General Public License version 2
  *
  *	Fixes
  *	Felix Koop	:	NR_CPUS used properly
@@ -31,9 +30,13 @@
  *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  *	Andi Kleen	:	Changed for SMP boot into long mode.
- *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
+ *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
+ *	Andi Kleen	:	Converted to new state machine.
+ *			Various cleanups.
+ *			Probably mostly hotplug CPU ready now.
  */
+
 #include
 #include
@@ -53,19 +56,35 @@
 #include
 #include
 #include
+#include
+
+/* Change for real CPU hotplug. Note other files need to be fixed
+   first too. */
+#define __cpuinit __init
+#define __cpuinitdata __initdata

 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 /* Package ID of each logical CPU */
 u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
 EXPORT_SYMBOL(phys_proc_id);
+EXPORT_SYMBOL(cpu_core_id);

 /* Bitmask of currently online CPUs */
 cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
+
+/*
+ * Private maps to synchronize booting between AP and BP.
+ * Probably not needed anymore, but it makes for easier debugging. -AK
+ */
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-static cpumask_t smp_commenced_mask;
+
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);

 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -74,13 +93,15 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 int smp_threads_ready;

 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);

 /*
  * Trampoline 80x86 program as an array.
  */

-extern unsigned char trampoline_data [];
-extern unsigned char trampoline_end [];
+extern unsigned char trampoline_data[];
+extern unsigned char trampoline_end[];

 /*
  * Currently trivial. Write the real->protected mode
@@ -88,11 +109,9 @@ extern unsigned char trampoline_end [];
  * has made sure it's suitably aligned.
  */

-static unsigned long __init setup_trampoline(void)
+static unsigned long __cpuinit setup_trampoline(void)
 {
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
-	extern volatile __u32 tramp_gdt_ptr;
-	tramp_gdt_ptr = __pa_symbol(&cpu_gdt_table);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
 }
@@ -102,154 +121,224 @@ static unsigned long __init setup_trampoline(void)
  * a given CPU
  */

-static void __init smp_store_cpu_info(int id)
+static void __cpuinit smp_store_cpu_info(int id)
 {
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
+	print_cpu_info(c);
 }

 /*
- * TSC synchronization.
+ * New Funky TSC sync algorithm borrowed from IA64.
+ * Main advantage is that it doesn't reset the TSCs fully and
+ * in general looks more robust and it works better than my earlier
+ * attempts. I believe it was written by David Mosberger. Some minor
+ * adjustments for x86-64 by me -AK
+ *
+ * Original comment reproduced below.
+ *
+ * Synchronize TSC of the current (slave) CPU with the TSC of the
+ * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
+ * eliminate the possibility of unaccounted-for errors (such as
+ * getting a machine check in the middle of a calibration step). The
+ * basic idea is for the slave to ask the master what itc value it has
+ * and to read its own itc before and after the master responds. Each
+ * iteration gives us three timestamps:
 *
- * We first check whether all CPUs have their TSC's synchronized,
- * then we print a warning if not, and always resync.
+ *	slave		master
+ *
+ *	t0 ---\
+ *	       ---\
+ *	           --->
+ *	                tm
+ *	           /---
+ *	       /---
+ *	t1 <---
+ *
+ *
+ * The goal is to adjust the slave's TSC such that tm falls exactly
+ * half-way between t0 and t1. If we achieve this, the clocks are
+ * synchronized provided the interconnect between the slave and the
+ * master is symmetric. Even if the interconnect were asymmetric, we
+ * would still know that the synchronization error is smaller than the
+ * roundtrip latency (t0 - t1).
+ *
+ * When the interconnect is quiet and symmetric, this lets us
+ * synchronize the TSC to within one or two cycles. However, we can
+ * only *guarantee* that the synchronization is accurate to within a
+ * round-trip time, which is typically in the range of several hundred
+ * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
+ * are usually almost perfectly synchronized, but we shouldn't assume
+ * that the accuracy is much better than half a micro second or so.
+ *
+ * [there are other errors like the latency of RDTSC and of the
+ * WRMSR. These can also account to hundreds of cycles. So it's
+ * probably worse. It claims 153 cycles error on a dual Opteron,
+ * but I suspect the numbers are actually somewhat worse -AK]
 */

-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
-
-#define NR_LOOPS 5
+#define MASTER	0
+#define SLAVE	(SMP_CACHE_BYTES/8)

-extern unsigned int fast_gettimeoffset_quotient;
+/* Intentionally don't use cpu_relax() while TSC synchronization
+   because we don't want to go into funky power save modi or cause
+   hypervisors to schedule us away. Going to sleep would likely affect
+   latency and low latency is the primary objective here. -AK */
+#define no_cpu_relax() barrier()

-static void __init synchronize_tsc_bp (void)
-{
-	int i;
-	unsigned long long t0;
-	unsigned long long sum, avg;
-	long long delta;
-	long one_usec;
-	int buggy = 0;
+static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
+static volatile __cpuinitdata unsigned long go[SLAVE + 1];
+static int notscsync __cpuinitdata;

-	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ",num_booting_cpus());
+#undef DEBUG_TSC_SYNC

-	one_usec = cpu_khz;
+#define NUM_ROUNDS	64	/* magic value */
+#define NUM_ITERS	5	/* likewise */

-	atomic_set(&tsc_start_flag, 1);
-	wmb();
+/* Callback on boot CPU */
+static __cpuinit void sync_master(void *arg)
+{
+	unsigned long flags, i;

-	/*
-	 * We loop a few times to get a primed instruction cache,
-	 * then the last pass is more or less synchronized and
-	 * the BP and APs set their cycle counters to zero all at
-	 * once. This reduces the chance of having random offsets
-	 * between the processors, and guarantees that the maximum
-	 * delay between the cycle counters is never bigger than
-	 * the latency of information-passing (cachelines) between
-	 * two CPUs.
-	 */
-	for (i = 0; i < NR_LOOPS; i++) {
-		/*
-		 * all APs synchronize but they loop on '== num_cpus'
-		 */
-		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) mb();
-		atomic_set(&tsc_count_stop, 0);
-		wmb();
-		/*
-		 * this lets the APs save their current TSC:
-		 */
-		atomic_inc(&tsc_count_start);
+	if (smp_processor_id() != boot_cpu_id)
+		return;

-		sync_core();
-		rdtscll(tsc_values[smp_processor_id()]);
-		/*
-		 * We clear the TSC in the last loop:
-		 */
-		if (i == NR_LOOPS-1)
-			write_tsc(0, 0);
+	go[MASTER] = 0;

-		/*
-		 * Wait for all APs to leave the synchronization point:
-		 */
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) mb();
-		atomic_set(&tsc_count_start, 0);
-		wmb();
-		atomic_inc(&tsc_count_stop);
+	local_irq_save(flags);
+	{
+		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
+			while (!go[MASTER])
+				no_cpu_relax();
+			go[MASTER] = 0;
+			rdtscll(go[SLAVE]);
+		}
	}
+	local_irq_restore(flags);
+}

-	sum = 0;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_isset(i, cpu_callout_map)) {
-			t0 = tsc_values[i];
-			sum += t0;
-		}
-	}
-	avg = sum / num_booting_cpus();
+/*
+ * Return the number of cycles by which our tsc differs from the tsc
+ * on the master (time-keeper) CPU. A positive number indicates our
+ * tsc is ahead of the master, negative that it is behind.
+ */
+static inline long
+get_delta(long *rt, long *master)
+{
+	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
+	unsigned long tcenter, t0, t1, tm;
+	int i;

-	sum = 0;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_isset(i, cpu_callout_map))
-			continue;
+	for (i = 0; i < NUM_ITERS; ++i) {
+		rdtscll(t0);
+		go[MASTER] = 1;
+		while (!(tm = go[SLAVE]))
+			no_cpu_relax();
+		go[SLAVE] = 0;
+		rdtscll(t1);

-		delta = tsc_values[i] - avg;
-		if (delta < 0)
-			delta = -delta;
-		/*
-		 * We report bigger than 2 microseconds clock differences.
-		 */
-		if (delta > 2*one_usec) {
-			long realdelta;
-			if (!buggy) {
-				buggy = 1;
-				printk("\n");
-			}
-			realdelta = delta / one_usec;
-			if (tsc_values[i] < avg)
-				realdelta = -realdelta;
+		if (t1 - t0 < best_t1 - best_t0)
+			best_t0 = t0, best_t1 = t1, best_tm = tm;
+	}

-			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
-				i, realdelta);
-		}
+	*rt = best_t1 - best_t0;
+	*master = best_tm - best_t0;

-		sum += delta;
-	}
-	if (!buggy)
-		printk("passed.\n");
+	/* average best_t0 and best_t1 without overflow: */
+	tcenter = (best_t0/2 + best_t1/2);
+	if (best_t0 % 2 + best_t1 % 2 == 2)
+		++tcenter;
+	return tcenter - best_tm;
 }

-static void __init synchronize_tsc_ap (void)
+static __cpuinit void sync_tsc(void)
 {
-	int i;
+	int i, done = 0;
+	long delta, adj, adjust_latency = 0;
+	unsigned long flags, rt, master_time_stamp, bound;
+#if DEBUG_TSC_SYNC
+	static struct syncdebug {
+		long rt;	/* roundtrip time */
+		long master;	/* master's timestamp */
+		long diff;	/* difference between midpoint and master's timestamp */
+		long lat;	/* estimate of tsc adjustment latency */
+	} t[NUM_ROUNDS] __cpuinitdata;
+#endif

-	/*
-	 * Not every cpu is online at the time
-	 * this gets called, so we first wait for the BP to
-	 * finish SMP initialization:
-	 */
-	while (!atomic_read(&tsc_start_flag)) mb();
+	go[MASTER] = 1;
+
+	smp_call_function(sync_master, NULL, 1, 0);
+
+	while (go[MASTER])	/* wait for master to be ready */
+		no_cpu_relax();

-	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&tsc_count_start);
-		while (atomic_read(&tsc_count_start) != num_booting_cpus()) mb();
+	spin_lock_irqsave(&tsc_sync_lock, flags);
+	{
+		for (i = 0; i < NUM_ROUNDS; ++i) {
+			delta = get_delta(&rt, &master_time_stamp);
+			if (delta == 0) {
+				done = 1;	/* let's lock on to this... */
+				bound = rt;
+			}

-		sync_core();
-		rdtscll(tsc_values[smp_processor_id()]);
-		if (i == NR_LOOPS-1)
-			write_tsc(0, 0);
+			if (!done) {
+				unsigned long t;
+				if (i > 0) {
+					adjust_latency += -delta;
+					adj = -delta + adjust_latency/4;
+				} else
+					adj = -delta;

-		atomic_inc(&tsc_count_stop);
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
+				rdtscll(t);
+				wrmsrl(MSR_IA32_TSC, t + adj);
+			}
+#if DEBUG_TSC_SYNC
+			t[i].rt = rt;
+			t[i].master = master_time_stamp;
+			t[i].diff = delta;
+			t[i].lat = adjust_latency/4;
+#endif
+		}
	}
+	spin_unlock_irqrestore(&tsc_sync_lock, flags);
+
+#if DEBUG_TSC_SYNC
+	for (i = 0; i < NUM_ROUNDS; ++i)
+		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
+		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
+#endif
+
+	printk(KERN_INFO
+	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
+	       "maxerr %lu cycles)\n",
+	       smp_processor_id(), boot_cpu_id, delta, rt);
 }

-#undef NR_LOOPS
-
-static atomic_t init_deasserted;
+static void __cpuinit tsc_sync_wait(void)
+{
+	if (notscsync || !cpu_has_tsc)
+		return;
+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
+			boot_cpu_id);
+	sync_tsc();
+}

-void __init smp_callin(void)
+static __init int notscsync_setup(char *s)
+{
+	notscsync = 1;
+	return 0;
+}
+__setup("notscsync", notscsync_setup);
+
+static atomic_t init_deasserted __cpuinitdata;
+
+/*
+ * Report back to the Boot Processor.
+ * Running on AP.
+ */
+void __cpuinit smp_callin(void)
 {
	int cpuid, phys_id;
	unsigned long timeout;
@@ -260,7 +349,8 @@
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
-	while (!atomic_read(&init_deasserted));
+	while (!atomic_read(&init_deasserted))
+		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
@@ -291,7 +381,7 @@ void __init smp_callin(void)
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
-		rep_nop();
+		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
@@ -309,8 +399,6 @@ void __init smp_callin(void)
	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

-	local_irq_enable();
-
	/*
	 * Get our bogomips.
	 */
@@ -324,26 +412,16 @@ void __init smp_callin(void)
	 */
	smp_store_cpu_info(cpuid);

-	local_irq_disable();
-
	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
-
-	/*
-	 * Synchronize the TSC with the BP
-	 */
-	if (cpu_has_tsc)
-		synchronize_tsc_ap();
 }

-int cpucount;
-
 /*
- * Activate a secondary processor.
+ * Setup code on secondary processor (after comming out of the trampoline)
 */
-void __init start_secondary(void)
+void __cpuinit start_secondary(void)
 {
	/*
	 * Dont put anything before smp_callin(), SMP
@@ -356,14 +434,10 @@ void __init start_secondary(void)
	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

-	Dprintk("cpu %d: waiting for commence\n", smp_processor_id());
-	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
-		rep_nop();
-
	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

-	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
+	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
@@ -371,27 +445,27 @@ void __init start_secondary(void)
		enable_8259A_irq(0);
	}

-
-	enable_APIC_timer();
+	enable_APIC_timer();

	/*
-	 * low-memory mappings have been cleared, flush them from
-	 * the local TLBs too.
+	 * Allow the master to continue.
	 */
-	local_flush_tlb();
-
-	Dprintk("cpu %d eSetting cpu_online_map\n", smp_processor_id());
	cpu_set(smp_processor_id(), cpu_online_map);
-	wmb();
-
+	mb();
+
+	/* Wait for TSC sync to not schedule things before.
+	   We still process interrupts, which could see an inconsistent
+	   time in that window unfortunately. */
+	tsc_sync_wait();
+
	cpu_idle();
 }

-extern volatile unsigned long init_rsp;
+extern volatile unsigned long init_rsp;
 extern void (*initial_code)(void);

 #if APIC_DEBUG
-static inline void inquire_remote_apic(int apicid)
+static void inquire_remote_apic(int apicid)
 {
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
@@ -428,7 +502,10 @@ static inline void inquire_remote_apic(int apicid)
 }
 #endif

-static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
+/*
+ * Kick the secondary to wake up.
+ */
+static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
 {
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;
@@ -551,33 +628,35 @@ static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_
	return (send_status | accept_status);
 }

-static void __init do_boot_cpu (int apicid)
+/*
+ * Boot one CPU.
+ */
+static int __cpuinit do_boot_cpu(int cpu, int apicid)
 {
	struct task_struct *idle;
	unsigned long boot_error;
-	int timeout, cpu;
+	int timeout;
	unsigned long start_rip;
-
-	cpu = ++cpucount;

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
-	if (IS_ERR(idle))
-		panic("failed fork for CPU %d", cpu);
-	x86_cpu_to_apicid[cpu] = apicid;
+	if (IS_ERR(idle)) {
+		printk("failed fork for CPU %d\n", cpu);
+		return PTR_ERR(idle);
+	}
	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

-	init_rsp = idle->thread.rsp;
+	init_rsp = idle->thread.rsp;
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

-	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
+	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);

	/*
@@ -614,7 +693,7 @@ static void __init do_boot_cpu (int apicid)
	/*
	 * Starting actual IPI sequence...
	 */
-	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
+	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
@@ -635,8 +714,6 @@ static void __init do_boot_cpu (int apicid)
		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
-			Dprintk("OK.\n");
-			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
@@ -655,76 +732,131 @@ static void __init do_boot_cpu (int apicid)
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-		cpucount--;
+		cpu_clear(cpu, cpu_present_map);
+		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
+		return -EIO;
	}
+
+	return 0;
 }

 cycles_t cacheflush_time;
 unsigned long cache_decay_ticks;

-static void smp_tune_scheduling (void)
+/*
+ * Construct cpu_sibling_map[], so that we can tell the sibling CPU
+ * on SMT systems efficiently.
+ */
+static __cpuinit void detect_siblings(void)
 {
-	int cachesize;	/* kB */
-	unsigned long bandwidth = 1000;	/* MB/s */
-	/*
-	 * Rough estimation for SMP scheduling, this is the number of
-	 * cycles it takes for a fully memory-limited process to flush
-	 * the SMP-local cache.
-	 *
-	 * (For a P5 this pretty much means we will choose another idle
-	 *  CPU almost always at wakeup time (this is due to the small
-	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
-	 *  the cache size)
-	 */
-
-	if (!cpu_khz) {
-		/*
-		 * this basically disables processor-affinity
-		 * scheduling on SMP without a TSC.
-		 */
-		cacheflush_time = 0;
-		return;
-	} else {
-		cachesize = boot_cpu_data.x86_cache_size;
-		if (cachesize == -1) {
-			cachesize = 16;	/* Pentiums, 2x8kB cache */
-			bandwidth = 100;
-		}
+	int cpu;

-		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(cpu_core_map[cpu]);
	}

-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
+	for_each_online_cpu (cpu) {
+		struct cpuinfo_x86 *c = cpu_data + cpu;
+		int siblings = 0;
+		int i;
+		if (smp_num_siblings > 1) {
+			for_each_online_cpu (i) {
+				if (cpu_core_id[cpu] == cpu_core_id[i]) {
+					siblings++;
+					cpu_set(i, cpu_sibling_map[cpu]);
+				}
+			}
+		} else {
+			siblings++;
+			cpu_set(cpu, cpu_sibling_map[cpu]);
+		}

-	printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
+		if (siblings != smp_num_siblings) {
+			printk(KERN_WARNING
+	       "WARNING: %d siblings found for CPU%d, should be %d\n",
+			       siblings, cpu, smp_num_siblings);
+			smp_num_siblings = siblings;
+		}
+		if (c->x86_num_cores > 1) {
+			for_each_online_cpu(i) {
+				if (phys_proc_id[cpu] == phys_proc_id[i])
+					cpu_set(i, cpu_core_map[cpu]);
+			}
+		} else
+			cpu_core_map[cpu] = cpu_sibling_map[cpu];
	}
 }

 /*
- * Cycle through the processors sending APIC IPIs to boot each.
+ * Cleanup possible dangling ends...
 */
-
-static void __init smp_boot_cpus(unsigned int max_cpus)
+static __cpuinit void smp_cleanup_boot(void)
 {
-	unsigned apicid, cpu, bit, kicked;
+	/*
+	 * Paranoid: Set warm reset code and vector here back
+	 * to default values.
+	 */
+	CMOS_WRITE(0, 0xf);

-	nmi_watchdog_default();
+	/*
+	 * Reset trampoline flag
+	 */
+	*((volatile int *) phys_to_virt(0x467)) = 0;

+#ifndef CONFIG_HOTPLUG_CPU
	/*
-	 * Setup boot CPU information
+	 * Free pages reserved for SMP bootup.
+	 * When you add hotplug CPU support later remove this
+	 * Note there is more work to be done for later CPU bootup.
	 */
-	smp_store_cpu_info(0); /* Final full version of the data */
-	printk(KERN_INFO "CPU%d: ", 0);
-	print_cpu_info(&cpu_data[0]);
-	current_thread_info()->cpu = 0;
-	smp_tune_scheduling();
+	free_page((unsigned long) __va(PAGE_SIZE));
+	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
+#endif
+}
+
+/*
+ * Fall back to non SMP mode after errors.
+ *
+ * RED-PEN audit/test this more. I bet there is more state messed up here.
+ */
+static __cpuinit void disable_smp(void)
+{
+	cpu_present_map = cpumask_of_cpu(0);
+	cpu_possible_map = cpumask_of_cpu(0);
+	if (smp_found_config)
+		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
+	else
+		phys_cpu_present_map = physid_mask_of_physid(0);
+	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, cpu_core_map[0]);
+}
+
+/*
+ * Handle user cpus=... parameter.
+ */
+static __cpuinit void enforce_max_cpus(unsigned max_cpus)
+{
+	int i, k;
+	k = 0;
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i))
+			continue;
+		if (++k > max_cpus) {
+			cpu_clear(i, cpu_possible_map);
+			cpu_clear(i, cpu_present_map);
+		}
+	}
+}
+/*
+ * Various sanity checks.
+ */
+static int __cpuinit smp_sanity_check(unsigned max_cpus)
+{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
@@ -737,13 +869,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
-		io_apic_irqs = 0;
-		cpu_online_map = cpumask_of_cpu(0);
-		phys_cpu_present_map = physid_mask_of_physid(0);
+		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
-		goto smp_done;
+		return -1;
	}

	/*
@@ -763,196 +893,143 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-		io_apic_irqs = 0;
-		cpu_online_map = cpumask_of_cpu(0);
-		phys_cpu_present_map = physid_mask_of_physid(0);
-		disable_apic = 1;
-		goto smp_done;
+		nr_ioapics = 0;
+		return -1;
	}

-	verify_local_APIC();
-
	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
-		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-		io_apic_irqs = 0;
-		cpu_online_map = cpumask_of_cpu(0);
-		phys_cpu_present_map = physid_mask_of_physid(0);
-		disable_apic = 1;
-		goto smp_done;
+		nr_ioapics = 0;
+		return -1;
	}

-	connect_bsp_APIC();
-	setup_local_APIC();
-
-	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
-		BUG();
-
-	x86_cpu_to_apicid[0] = boot_cpu_id;
-
-	/*
-	 * Now scan the CPU present map and fire up the other CPUs.
-	 */
-	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+	return 0;
+}

-	kicked = 1;
-	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
-		apicid = cpu_present_to_apicid(bit);
-		/*
-		 * Don't even attempt to start the boot CPU!
-		 */
-		if (apicid == boot_cpu_id || (apicid == BAD_APICID))
-			continue;
+/*
+ * Prepare for SMP bootup. The MP table or ACPI has been read
+ * earlier. Just do some sanity checking here and enable APIC mode.
+ */
+void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;

-		if (!physid_isset(apicid, phys_cpu_present_map))
-			continue;
-		if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
-			continue;
+	nmi_watchdog_default();
+	current_cpu_data = boot_cpu_data;
+	current_thread_info()->cpu = 0;	/* needed? */

-		do_boot_cpu(apicid);
-		++kicked;
-	}
+	enforce_max_cpus(max_cpus);

	/*
-	 * Cleanup possible dangling ends...
+	 * Fill in cpu_present_mask
	 */
-	{
-		/*
-		 * Install writable page 0 entry to set BIOS data area.
-		 */
-		local_flush_tlb();
-
-		/*
-		 * Paranoid: Set warm reset code and vector here back
-		 * to default values.
-		 */
-		CMOS_WRITE(0, 0xf);
-
-		*((volatile int *) phys_to_virt(0x467)) = 0;
+	for (i = 0; i < NR_CPUS; i++) {
+		int apicid = cpu_present_to_apicid(i);
+		if (physid_isset(apicid, phys_cpu_present_map)) {
+			cpu_set(i, cpu_present_map);
+			/* possible map would be different if we supported real
+			   CPU hotplug. */
+			cpu_set(i, cpu_possible_map);
+		}
	}

-	/*
-	 * Allow the user to impress friends.
-	 */
-
-	Dprintk("Before bogomips.\n");
-	if (!cpucount) {
-		printk(KERN_INFO "Only one processor found.\n");
-	} else {
-		unsigned long bogosum = 0;
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (cpu_isset(cpu, cpu_callout_map))
-				bogosum += cpu_data[cpu].loops_per_jiffy;
-		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-			cpucount+1,
-			bogosum/(500000/HZ),
-			(bogosum/(5000/HZ))%100);
-		Dprintk("Before bogocount - setting activated=1.\n");
+	if (smp_sanity_check(max_cpus) < 0) {
+		printk(KERN_INFO "SMP disabled\n");
+		disable_smp();
+		return;
	}
+
	/*
-	 * Construct cpu_sibling_map[], so that we can tell the
-	 * sibling CPU efficiently.
+	 * Switch from PIC to APIC mode.
	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		cpus_clear(cpu_sibling_map[cpu]);
-
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		int siblings = 0;
-		int i;
-		if (!cpu_isset(cpu, cpu_callout_map))
-			continue;
-
-		if (smp_num_siblings > 1) {
-			for (i = 0; i < NR_CPUS; i++) {
-				if (!cpu_isset(i, cpu_callout_map))
-					continue;
-				if (phys_proc_id[cpu] == phys_proc_id[i]) {
-					siblings++;
-					cpu_set(i, cpu_sibling_map[cpu]);
-				}
-			}
-		} else {
-			siblings++;
-			cpu_set(cpu, cpu_sibling_map[cpu]);
-		}
+	connect_bsp_APIC();
+	setup_local_APIC();

-		if (siblings != smp_num_siblings) {
-			printk(KERN_WARNING
-	       "WARNING: %d siblings found for CPU%d, should be %d\n",
-			       siblings, cpu, smp_num_siblings);
-			smp_num_siblings = siblings;
-		}
+	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
+		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
+		/* Or can we switch back to PIC here? */
	}
-	Dprintk("Boot done.\n");

	/*
-	 * Here we can be sure that there is an IO-APIC in the system. Let's
-	 * go and set it up:
+	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

-	setup_boot_APIC_clock();
-
	/*
-	 * Synchronize the TSC with the AP
+	 * Set up local APIC timer on boot CPU.
	 */
-	if (cpu_has_tsc && cpucount)
-		synchronize_tsc_bp();
-
- smp_done:
-	time_init_smp();
+	setup_boot_APIC_clock();
 }

-/* These are wrappers to interface to the new boot process. Someone
-   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+/*
+ * Early setup to make printk work.
+ */
+void __init smp_prepare_boot_cpu(void)
 {
-	smp_boot_cpus(max_cpus);
+	int me = smp_processor_id();
+	cpu_set(me, cpu_online_map);
+	cpu_set(me, cpu_callout_map);
 }

-void __devinit smp_prepare_boot_cpu(void)
+/*
+ * Entry point to boot a CPU.
+ *
+ * This is all __cpuinit, not __devinit for now because we don't support
+ * CPU hotplug (yet).
+ */
+int __cpuinit __cpu_up(unsigned int cpu)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-}
+	int err;
+	int apicid = cpu_present_to_apicid(cpu);

-int __devinit __cpu_up(unsigned int cpu)
-{
-	/* This only works at boot for x86. See "rewrite" above. */
-	if (cpu_isset(cpu, smp_commenced_mask)) {
-		local_irq_enable();
-		return -ENOSYS;
+	WARN_ON(irqs_disabled());
+
+	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);
+
+	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
+	    !physid_isset(apicid, phys_cpu_present_map)) {
+		printk("__cpu_up: bad cpu %d\n", cpu);
+		return -EINVAL;
	}

-	/* In case one didn't come up */
-	if (!cpu_isset(cpu, cpu_callin_map)) {
-		local_irq_enable();
-		return -EIO;
+	/* Boot it! */
+	err = do_boot_cpu(cpu, apicid);
+	if (err < 0) {
+		Dprintk("do_boot_cpu failed %d\n", err);
+		return err;
	}

-	local_irq_enable(); /* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

-	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
-		mb();
+		cpu_relax();
	return 0;
 }

-void __init smp_cpus_done(unsigned int max_cpus)
+/*
+ * Finish the SMP boot.
+ */
+void __cpuinit smp_cpus_done(unsigned int max_cpus)
 {
+	zap_low_mappings();
+	smp_cleanup_boot();
+
 #ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
 #endif
-	zap_low_mappings();
-}
+
+	detect_siblings();
+	time_init_gtod();
+
+	check_nmi_watchdog();
+}