#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+#endif
#include "cpu.h"
-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
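+/*
+ * Each CPU gets its own GDT descriptor and a small per-CPU stack used
+ * by the 16-bit-stack (espfix) fixup on the iret return path in entry.S.
+ */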
+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
-extern void mcheck_init(struct cpuinfo_x86 *c);
+static int cachesize_override __devinitdata = -1;
+static int disable_x86_fxsr __devinitdata = 0;
+static int disable_x86_serial_nr __devinitdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
extern int disable_pse;
static struct cpu_dev default_cpu = {
.c_init = default_init,
+ .c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
}
__setup("cachesize=", cachesize_setup);
-int __init get_model_name(struct cpuinfo_x86 *c)
+int __devinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
char *p, *q;
}
-void __init display_cacheinfo(struct cpuinfo_x86 *c)
+void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ecx, edx, l2size;
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
struct cpu_model_info *info;
}
-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
+ static int printed;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (cpu_devs[i]) {
c->x86_vendor = i;
if (!early)
this_cpu = cpu_devs[i];
- break;
+ return;
}
}
}
+ if (!printed) {
+ printed++;
+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
+ }
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ this_cpu = &default_cpu;
}
/* Probe for the CPUID instruction */
-int __init have_cpuid_p(void)
+static int __devinit have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
- The others are not touched to avoid unwanted side effects. */
-void __init early_cpu_detect(void)
+ The others are not touched to avoid unwanted side effects.
+
+ WARNING: this function is only called on the BP. Don't add code here
+ that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
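+ /*
+ * CPUID leaf 1, EAX: stepping in bits 0-3, model in bits 4-7, family
+ * in bits 8-11, extended model in bits 16-19, extended family in
+ * bits 20-27.  The extended family is only added for family 0xf, and
+ * the extended model is folded in from family 6 upwards.
+ */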
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
c->x86_mask = tfms & 15;
if (cap0 & (1<<19))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
}
-
- early_intel_workaround(c);
}
-void __init generic_identify(struct cpuinfo_x86 * c)
+void __devinit generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
int junk;
c->x86_capability[4] = excap;
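+ /* Same family/model decode from CPUID leaf 1 as in early_cpu_detect(). */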
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
c->x86_mask = tfms & 15;
} else {
/* Have CPUID level 0 only - unheard of */
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
if ( (xlvl & 0xffff0000) == 0x80000000 ) {
- if ( xlvl >= 0x80000001 )
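+ /*
+ * Word 1 is 0x80000001:EDX; word 6 is 0x80000001:ECX, which
+ * carries X86_FEATURE_CMP_LEGACY, tested in detect_ht() below.
+ */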
+ if ( xlvl >= 0x80000001 ) {
c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
if ( xlvl >= 0x80000004 )
get_model_name(c); /* Default name */
}
}
+
+ early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
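+ /* The initial APIC ID sits in CPUID leaf 1, EBX bits 24-31. */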
+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
}
-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
/* Disable processor serial number */
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
-void __init identify_cpu(struct cpuinfo_x86 *c)
+void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
int i;
c->x86_model = c->x86_mask = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_max_cores = 1;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!have_cpuid_p()) {
generic_identify(c);
- printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
if (this_cpu->c_identify) {
this_cpu->c_identify(c);
- printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
-}
+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+ }
/*
* Vendor-specific initialization. In this section we
if (disable_pse)
clear_bit(X86_FEATURE_PSE, c->x86_capability);
- /* hack: disable SEP unconditionally. */
- clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
/* If the model name is still unset, do table lookup. */
if ( !c->x86_model_id[0] ) {
char *p;
/* Now the feature flags better reflect actual CPU features! */
- printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
+ printk(KERN_DEBUG "CPU: After all inits, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
/*
* On SMP, boot_cpu_data holds the common feature set between
}
/* Init Machine Check Exception if available. */
-#ifdef CONFIG_X86_MCE
mcheck_init(c);
-#endif
+
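+ /*
+ * The SEP vsyscall page is set up once, on the boot CPU, but the
+ * SYSENTER MSRs have to be programmed on every CPU.
+ */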
+ if (c == &boot_cpu_data)
+ sysenter_setup();
+ enable_sep_cpu();
+
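+ /* MTRR state is captured on the BP and then replicated on the APs. */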
+ if (c == &boot_cpu_data)
+ mtrr_bp_init();
+ else
+ mtrr_ap_init();
}
-/*
- * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
- */
-
-void __init dodgy_tsc(void)
+
+#ifdef CONFIG_X86_HT
+void __devinit detect_ht(struct cpuinfo_x86 *c)
{
- if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
- ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
- cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+ u32 eax, ebx, ecx, edx;
+ int index_msb, core_bits;
+ int cpu = smp_processor_id();
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+
+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ return;
+
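+ /* EBX bits 16-23: logical processors per physical package. */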
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1) {
+
+ if (smp_num_siblings > NR_CPUS) {
+ printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
+
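+ /*
+ * Decompose the initial APIC ID: the low bits select the thread
+ * within a core, the next get_count_order(x86_max_cores) bits the
+ * core, and whatever remains is the physical package ID.
+ */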
+ index_msb = get_count_order(smp_num_siblings);
+ phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+
+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+ index_msb = get_count_order(smp_num_siblings);
+
+ core_bits = get_count_order(c->x86_max_cores);
+
+ cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+ ((1 << core_bits) - 1);
+
+ if (c->x86_max_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ cpu_core_id[cpu]);
+ }
}
+#endif
-void __init print_cpu_info(struct cpuinfo_x86 *c)
+void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
char *vendor = NULL;
printk("\n");
}
-unsigned long cpu_initialized __initdata = 0;
+cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
/* This is hacky. :)
* We're emulating future behavior.
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
-void early_cpu_detect(void);
void __init early_cpu_init(void)
{
- early_cpu_detect();
intel_cpu_init();
cyrix_init_cpu();
nsc_init_cpu();
rise_init_cpu();
nexgen_init_cpu();
umc_init_cpu();
+ early_cpu_detect();
#ifdef CONFIG_DEBUG_PAGEALLOC
/* pse is not compatible with on-the-fly unmapping,
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
*/
-void __init cpu_init (void)
+void __devinit cpu_init(void)
{
int cpu = smp_processor_id();
- struct tss_struct * t = init_tss + cpu;
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread;
+ struct desc_struct *gdt;
+ __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
- if (test_and_set_bit(cpu, &cpu_initialized)) {
+ if (cpu_test_and_set(cpu, cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
}
/*
- * Initialize the per-CPU GDT with the boot GDT,
- * and set up the GDT descriptor:
+ * This is a horrible hack to allocate the GDT. The problem
+ * is that cpu_init() is called really early for the boot CPU
+ * (and hence needs bootmem) but much later for the secondary
+ * CPUs, when bootmem will have gone away
*/
- if (cpu) {
- memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
- cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
- cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+ /* alloc_bootmem_pages panics on failure, so no check */
+ memset(gdt, 0, PAGE_SIZE);
+ } else {
+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+ if (unlikely(!gdt)) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+ for (;;)
+ local_irq_enable();
+ }
}
+
/*
- * Set up the per-thread TLS descriptor cache:
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
*/
- memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
- __asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
- __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+ /* Set up GDT entry for 16bit stack */
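+ /*
+ * In a segment descriptor, limit[15:0] occupies bits 0-15 while the
+ * base is split: bits 0-23 of stk16_off land in descriptor bits
+ * 16-39 and bits 24-31 in bits 56-63, hence the masks below.
+ */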
+ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
+ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
+ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
+ (CPU_16BIT_STACK_SIZE - 1);
- /*
- * Delete NT
- */
- __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
+ cpu_gdt_descr->size = GDT_SIZE - 1;
+ cpu_gdt_descr->address = (unsigned long)gdt;
+
+ load_gdt(cpu_gdt_descr);
+ load_idt(&idt_descr);
/*
* Set up and load the per-CPU TSS and LDT
load_esp0(t, thread);
set_tss_desc(cpu,t);
- cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
load_TR_desc();
- if (cpu)
- load_LDT(&init_mm.context);
+ load_LDT(&init_mm.context);
+#ifdef CONFIG_DOUBLEFAULT
/* Set up doublefault TSS pointer in the GDT */
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
- cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff;
-
- if (cpu)
- trap_init_virtual_GDT();
+#endif
/* Clear %fs and %gs. */
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
/* Clear all 6 debug registers: */
-
-#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
-
- CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-
-#undef CD
+ set_debugreg(0, 0);
+ set_debugreg(0, 1);
+ set_debugreg(0, 2);
+ set_debugreg(0, 3);
+ set_debugreg(0, 6);
+ set_debugreg(0, 7);
/*
* Force FPU initialization:
*/
current_thread_info()->status = 0;
- current->used_math = 0;
+ clear_used_math();
mxcsr_feature_mask_init();
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __devinit cpu_uninit(void)
+{
+ int cpu = raw_smp_processor_id();
+ cpu_clear(cpu, cpu_initialized);
+
+ /* lazy TLB state */
+ per_cpu(cpu_tlbstate, cpu).state = 0;
+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+#endif