#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"
DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);

static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);

extern int disable_pse;
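
/*
 * Fallback cpu_dev, used until get_cpu_vendor() below matches the CPUID
 * vendor string and switches this_cpu over to a vendor-specific driver.
 */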
static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}
static struct cpu_dev default_cpu = {
	.c_init	= default_init,
};

static struct cpu_dev * this_cpu = &default_cpu;
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
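
/*
 * Example: booting with "cachesize=512" forces the reported L2 size to
 * 512K, overriding whatever display_cacheinfo() below detects via CPUID.
 */
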
int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
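
/*
 * AMD-style extended cache leaves: 0x80000005 describes the L1 caches
 * (ECX = D-cache, EDX = I-cache; size in KB in bits 31:24, line size in
 * bits 7:0), 0x80000006 the L2 (size in KB in ECX[31:16]).
 */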
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c,l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				break;
			}
		}
	}
}
static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"		/* save original EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS */
	    "movl %0,%1\n\t"		/* f2 = f1 */
	    "xorl %2,%0\n\t"		/* toggle the flag under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to install the new EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS after the attempt */
	    "popfl\n\t"			/* restore the original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
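
/*
 * The EFLAGS.ID bit (bit 21) can be toggled if and only if the processor
 * implements the CPUID instruction; the probe below relies on exactly this.
 */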
/* Probe for the CPUID instruction */
int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects. */
void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
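		/*
		 * tfms layout: stepping in bits 3:0, model in 7:4, family in
		 * 11:8, extended model in 19:16, extended family in 27:20.
		 */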
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf) {
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}

	early_intel_workaround(c);
}
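
/*
 * Full identification pass: vendor string, standard and extended
 * capability words, family/model/stepping, and the brand string.
 */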
void __init generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}
}
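
/*
 * The Pentium III introduced a 96-bit processor serial number (PSN).
 * Setting bit 21 of MSR_IA32_BBL_CR_CTL disables it (sticky until reset);
 * the "serialnumber" boot option below leaves it enabled instead.
 */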
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_num_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);
	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}
	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
}
/*
 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */
void __init dodgy_tsc(void)
{
	if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
	    ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
		cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
}
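
/*
 * Hyper-Threading detection: siblings share one physical package; the
 * package ID is recovered from the initial APIC ID in CPUID.1 EBX[31:24].
 */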
#ifdef CONFIG_X86_HT
void __init detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_lsb, index_msb, tmp;
	int cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);
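	/* CPUID.1 EBX[23:16] = number of logical processors per package */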
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1 ) {
		index_lsb = 0;
		index_msb = 31;

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}
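		/*
		 * Compute ceil(log2(smp_num_siblings)): that many low APIC ID
		 * bits index the sibling, the rest identify the package.
		 */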
		tmp = smp_num_siblings;
		while ((tmp & 1) == 0) {
			tmp >>= 1;
			index_lsb++;
		}
		tmp = smp_num_siblings;
		while ((tmp & 0x80000000 ) == 0) {
			tmp <<= 1;
			index_msb--;
		}
		if (index_lsb != index_msb )
			index_msb++;
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);
	}
}
#endif
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
void early_cpu_detect(void);
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
	       GDT_SIZE);
	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address =
	    (unsigned long)&per_cpu(cpu_gdt_table, cpu);

	/*
	 * Set up the per-thread TLS descriptor cache:
	 */
	memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
	       GDT_ENTRY_TLS_ENTRIES * 8);

	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
598 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu,t);
	load_TR_desc();
	load_LDT(&init_mm.context);
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
	/* Clear all 6 debug registers: */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
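
	/* DR4 and DR5 are reserved; with CR4.DE clear they alias DR6 and
	   DR7, so clearing those covers them as well. */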

#undef CD

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}