#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#include "cpu.h"

DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);

static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);
extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev default_cpu = {
        .c_init = default_init,
};

static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);
int __init get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while ( *p == ' ' )
                p++;                    /* Skip the leading spaces */
        if ( p != q ) {
                while ( *p )
                        *q++ = *p++;    /* Shift the name left */
                while ( q <= &c->x86_model_id[48] )
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}
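
/*
 * Note on the above: extended CPUID leaves 0x80000002..0x80000004 each
 * return 16 bytes of the processor brand string in EAX..EDX, 48 bytes
 * in all, which is why x86_model_id is filled via an array of 12 dwords.
 */
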
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                        edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;     /* L2 size in KB lives in bits 31..16 */

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if ( c->x86_model >= 16 )
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
                                break;
                        }
                }
        }
}

static int __init x86_fxsr_setup(char * s)
{
        disable_x86_fxsr = 1;
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        __asm__("pushfl\n\t"
                "pushfl\n\t"
                "popl %0\n\t"
                "movl %0,%1\n\t"
                "xorl %2,%0\n\t"
                "pushl %0\n\t"
                "popfl\n\t"
                "pushfl\n\t"
                "popl %0\n\t"
                "popfl\n\t"
                : "=&r" (f1), "=&r" (f2)
                : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int __init have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}
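
/*
 * Background on the probe above: the ID bit (bit 21) of EFLAGS can only
 * be toggled by software when the processor implements CPUID, so
 * flipping it and reading it back detects the instruction without ever
 * risking an invalid-opcode fault on a 386 or early 486.
 */
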
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects. */
void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        c->x86_cache_alignment = 32;

        if (!have_cpuid_p())
                return;

        /* Get vendor name; the 12-byte ID string arrives in EBX,EDX,ECX order */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c, 1);

        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
                if (c->x86 == 0xf) {
                        c->x86 += (tfms >> 20) & 0xff;
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                }
                c->x86_mask = tfms & 15;
                if (cap0 & (1<<19))     /* CLFLUSH implies a valid line size */
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
        }

        early_intel_workaround(c);
}
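
/*
 * Layout of the CPUID leaf 1 EAX word ("tfms") decoded above and again
 * in generic_identify() below:
 *
 *      bits  3:0       stepping (x86_mask)
 *      bits  7:4       model
 *      bits 11:8       family
 *      bits 19:16      extended model (valid only when family == 0xf)
 *      bits 27:20      extended family (added to the base family of 0xf)
 */
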
void __init generic_identify(struct cpuinfo_x86 * c)
{
        u32 tfms, xlvl;
        int junk;

        if (have_cpuid_p()) {
                /* Get vendor name */
                cpuid(0x00000000, &c->cpuid_level,
                      (int *)&c->x86_vendor_id[0],
                      (int *)&c->x86_vendor_id[8],
                      (int *)&c->x86_vendor_id[4]);

                get_cpu_vendor(c, 0);
                /* Initialize the standard set of capabilities */
                /* Note that the vendor-specific code below might override */

                /* Intel-defined flags: level 0x00000001 */
                if ( c->cpuid_level >= 0x00000001 ) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &junk, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf) {
                                c->x86 += (tfms >> 20) & 0xff;
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        }
                        c->x86_mask = tfms & 15;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                        if ( xlvl >= 0x80000001 )
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                        if ( xlvl >= 0x80000004 )
                                get_model_name(c); /* Default name */
                }
        }
}

static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
                /* Disable processor serial number */
                unsigned long lo, hi;
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_bit(X86_FEATURE_PN, c->x86_capability);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /* First of all, decide if this is a 486 or higher */
                /* It's a 486 if we can modify the AC flag */
                if ( flag_is_changeable_p(X86_EFLAGS_AC) )
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }

        generic_identify(c);

        printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
                c->x86_capability[0],
                c->x86_capability[1],
                c->x86_capability[2],
                c->x86_capability[3]);

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);

                printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
                        c->x86_capability[0],
                        c->x86_capability[1],
                        c->x86_capability[2],
                        c->x86_capability[3]);
        }

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* TSC disabled? */
        if ( tsc_disable )
                clear_bit(X86_FEATURE_TSC, c->x86_capability);

        /* FXSR disabled? */
        if (disable_x86_fxsr) {
                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
                clear_bit(X86_FEATURE_XMM, c->x86_capability);
        }

        if (disable_pse)
                clear_bit(X86_FEATURE_PSE, c->x86_capability);

        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
                char *p;
                p = table_lookup_model(c);
                if ( p )
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86_vendor, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */

        printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
               c->x86_capability[0],
               c->x86_capability[1],
               c->x86_capability[2],
               c->x86_capability[3]);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                /* AND the already accumulated flags with these */
                for ( i = 0 ; i < NCAPINTS ; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
}

/*
 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */
void __init dodgy_tsc(void)
{
        if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
            ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
                cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
}

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

unsigned long cpu_initialized __initdata = 0;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
void early_cpu_detect(void);

void __init early_cpu_init(void)
{
        intel_cpu_init();
        cyrix_init_cpu();
        nsc_init_cpu();
        amd_init_cpu();
        centaur_init_cpu();
        transmeta_init_cpu();
        rise_init_cpu();
        nexgen_init_cpu();
        umc_init_cpu();
        early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
        /* pse is not compatible with on-the-fly unmapping,
         * disable it even if the cpus claim to support it.
         */
        clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
        disable_pse = 1;
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
        int cpu = smp_processor_id();
        struct tss_struct * t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        if (test_and_set_bit(cpu, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
        if (tsc_disable && cpu_has_tsc) {
                printk(KERN_NOTICE "Disabling TSC...\n");
                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
                set_in_cr4(X86_CR4_TSD);
        }

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
               GDT_SIZE);
        cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
        cpu_gdt_descr[cpu].address =
            (unsigned long)&per_cpu(cpu_gdt_table, cpu);

        /*
         * Set up the per-thread TLS descriptor cache:
         */
        memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
               GDT_ENTRY_TLS_ENTRIES * 8);

        __asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));

        /*
         * Delete NT (clear the NT flag in EFLAGS)
         */
        __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current);

        load_esp0(t, thread);
        set_tss_desc(cpu,t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);

        /* Clear %fs and %gs. */
        asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

        /* Clear all 6 debug registers: */

        /* CD() stringifies its argument to emit "movl %0,%%db<N>" */
#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD

        /*
         * Force FPU initialization:
         */
        current_thread_info()->status = 0;
        current->used_math = 0;
        mxcsr_feature_mask_init();
}