#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#include "cpu.h"
static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);

extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init	= default_init,
};

static struct cpu_dev * this_cpu = &default_cpu;
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;
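	/* Extended levels 0x80000002..0x80000004 each return 16 bytes
	   (EAX..EDX) of the 48-character brand string. */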
	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
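		/* Extended level 0x80000005 describes the L1 caches: in each
		   descriptor register, bits 31:24 hold the size in KB and
		   bits 7:0 the line size in bytes. */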
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}
	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;
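	/* Extended level 0x80000006: ECX bits 31:16 give the L2 size in KB,
	   bits 7:0 the line size in bytes. */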
	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c,l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				break;
			}
		}
	}
}
static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;
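	/* Save EFLAGS, flip the requested bit, write it back, then re-read
	   EFLAGS: if the bit stuck, it is software-changeable.  A pre-CPUID
	   processor cannot toggle the ID bit, which is what have_cpuid_p()
	   below relies on. */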
184 : "=&r" (f1), "=&r" (f2)
187 return ((f1^f2) & flag) != 0;
/* Probe for the CPUID instruction */
int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects. */
void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
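	/* Level 0 returns the vendor string in EBX:EDX:ECX order
	   ("Genu" "ineI" "ntel"), hence the interleaved destinations. */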
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);
	get_cpu_vendor(c, 1);

	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
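		/* The extended family/model fields (EAX bits 27:20 and 19:16)
		   only apply when the base family reads 0xf. */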
		if (c->x86 == 0xf) {
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
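		/* CPUID.1 EBX bits 15:8 give the CLFLUSH line size in 8-byte
		   units; it is only valid when the CLFSH feature bit
		   (EDX bit 19) is set. */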
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}

	early_intel_workaround(c);
}
void __init generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
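		/* CPUs without extended levels may echo back garbage here,
		   so insist on an 0x8000xxxx signature before trusting xlvl. */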
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 )
				c->x86_capability[1] = cpuid_edx(0x80000001);
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}
}
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
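		/* Setting bit 21 of MSR_IA32_BBL_CR_CTL turns the serial
		   number off until the next reset. */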
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
		c->x86_capability[0],
		c->x86_capability[1],
		c->x86_capability[2],
		c->x86_capability[3]);

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
			c->x86_capability[0],
			c->x86_capability[1],
			c->x86_capability[2],
			c->x86_capability[3]);
	}
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* hack: disable SEP for non-NX cpus; SEP breaks Execshield. */
	if (!test_bit(X86_FEATURE_NX, c->x86_capability))
		clear_bit(X86_FEATURE_SEP, c->x86_capability);
	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);
	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}
	/* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
}

/*
 *	Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */
void __init dodgy_tsc(void)
{
	if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
	    ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
		cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
}
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
unsigned long cpu_initialized __initdata = 0;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
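/*
 * A rough sketch of that future shape (hypothetical names, not the
 * current API): each vendor file would register its cpu_dev from an
 * initcall,
 *
 *	static int __init intel_cpu_register(void)
 *	{
 *		cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
 *		return 0;
 *	}
 *	arch_initcall(intel_cpu_register);
 *
 * leaving cpu_init() to simply walk the populated cpu_devs[] slots.
 */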
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
void early_cpu_detect(void);
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier', and nothing should get across.
 */
void __init cpu_init (void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = init_tss + cpu;
	struct thread_struct *thread = &current->thread;
	if (test_and_set_bit(cpu, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu) {
		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
		cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
		cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
	}
	/*
	 * Set up the per-thread TLS descriptor cache:
	 */
	memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);

	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
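	/*
	 * Delete NT (EFLAGS bit 14): left set, an iret could attempt a
	 * hardware task switch through the TSS back-link.
	 */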
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
	load_esp0(t, thread);
	set_tss_desc(cpu,t);
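	/* ltr faults on a busy TSS descriptor, so clear the busy bit
	   (bit 9 of the descriptor's high word) before loading TR. */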
	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
	load_TR_desc();
	load_LDT(&init_mm.context);
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
	cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff;

	trap_init_virtual_GDT();
	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
	/* Clear all 6 debug registers: */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
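/* '#register' stringizes the macro argument, so CD(0) expands to
   __asm__("movl %0,%%db0" ::"r"(0)); db4 and db5 do not exist. */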
	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD
	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	current->used_math = 0;
	mxcsr_feature_mask_init();
}