2 * linux/arch/x86-64/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
13 * This file handles the architecture-dependent parts of initialization
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
50 #include <video/edid.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
57 #include <asm/proto.h>
/*
 * Boot-time globals shared with the rest of the kernel.
 */
63 struct cpuinfo_x86 boot_cpu_data;	/* CPU features/IDs of the boot processor */
65 unsigned long mmu_cr4_features;	/* CR4 feature bits to program on each CPU */
66 EXPORT_SYMBOL_GPL(mmu_cr4_features);
69 EXPORT_SYMBOL(acpi_disabled);
70 #ifdef CONFIG_ACPI_BOOT
71 extern int __initdata acpi_ht;
72 extern acpi_interrupt_flags acpi_sci_flags;	/* SCI trigger/polarity overrides, set from the command line */
73 /* int __initdata acpi_force = 0; */
76 /* For PCI or other memory-mapped resources */
77 unsigned long pci_mem_start = 0x10000000;	/* lowest address handed to PCI; raised above RAM in setup_arch() */
79 unsigned long saved_video_mode;	/* video mode left by the boot loader, for resume/console code */
82 EXPORT_SYMBOL(swiotlb);
/*
 * Copies of boot-loader-provided data (filled in from the zero page in
 * setup_arch()), plus the kernel command-line buffers.
 */
87 struct drive_info_struct { char dummy[32]; } drive_info;	/* raw BIOS drive info block */
88 struct screen_info screen_info;
89 struct sys_desc_table_struct {
90 unsigned short length;
91 unsigned char table[0];	/* variable-length trailing data (pre-C99 flexible array idiom) */
94 struct edid_info edid_info;
97 unsigned char aux_device_present;	/* PS/2 aux (mouse) port presence from the BIOS */
99 extern int root_mountflags;
100 extern char _text, _etext, _edata, _end;	/* linker-script section markers */
102 char command_line[COMMAND_LINE_SIZE];	/* parsed/filtered command line handed to the caller */
103 char saved_command_line[COMMAND_LINE_SIZE];	/* unparsed copy kept for /proc/cmdline */
/*
 * Legacy ISA I/O port ranges present on every PC-compatible machine;
 * claimed up front in setup_arch() so drivers cannot grab them.
 */
105 struct resource standard_io_resources[] = {
106 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY | IORESOURCE_IO },
107 { "pic1", 0x20, 0x21, IORESOURCE_BUSY | IORESOURCE_IO },
108 { "timer", 0x40, 0x5f, IORESOURCE_BUSY | IORESOURCE_IO },
109 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY | IORESOURCE_IO },
110 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY | IORESOURCE_IO },
111 { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY | IORESOURCE_IO },
112 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY | IORESOURCE_IO },
113 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY | IORESOURCE_IO }
/* Number of entries in standard_io_resources[]. */
116 #define STANDARD_IO_RESOURCES \
117 (sizeof standard_io_resources / sizeof standard_io_resources[0])
/*
 * Physical-memory resources for the kernel image, legacy VGA RAM, and the
 * BIOS/option ROM areas.  .end fields left 0 are filled in at runtime
 * (kernel sections in setup_arch(), ROM extents in probe_roms()).
 */
119 struct resource code_resource = { "Kernel code", 0x100000, 0, IORESOURCE_MEM };
120 struct resource data_resource = { "Kernel data", 0, 0, IORESOURCE_MEM };
121 struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY | IORESOURCE_MEM };
/* Shorthand flag set for read-only ROM regions below. */
123 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
125 static struct resource system_rom_resource = { "System ROM", 0xf0000, 0xfffff, IORESOURCE_ROM };
126 static struct resource extension_rom_resource = { "Extension ROM", 0xe0000, 0xeffff, IORESOURCE_ROM };
/* Slots for adapter (option) ROMs discovered at 2K boundaries in probe_roms(). */
128 static struct resource adapter_rom_resources[] = {
129 { "Adapter ROM", 0xc8000, 0, IORESOURCE_ROM },
130 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
131 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
132 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
133 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
134 { "Adapter ROM", 0, 0, IORESOURCE_ROM }
/* Number of slots in adapter_rom_resources[]. */
137 #define ADAPTER_ROM_RESOURCES \
138 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
140 static struct resource video_rom_resource = { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_ROM };
/* A legacy option ROM begins with the 0xAA55 signature word. */
142 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)

/*
 * checksum - verify a BIOS option ROM image.
 * @rom:    virtual address of the ROM image
 * @length: image length in bytes
 *
 * Sums the bytes into an 8-bit accumulator; per the BIOS spec a valid
 * ROM's bytes sum to 0 mod 256.  Returns nonzero when the checksum is OK
 * (callers test it as "checksum okay" — see probe_roms()).
 */
144 static int __init checksum(unsigned char *rom, unsigned long length)
146 unsigned char *p, sum = 0;
148 for (p = rom; p < rom + length; p++)
153 static void __init probe_roms(void)
155 unsigned long start, length, upper;
160 upper = adapter_rom_resources[0].start;
161 for (start = video_rom_resource.start; start < upper; start += 2048) {
162 rom = isa_bus_to_virt(start);
163 if (!romsignature(rom))
166 video_rom_resource.start = start;
168 /* 0 < length <= 0x7f * 512, historically */
169 length = rom[2] * 512;
171 /* if checksum okay, trust length byte */
172 if (length && checksum(rom, length))
173 video_rom_resource.end = start + length - 1;
175 request_resource(&iomem_resource, &video_rom_resource);
179 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
184 request_resource(&iomem_resource, &system_rom_resource);
185 upper = system_rom_resource.start;
187 /* check for extension rom (ignore length byte!) */
188 rom = isa_bus_to_virt(extension_rom_resource.start);
189 if (romsignature(rom)) {
190 length = extension_rom_resource.end - extension_rom_resource.start + 1;
191 if (checksum(rom, length)) {
192 request_resource(&iomem_resource, &extension_rom_resource);
193 upper = extension_rom_resource.start;
197 /* check for adapter roms on 2k boundaries */
198 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
199 rom = isa_bus_to_virt(start);
200 if (!romsignature(rom))
203 /* 0 < length <= 0x7f * 512, historically */
204 length = rom[2] * 512;
206 /* but accept any length that fits if checksum okay */
207 if (!length || start + length > upper || !checksum(rom, length))
210 adapter_rom_resources[i].start = start;
211 adapter_rom_resources[i].end = start + length - 1;
212 request_resource(&iomem_resource, &adapter_rom_resources[i]);
214 start = adapter_rom_resources[i++].end & ~2047UL;
/*
 * parse_cmdline_early - scan the boot command line for options that must
 * take effect before the normal __setup() machinery runs (memory layout,
 * ACPI/APIC switches, NUMA/IOMMU setup).  Copies the raw line into
 * saved_command_line for /proc/cmdline and hands the (possibly filtered)
 * line back through @cmdline_p.
 */
218 static __init void parse_cmdline_early (char ** cmdline_p)
220 char c = ' ', *to = command_line, *from = COMMAND_LINE;
223 /* Save unparsed command line copy for /proc/cmdline */
224 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
225 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
233 * If the BIOS enumerates physical processors before logical,
234 * maxcpus=N at enumeration-time can be used to disable HT.
236 else if (!memcmp(from, "maxcpus=", 8)) {
237 extern unsigned int maxcpus;
239 maxcpus = simple_strtoul(from + 8, NULL, 0);
242 #ifdef CONFIG_ACPI_BOOT
243 /* "acpi=off" disables both ACPI table parsing and interpreter init */
244 if (!memcmp(from, "acpi=off", 8))
247 if (!memcmp(from, "acpi=force", 10)) {
248 /* add later when we do DMI horrors: */
249 /* acpi_force = 1; */
253 /* acpi=ht just means: do ACPI MADT parsing
254 at bootup, but don't enable the full ACPI interpreter */
255 if (!memcmp(from, "acpi=ht", 7)) {
256 /* if (!acpi_force) */
260 else if (!memcmp(from, "pci=noacpi", 10))
262 else if (!memcmp(from, "acpi=noirq", 10))
/* SCI interrupt overrides: trigger (edge/level) and polarity (high/low). */
265 else if (!memcmp(from, "acpi_sci=edge", 13))
266 acpi_sci_flags.trigger = 1;
267 else if (!memcmp(from, "acpi_sci=level", 14))
268 acpi_sci_flags.trigger = 3;
269 else if (!memcmp(from, "acpi_sci=high", 13))
270 acpi_sci_flags.polarity = 1;
271 else if (!memcmp(from, "acpi_sci=low", 12))
272 acpi_sci_flags.polarity = 3;
274 /* acpi=strict disables out-of-spec workarounds */
275 else if (!memcmp(from, "acpi=strict", 11)) {
/* Local APIC / IO-APIC switches. */
280 if (!memcmp(from, "nolapic", 7) ||
281 !memcmp(from, "disableapic", 11))
284 if (!memcmp(from, "noapic", 6))
285 skip_ioapic_setup = 1;
287 if (!memcmp(from, "apic", 4)) {
288 skip_ioapic_setup = 0;
/* mem=N limits usable RAM; parse_memopt() advances `from` past the value. */
292 if (!memcmp(from, "mem=", 4))
293 parse_memopt(from+4, &from);
295 #ifdef CONFIG_DISCONTIGMEM
296 if (!memcmp(from, "numa=", 5))
300 #ifdef CONFIG_GART_IOMMU
301 if (!memcmp(from,"iommu=",6)) {
306 if (!memcmp(from,"oops=panic", 10))
/* Stop copying once the output buffer would overflow. */
313 if (COMMAND_LINE_SIZE <= ++len)
318 *cmdline_p = command_line;
321 #ifndef CONFIG_DISCONTIGMEM
/*
 * contig_initmem_init - set up the boot-time (bootmem) allocator for a
 * flat (non-NUMA) memory layout: find a hole in the e820 map for the
 * bootmem bitmap, register all usable RAM, then reserve the bitmap itself.
 */
322 static void __init contig_initmem_init(void)
324 unsigned long bootmap_size, bootmap;
325 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
326 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
328 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
329 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
330 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
/* The bitmap itself must not be handed out as free memory. */
331 reserve_bootmem(bootmap, bootmap_size);
335 /* Use inline assembly to define this because the nops are defined
336 as inline assembly strings in the include files and we cannot
337 get them easily into strings. */
/* Emit all K8 NOP encodings back-to-back into .data at the symbol k8nops. */
338 asm("\t.data\nk8nops: "
339 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
342 extern unsigned char k8nops[];
/*
 * k8_nops[len] points at the NOP sequence of length `len` inside k8nops
 * (offsets are the running sum of the preceding NOP lengths).
 */
343 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
349 k8nops + 1 + 2 + 3 + 4,
350 k8nops + 1 + 2 + 3 + 4 + 5,
351 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
352 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
355 /* Replace instructions with better alternatives for this CPU type.
357 This runs before SMP is initialized to avoid SMP problems with
358 self modifying code. This implies that assymetric systems where
359 APs have less capabilities than the boot processor are not handled.
360 In this case boot with "noreplacement". */
/*
 * @start/@end bound an array of struct alt_instr records (see
 * __alt_instructions in alternative_instructions()).
 */
361 void apply_alternatives(void *start, void *end)
365 for (a = start; (void *)a < end; a++) {
/* Skip entries whose required CPU feature is absent. */
366 if (!boot_cpu_has(a->cpuid))
369 BUG_ON(a->replacementlen > a->instrlen);
370 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
371 diff = a->instrlen - a->replacementlen;
373 /* Pad the rest with nops */
374 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
378 __inline_memcpy(a->instr + i, k8_nops[k], k);
/* Set nonzero by "noreplacement" to suppress instruction patching. */
383 static int no_replacement __initdata = 0;
/*
 * alternative_instructions - patch the kernel's own alternatives section
 * (bounded by the linker symbols below) for the boot CPU.
 */
385 void __init alternative_instructions(void)
387 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
390 apply_alternatives(__alt_instructions, __alt_instructions_end);
/* Command-line hook: "noreplacement" disables apply_alternatives(). */
393 static int __init noreplacement_setup(char *s)
399 __setup("noreplacement", noreplacement_setup);
401 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
/* BIOS Enhanced Disk Drive (EDD) data saved from the boot-time zero page. */
403 struct edd_info edd[EDDMAXNR];
404 unsigned int edd_disk80_sig;	/* MBR signature of BIOS disk 0x80 */
405 #ifdef CONFIG_EDD_MODULE
406 EXPORT_SYMBOL(eddnr);
408 EXPORT_SYMBOL(edd_disk80_sig);
411 * copy_edd() - Copy the BIOS EDD information
412 * from empty_zero_page into a safe place.
415 static inline void copy_edd(void)
418 memcpy(edd, EDD_BUF, sizeof(edd));
419 edd_disk80_sig = DISK80_SIGNATURE;
/* EDD not configured: compile to nothing. */
422 #define copy_edd() do {} while (0)
/*
 * setup_arch - architecture-specific boot initialization for x86-64.
 *
 * Copies boot-loader data out of the zero page, parses early command-line
 * options, builds the e820 memory map and bootmem allocator, reserves the
 * kernel image and other special pages, sets up ACPI/APIC/initrd, and
 * registers the standard RAM/ROM/IO resources.  @cmdline_p receives the
 * parsed command line.
 */
425 void __init setup_arch(char **cmdline_p)
427 unsigned long low_mem_size;
428 unsigned long kernel_end;
/* Snapshot boot-loader-provided data from the zero page. */
430 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
431 drive_info = DRIVE_INFO;
432 screen_info = SCREEN_INFO;
433 edid_info = EDID_INFO;
434 aux_device_present = AUX_DEVICE_INFO;
435 saved_video_mode = SAVED_VIDEO_MODE;
437 #ifdef CONFIG_BLK_DEV_RAM
438 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
439 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
440 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
442 setup_memory_region();
445 if (!MOUNT_ROOT_RDONLY)
446 root_mountflags &= ~MS_RDONLY;
/* Describe the kernel image to the mm layer and resource tree. */
447 init_mm.start_code = (unsigned long) &_text;
448 init_mm.end_code = (unsigned long) &_etext;
449 init_mm.end_data = (unsigned long) &_edata;
450 init_mm.brk = (unsigned long) &_end;
452 code_resource.start = virt_to_phys(&_text);
453 code_resource.end = virt_to_phys(&_etext)-1;
454 data_resource.start = virt_to_phys(&_etext);
455 data_resource.end = virt_to_phys(&_edata)-1;
457 parse_cmdline_early(cmdline_p);
460 * partially used pages are not usable - thus
461 * we are rounding upwards:
463 end_pfn = e820_end_of_ram();
467 init_memory_mapping();
469 #ifdef CONFIG_DISCONTIGMEM
470 numa_initmem_init(0, end_pfn);
472 contig_initmem_init();
475 /* Reserve direct mapping */
476 reserve_bootmem_generic(table_start << PAGE_SHIFT,
477 (table_end - table_start) << PAGE_SHIFT);
/* Reserve the kernel image itself. */
480 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
481 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
484 * reserve physical page 0 - it's a special BIOS page on many boxes,
485 * enabling clean reboots, SMP operation, laptop functions.
487 reserve_bootmem_generic(0, PAGE_SIZE);
491 * But first pinch a few for the stack/trampoline stuff
492 * FIXME: Don't need the extra page at 4K, but need to fix
493 * trampoline before removing it. (see the GDT stuff)
495 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
497 /* Reserve SMP trampoline */
498 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
501 #ifdef CONFIG_ACPI_SLEEP
503 * Reserve low memory region for sleep support.
505 acpi_reserve_bootmem();
507 #ifdef CONFIG_X86_LOCAL_APIC
509 * Find and reserve possible boot-time SMP configuration:
513 #ifdef CONFIG_BLK_DEV_INITRD
514 if (LOADER_TYPE && INITRD_START) {
/* Only accept an initrd that lies entirely within usable RAM. */
515 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
516 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
518 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
519 initrd_end = initrd_start+INITRD_SIZE;
522 printk(KERN_ERR "initrd extends beyond end of memory "
523 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
524 (unsigned long)(INITRD_START + INITRD_SIZE),
525 (unsigned long)(end_pfn << PAGE_SHIFT));
533 #ifdef CONFIG_ACPI_BOOT
535 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
536 * Must do this after paging_init (due to reliance on fixmap, and thus
537 * the bootmem allocator) but before get_smp_config (to allow parsing
542 #ifdef CONFIG_X86_LOCAL_APIC
544 * get boot-time SMP configuration:
546 if (smp_found_config)
548 init_apic_mappings();
552 * Request address space for all standard RAM and ROM resources
553 * and also for regions reported as reserved by the e820.
556 e820_reserve_resources();
558 request_resource(&iomem_resource, &vram_resource);
562 /* request I/O space for devices used on all i[345]86 PCs */
563 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
564 request_resource(&ioport_resource, standard_io_resources+i);
567 /* Will likely break when you have unassigned resources with more
568 than 4GB memory and bridges that don't support more than 4GB.
569 Doing it properly would require to use pci_alloc_consistent
571 low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
572 if (low_mem_size > pci_mem_start)
573 pci_mem_start = low_mem_size;
575 #ifdef CONFIG_GART_IOMMU
/* Pick the boot console implementation. */
580 #if defined(CONFIG_VGA_CONSOLE)
581 conswitchp = &vga_con;
582 #elif defined(CONFIG_DUMMY_CONSOLE)
583 conswitchp = &dummy_con;
/*
 * get_model_name - read the 48-byte CPU brand string via CPUID leaves
 * 0x80000002-0x80000004 into c->x86_model_id; bails out if the CPU's
 * maximum extended leaf is below 0x80000004.
 */
588 static int __init get_model_name(struct cpuinfo_x86 *c)
592 if (cpuid_eax(0x80000000) < 0x80000004)
/* Each leaf returns 16 bytes of the string in EAX:EBX:ECX:EDX. */
595 v = (unsigned int *) c->x86_model_id;
596 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
597 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
598 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
599 c->x86_model_id[48] = 0;	/* force NUL termination */
/*
 * display_cacheinfo - query the AMD-style extended CPUID leaves
 * (0x80000005/6/7/8) for L1/L2 cache and TLB sizes, power-management
 * bits and address widths, storing them in @c and logging the caches.
 */
604 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
606 unsigned int n, dummy, eax, ebx, ecx, edx;
608 n = cpuid_eax(0x80000000);	/* highest supported extended leaf */
610 if (n >= 0x80000005) {
611 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
612 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
613 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
614 c->x86_cache_size=(ecx>>24)+(edx>>24);	/* L1 D + L1 I, in KB */
615 /* DTLB and ITLB together, but only 4K */
616 c->x86_tlbsize = ((ebx>>16)&0xff) + (ebx&0xff);
619 if (n >= 0x80000006) {
620 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
621 ecx = cpuid_ecx(0x80000006);
622 c->x86_cache_size = ecx >> 16;	/* L2 size (KB) overrides the L1 total */
623 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
625 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
626 c->x86_cache_size, ecx & 0xFF);
/* Leaf 0x80000007: advanced power management feature bits. */
630 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
631 if (n >= 0x80000008) {
632 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
633 c->x86_virt_bits = (eax >> 8) & 0xff;
634 c->x86_phys_bits = eax & 0xff;
/*
 * init_amd - AMD-specific CPU setup: normalize the 3DNow feature bit,
 * detect C-stepping K8 parts, and fill in the model name and cache info.
 */
639 static int __init init_amd(struct cpuinfo_x86 *c)
644 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
645 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
646 clear_bit(0*32+31, &c->x86_capability);
/* Family/model/stepping word from CPUID leaf 1 identifies K8 revision C. */
649 level = cpuid_eax(1);
650 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
651 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
653 r = get_model_name(c);
657 /* Should distinguish Models here, but this is only
658 a fallback anyways. */
659 strcpy(c->x86_model_id, "Hammer");
663 display_cacheinfo(c);
/*
 * detect_ht - detect Hyper-Threading siblings from CPUID leaf 1
 * (EBX[23:16] = logical processors per package) and derive this CPU's
 * physical package ID from its initial APIC ID.
 */
667 static void __init detect_ht(struct cpuinfo_x86 *c)
670 u32 eax, ebx, ecx, edx;
671 int index_lsb, index_msb, tmp;
673 int cpu = smp_processor_id();
675 if (!cpu_has(c, X86_FEATURE_HT))
678 cpuid(1, &eax, &ebx, &ecx, &edx);
679 smp_num_siblings = (ebx & 0xff0000) >> 16;
681 if (smp_num_siblings == 1) {
682 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
683 } else if (smp_num_siblings > 1) {
687 * At this point we only support two siblings per
/* Sanity-limit: fall back to one sibling on implausible counts. */
690 if (smp_num_siblings > NR_CPUS) {
691 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
692 smp_num_siblings = 1;
/* Find lowest set bit of the sibling count... */
695 tmp = smp_num_siblings;
696 while ((tmp & 1) == 0) {
/* ...and the highest set bit (scanning from bit 31 down). */
700 tmp = smp_num_siblings;
701 while ((tmp & 0x80000000 ) == 0) {
/* If the count is not a power of two the two indices differ. */
705 if (index_lsb != index_msb )
707 initial_apic_id = ebx >> 24 & 0xff;	/* CPUID.1:EBX[31:24] */
708 phys_proc_id[cpu] = initial_apic_id >> index_msb;
710 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* Part of struct _cache_table: the CPUID leaf-2 descriptor byte. */
724 unsigned char descriptor;
729 /* all the cache descriptor types we care about (sizes in KB; TLB
   descriptors are omitted, but trace-cache entries ARE included —
   LVL_TRACE sizes are in K-uops) */
730 static struct _cache_table cache_table[] __initdata =
732 { 0x06, LVL_1_INST, 8 },
733 { 0x08, LVL_1_INST, 16 },
734 { 0x0a, LVL_1_DATA, 8 },
735 { 0x0c, LVL_1_DATA, 16 },
736 { 0x22, LVL_3, 512 },
737 { 0x23, LVL_3, 1024 },
738 { 0x25, LVL_3, 2048 },
739 { 0x29, LVL_3, 4096 },
740 { 0x2c, LVL_1_DATA, 32 },
741 { 0x30, LVL_1_INST, 32 },
742 { 0x39, LVL_2, 128 },
743 { 0x3b, LVL_2, 128 },
744 { 0x3c, LVL_2, 256 },
745 { 0x41, LVL_2, 128 },
746 { 0x42, LVL_2, 256 },
747 { 0x43, LVL_2, 512 },
748 { 0x44, LVL_2, 1024 },
749 { 0x45, LVL_2, 2048 },
750 { 0x66, LVL_1_DATA, 8 },
751 { 0x67, LVL_1_DATA, 16 },
752 { 0x68, LVL_1_DATA, 32 },
753 { 0x70, LVL_TRACE, 12 },
754 { 0x71, LVL_TRACE, 16 },
755 { 0x72, LVL_TRACE, 32 },
756 { 0x79, LVL_2, 128 },
757 { 0x7a, LVL_2, 256 },
758 { 0x7b, LVL_2, 512 },
759 { 0x7c, LVL_2, 1024 },
760 { 0x82, LVL_2, 256 },
761 { 0x83, LVL_2, 512 },
762 { 0x84, LVL_2, 1024 },
763 { 0x85, LVL_2, 2048 },
764 { 0x86, LVL_2, 512 },
765 { 0x87, LVL_2, 1024 },
/*
 * init_intel - Intel-specific CPU setup: decode CPUID leaf 2 cache
 * descriptors via cache_table[] to accumulate L1/L2/L3/trace-cache
 * sizes, then read address widths from leaf 0x80000008.
 */
769 static void __init init_intel(struct cpuinfo_x86 *c)
772 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
775 if (c->cpuid_level > 1) {
776 /* supports eax=2 call */
/* View the four leaf-2 registers as 16 descriptor bytes. */
779 unsigned char *dp = (unsigned char *)regs;
781 /* Number of times to iterate */
782 n = cpuid_eax(2) & 0xFF;
784 for ( i = 0 ; i < n ; i++ ) {
/* NOTE(review): "®s[0]" etc. looks like mojibake for "&regs[0]" —
   verify against the original source before building. */
785 cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
787 /* If bit 31 is set, this is an unknown format */
788 for ( j = 0 ; j < 3 ; j++ ) {
789 if ( regs[j] < 0 ) regs[j] = 0;
792 /* Byte 0 is level count, not a descriptor */
793 for ( j = 1 ; j < 16 ; j++ ) {
794 unsigned char des = dp[j];
797 /* look up this descriptor in the table */
798 while (cache_table[k].descriptor != 0)
800 if (cache_table[k].descriptor == des) {
801 switch (cache_table[k].cache_type) {
803 l1i += cache_table[k].size;
806 l1d += cache_table[k].size;
809 l2 += cache_table[k].size;
812 l3 += cache_table[k].size;
815 trace += cache_table[k].size;
/* Report whatever cache levels were found. */
828 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
830 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
832 printk(", L1 D cache: %dK\n", l1d);
836 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
838 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
840 c->x86_cache_size = l2 ? l2 : (l1i+l1d);	/* prefer L2, else L1 total */
843 n = cpuid_eax(0x80000000);
844 if (n >= 0x80000008) {
845 unsigned eax = cpuid_eax(0x80000008);
846 c->x86_virt_bits = (eax >> 8) & 0xff;
847 c->x86_phys_bits = eax & 0xff;
851 c->x86_cache_alignment = c->x86_clflush_size * 2;
/*
 * get_cpu_vendor - map the CPUID vendor string (already stored in
 * c->x86_vendor_id by early_identify_cpu()) to an X86_VENDOR_* constant.
 */
854 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
856 char *v = c->x86_vendor_id;
858 if (!strcmp(v, "AuthenticAMD"))
859 c->x86_vendor = X86_VENDOR_AMD;
860 else if (!strcmp(v, "GenuineIntel"))
861 c->x86_vendor = X86_VENDOR_INTEL;
863 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Per-family table of human-readable model names. */
866 struct cpu_model_info {
869 char *model_names[16];
872 /* Do some early cpuid on the boot CPU to get some parameters that are
873 needed before check_bugs. Everything advanced is in identify_cpu
875 void __init early_identify_cpu(struct cpuinfo_x86 *c)
/* Reset @c to "unknown" defaults before probing. */
879 c->loops_per_jiffy = loops_per_jiffy;
880 c->x86_cache_size = -1;
881 c->x86_vendor = X86_VENDOR_UNKNOWN;
882 c->x86_model = c->x86_mask = 0; /* So far unknown... */
883 c->x86_vendor_id[0] = '\0'; /* Unset */
884 c->x86_model_id[0] = '\0'; /* Unset */
885 c->x86_clflush_size = 64;	/* conservative default before CPUID */
886 c->x86_cache_alignment = c->x86_clflush_size;
887 memset(&c->x86_capability, 0, sizeof c->x86_capability);
889 /* Get vendor name */
/* Leaf 0: max standard leaf in EAX; 12-byte vendor string in EBX,EDX,ECX
   (note the 0/8/4 ordering below matches that register order). */
890 cpuid(0x00000000, &c->cpuid_level,
891 (int *)&c->x86_vendor_id[0],
892 (int *)&c->x86_vendor_id[8],
893 (int *)&c->x86_vendor_id[4]);
897 /* Initialize the standard set of capabilities */
898 /* Note that the vendor-specific code below might override */
900 /* Intel-defined flags: level 0x00000001 */
901 if (c->cpuid_level >= 0x00000001) {
903 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
904 &c->x86_capability[0]);
905 c->x86 = (tfms >> 8) & 0xf;
906 c->x86_model = (tfms >> 4) & 0xf;
907 c->x86_mask = tfms & 0xf;
/* Fold in the extended family/model fields. */
909 c->x86 += (tfms >> 20) & 0xff;
910 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* Feature bit 19 = CLFLUSH; MISC[15:8] gives its line size in qwords. */
912 if (c->x86_capability[0] & (1<<19))
913 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
915 /* Have CPUID level 0 only - unheard of */
921 * This does the hard work of actually picking apart the CPU stuff...
923 void __init identify_cpu(struct cpuinfo_x86 *c)
928 early_identify_cpu(c);
930 /* AMD-defined flags: level 0x80000001 */
/* Extended leaves exist only if leaf 0x80000000 echoes 0x8000xxxx. */
931 xlvl = cpuid_eax(0x80000000);
932 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
933 if ( xlvl >= 0x80000001 )
934 c->x86_capability[1] = cpuid_edx(0x80000001);
935 if ( xlvl >= 0x80000004 )
936 get_model_name(c); /* Default name */
939 /* Transmeta-defined flags: level 0x80860001 */
940 xlvl = cpuid_eax(0x80860000);
941 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
942 if ( xlvl >= 0x80860001 )
943 c->x86_capability[2] = cpuid_edx(0x80860001);
947 * Vendor-specific initialization. In this section we
948 * canonicalize the feature flags, meaning if there are
949 * features a certain CPU supports which CPUID doesn't
950 * tell us, CPUID claiming incorrect flags, or other bugs,
951 * we handle them here.
953 * At the end of this section, c->x86_capability better
954 * indicate the features this CPU genuinely supports!
956 switch ( c->x86_vendor ) {
962 case X86_VENDOR_INTEL:
966 case X86_VENDOR_UNKNOWN:
968 display_cacheinfo(c);
972 select_idle_routine(c);
976 * On SMP, boot_cpu_data holds the common feature set between
977 * all CPUs; so make sure that we indicate which features are
978 * common between the CPUs. The first time this routine gets
979 * executed, c == &boot_cpu_data.
981 if ( c != &boot_cpu_data ) {
982 /* AND the already accumulated flags with these */
983 for ( i = 0 ; i < NCAPINTS ; i++ )
984 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
/*
 * print_cpu_info - log the CPU model string and stepping at boot.
 */
991 void __init print_cpu_info(struct cpuinfo_x86 *c)
993 if (c->x86_model_id[0])
994 printk("%s", c->x86_model_id);
996 if (c->x86_mask || c->cpuid_level >= 0)
997 printk(" stepping %02x\n", c->x86_mask);
1003 * Get CPU information for use by the procfs.
/*
 * show_cpuinfo - seq_file .show callback that renders one CPU's entry of
 * /proc/cpuinfo; @v is a struct cpuinfo_x86 * supplied by c_start/c_next.
 */
1006 static int show_cpuinfo(struct seq_file *m, void *v)
1008 struct cpuinfo_x86 *c = v;
1011 * These flag bits must match the definitions in <asm/cpufeature.h>.
1012 * NULL means this bit is undefined or reserved; either way it doesn't
1013 * have meaning as far as Linux is concerned. Note that it's important
1014 * to realize there is a difference between this table and CPUID -- if
1015 * applications want to get the raw CPUID data, they should access
1016 * /dev/cpu/<cpu_nr>/cpuid instead.
1018 static char *x86_cap_flags[] = {
/* Intel-defined flags: CPUID leaf 1 EDX (capability word 0). */
1020 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1021 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1022 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1023 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
/* AMD-defined flags: CPUID leaf 0x80000001 EDX (word 1). */
1026 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1027 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1029 NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1031 /* Transmeta-defined */
1032 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1037 /* Other (Linux-defined) */
1038 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
1039 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1040 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1041 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1043 /* Intel-defined (#2) */
1044 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
1045 "est", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
1046 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1047 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Names for the CPUID 0x80000007 power-management bits (c->x86_power). */
1049 static char *x86_power_flags[] = {
1050 "ts", /* temperature sensor */
1051 "fid", /* frequency id control */
1052 "vid", /* voltage id control */
1053 "ttp", /* thermal trip */
/* Skip offline CPUs. */
1058 if (!cpu_online(c-cpu_data))
1062 seq_printf(m,"processor\t: %u\n"
1064 "cpu family\t: %d\n"
1066 "model name\t: %s\n",
1067 (unsigned)(c-cpu_data),
1068 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1071 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1073 if (c->x86_mask || c->cpuid_level >= 0)
1074 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1076 seq_printf(m, "stepping\t: unknown\n");
1078 if (cpu_has(c,X86_FEATURE_TSC)) {
1079 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1080 cpu_khz / 1000, (cpu_khz % 1000));
1084 if (c->x86_cache_size >= 0)
1085 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1087 #ifdef CONFIG_X86_HT
1089 seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
1090 seq_printf(m, "siblings\t: %d\n", smp_num_siblings);
1096 "fpu_exception\t: yes\n"
1097 "cpuid level\t: %d\n"
/* Emit the name of every set capability bit that has one. */
1104 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1105 if ( test_bit(i, &c->x86_capability) &&
1106 x86_cap_flags[i] != NULL )
1107 seq_printf(m, " %s", x86_cap_flags[i]);
1110 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1111 c->loops_per_jiffy/(500000/HZ),
1112 (c->loops_per_jiffy/(5000/HZ)) % 100);
1114 if (c->x86_tlbsize > 0)
1115 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1116 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1117 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1119 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1120 c->x86_phys_bits, c->x86_virt_bits);
1122 seq_printf(m, "power management:");
/* Named bits get their name; unknown set bits print as [index]. */
1125 for (i = 0; i < 32; i++)
1126 if (c->x86_power & (1 << i)) {
1127 if (i < ARRAY_SIZE(x86_power_flags))
1128 seq_printf(m, " %s", x86_power_flags[i]);
1130 seq_printf(m, " [%d]", i);
1134 seq_printf(m, "\n\n");
/* seq_file iterator: positions 0..NR_CPUS-1 map to cpu_data[] entries. */
1139 static void *c_start(struct seq_file *m, loff_t *pos)
1141 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1144 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1147 return c_start(m, pos);
/* Nothing to release per iteration. */
1150 static void c_stop(struct seq_file *m, void *v)
/* seq_file operations backing /proc/cpuinfo. */
1154 struct seq_operations cpuinfo_op = {
1158 .show = show_cpuinfo,