2 * linux/arch/x86-64/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
13 * This file handles the architecture-dependent parts of initialization
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
50 #include <video/edid.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
57 #include <asm/proto.h>
58 #include <asm/setup.h>
/* Architecture-global state filled in during boot. */
64 struct cpuinfo_x86 boot_cpu_data;
/* CR4 feature bits to propagate to secondary CPUs. */
66 unsigned long mmu_cr4_features;
67 EXPORT_SYMBOL_GPL(mmu_cr4_features);
70 EXPORT_SYMBOL(acpi_disabled);
71 #ifdef CONFIG_ACPI_BOOT
72 extern int __initdata acpi_ht;
73 extern acpi_interrupt_flags acpi_sci_flags;
/* Set by "acpi=force" on the command line (see parse_cmdline_early). */
74 int __initdata acpi_force = 0;
77 /* For PCI or other memory-mapped resources */
78 unsigned long pci_mem_start = 0x10000000;
/* Video mode saved by the boot loader (SAVED_VIDEO_MODE). */
80 unsigned long saved_video_mode;
/* NOTE(review): swiotlb declaration not visible in this view — presumably an int flag. */
84 EXPORT_SYMBOL(swiotlb);
/* Raw BIOS data copied out of boot_params in setup_arch(). */
90 struct drive_info_struct { char dummy[32]; } drive_info;
91 struct screen_info screen_info;
/* NOTE(review): struct definition appears truncated here — closing "};" not visible. */
92 struct sys_desc_table_struct {
93 unsigned short length;
94 unsigned char table[0];
97 struct edid_info edid_info;
100 unsigned char aux_device_present;
102 extern int root_mountflags;
/* Linker-provided section boundary symbols. */
103 extern char _text, _etext, _edata, _end;
/* Boot command line after early parsing. */
105 char command_line[COMMAND_LINE_SIZE];
/*
 * Legacy PC I/O port regions claimed for all machines.
 * NOTE(review): closing "};" of this array is not visible in this view.
 */
107 struct resource standard_io_resources[] = {
108 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY | IORESOURCE_IO },
109 { "pic1", 0x20, 0x21, IORESOURCE_BUSY | IORESOURCE_IO },
110 { "timer0", 0x40, 0x43, IORESOURCE_BUSY | IORESOURCE_IO },
111 { "timer1", 0x50, 0x53, IORESOURCE_BUSY | IORESOURCE_IO },
112 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY | IORESOURCE_IO },
113 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY | IORESOURCE_IO },
114 { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY | IORESOURCE_IO },
115 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY | IORESOURCE_IO },
116 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY | IORESOURCE_IO }
/* Element count of the table above. */
119 #define STANDARD_IO_RESOURCES \
120 (sizeof standard_io_resources / sizeof standard_io_resources[0])
122 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/* Kernel text/data ranges; start/end filled in by setup_arch(). */
124 struct resource data_resource = { "Kernel data", 0, 0, IORESOURCE_RAM };
125 struct resource code_resource = { "Kernel code", 0, 0, IORESOURCE_RAM };
127 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
/* Fixed legacy ROM windows in the first megabyte. */
129 static struct resource system_rom_resource = { "System ROM", 0xf0000, 0xfffff, IORESOURCE_ROM };
130 static struct resource extension_rom_resource = { "Extension ROM", 0xe0000, 0xeffff, IORESOURCE_ROM };
/* Slots for adapter ROMs found by probe_roms(); ends filled in at probe time. */
132 static struct resource adapter_rom_resources[] = {
133 { "Adapter ROM", 0xc8000, 0, IORESOURCE_ROM },
134 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
135 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
136 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
137 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
138 { "Adapter ROM", 0, 0, IORESOURCE_ROM }
141 #define ADAPTER_ROM_RESOURCES \
142 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
144 static struct resource video_rom_resource = { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_ROM };
145 static struct resource video_ram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_RAM };
/* True if the two bytes at x carry the 0xAA55 option-ROM signature. */
147 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
/*
 * Sum all bytes of a ROM image; a valid ROM checksums to zero.
 * NOTE(review): loop body and return statement are not visible in this
 * view — presumably "sum += *p;" followed by "return !sum;".
 */
149 static int __init romchecksum(unsigned char *rom, unsigned long length)
151 unsigned char *p, sum = 0;
153 for (p = rom; p < rom + length; p++)
/*
 * Scan the legacy ROM region (0xC0000-0xFFFFF) for the video ROM, the
 * system BIOS ROM, an optional extension ROM, and adapter ROMs on 2K
 * boundaries, registering each found region in iomem_resource.
 * NOTE(review): several lines (declarations of rom/i, continue statements,
 * closing braces) are not visible in this view.
 */
158 static void __init probe_roms(void)
160 unsigned long start, length, upper;
/* Video ROM: scan from 0xC0000 up to the first adapter ROM slot. */
165 upper = adapter_rom_resources[0].start;
166 for (start = video_rom_resource.start; start < upper; start += 2048) {
167 rom = isa_bus_to_virt(start);
168 if (!romsignature(rom))
171 video_rom_resource.start = start;
173 /* 0 < length <= 0x7f * 512, historically */
174 length = rom[2] * 512;
176 /* if checksum okay, trust length byte */
177 if (length && romchecksum(rom, length))
178 video_rom_resource.end = start + length - 1;
180 request_resource(&iomem_resource, &video_rom_resource);
/* Round up to the next 2K boundary past the video ROM. */
184 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
189 request_resource(&iomem_resource, &system_rom_resource);
190 upper = system_rom_resource.start;
192 /* check for extension rom (ignore length byte!) */
193 rom = isa_bus_to_virt(extension_rom_resource.start);
194 if (romsignature(rom)) {
195 length = extension_rom_resource.end - extension_rom_resource.start + 1;
196 if (romchecksum(rom, length)) {
197 request_resource(&iomem_resource, &extension_rom_resource);
198 upper = extension_rom_resource.start;
202 /* check for adapter roms on 2k boundaries */
203 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
204 rom = isa_bus_to_virt(start);
205 if (!romsignature(rom))
208 /* 0 < length <= 0x7f * 512, historically */
209 length = rom[2] * 512;
211 /* but accept any length that fits if checksum okay */
212 if (!length || start + length > upper || !romchecksum(rom, length))
215 adapter_rom_resources[i].start = start;
216 adapter_rom_resources[i].end = start + length - 1;
217 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* Re-align after the ROM just claimed; also advances the slot index. */
219 start = adapter_rom_resources[i++].end & ~2047UL;
/*
 * Early boot command-line parser: copies the raw line for /proc/cmdline,
 * then scans for options that must be handled before full setup (ACPI
 * switches, APIC switches, mem=, numa=, iommu=, maxcpus=, oops=panic).
 * Leaves the (possibly filtered) line in command_line and points
 * *cmdline_p at it.
 * NOTE(review): the scanning loop's framing (word-boundary check on 'c',
 * several braces and continues) is not visible in this view.
 */
223 static __init void parse_cmdline_early (char ** cmdline_p)
225 char c = ' ', *to = command_line, *from = COMMAND_LINE;
228 /* Save unparsed command line copy for /proc/cmdline */
229 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
230 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
238 * If the BIOS enumerates physical processors before logical,
239 * maxcpus=N at enumeration-time can be used to disable HT.
241 else if (!memcmp(from, "maxcpus=", 8)) {
242 extern unsigned int maxcpus;
244 maxcpus = simple_strtoul(from + 8, NULL, 0);
247 #ifdef CONFIG_ACPI_BOOT
248 /* "acpi=off" disables both ACPI table parsing and interpreter init */
249 if (!memcmp(from, "acpi=off", 8))
252 if (!memcmp(from, "acpi=force", 10)) {
253 /* add later when we do DMI horrors: */
258 /* acpi=ht just means: do ACPI MADT parsing
259 at bootup, but don't enable the full ACPI interpreter */
260 if (!memcmp(from, "acpi=ht", 7)) {
265 else if (!memcmp(from, "pci=noacpi", 10))
267 else if (!memcmp(from, "acpi=noirq", 10))
/* SCI trigger/polarity overrides: 1 = edge/high, 3 = level/low. */
270 else if (!memcmp(from, "acpi_sci=edge", 13))
271 acpi_sci_flags.trigger = 1;
272 else if (!memcmp(from, "acpi_sci=level", 14))
273 acpi_sci_flags.trigger = 3;
274 else if (!memcmp(from, "acpi_sci=high", 13))
275 acpi_sci_flags.polarity = 1;
276 else if (!memcmp(from, "acpi_sci=low", 12))
277 acpi_sci_flags.polarity = 3;
279 /* acpi=strict disables out-of-spec workarounds */
280 else if (!memcmp(from, "acpi=strict", 11)) {
285 if (!memcmp(from, "nolapic", 7) ||
286 !memcmp(from, "disableapic", 11))
289 if (!memcmp(from, "noapic", 6))
290 skip_ioapic_setup = 1;
292 if (!memcmp(from, "apic", 4)) {
293 skip_ioapic_setup = 0;
297 if (!memcmp(from, "mem=", 4))
298 parse_memopt(from+4, &from);
300 #ifdef CONFIG_DISCONTIGMEM
301 if (!memcmp(from, "numa=", 5))
305 #ifdef CONFIG_GART_IOMMU
306 if (!memcmp(from,"iommu=",6)) {
311 if (!memcmp(from,"oops=panic", 10))
/* Stop copying once the destination buffer would overflow. */
318 if (COMMAND_LINE_SIZE <= ++len)
323 *cmdline_p = command_line;
326 #ifndef CONFIG_DISCONTIGMEM
/*
 * Set up the bootmem allocator for a flat (non-NUMA) memory layout:
 * find room for the bootmem bitmap in e820, initialise it over all of
 * RAM, free usable e820 ranges into it, then reserve the bitmap itself.
 * NOTE(review): the failure check before panic() and the closing brace
 * are not visible in this view.
 */
327 static void __init contig_initmem_init(void)
329 unsigned long bootmap_size, bootmap;
330 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
331 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
333 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
334 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
335 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
336 reserve_bootmem(bootmap, bootmap_size);
340 /* Use inline assembly to define this because the nops are defined
341 as inline assembly strings in the include files and we cannot
342 get them easily into strings. */
/* Emits the concatenated K8 NOP sequences into .data as "k8nops". */
343 asm("\t.data\nk8nops: "
344 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
347 extern unsigned char k8nops[];
/*
 * k8_nops[n] points at an n-byte NOP inside the k8nops blob (offsets are
 * cumulative sums of the preceding NOP lengths). Used by
 * apply_alternatives() to pad patched instructions.
 * NOTE(review): entries for lengths 0-4 are not visible in this view.
 */
348 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
354 k8nops + 1 + 2 + 3 + 4,
355 k8nops + 1 + 2 + 3 + 4 + 5,
356 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
357 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
360 /* Replace instructions with better alternatives for this CPU type.
362 This runs before SMP is initialized to avoid SMP problems with
363 self modifying code. This implies that assymetric systems where
364 APs have less capabilities than the boot processor are not handled.
365 In this case boot with "noreplacement". */
/*
 * Walk the alt_instr records in [start, end): for each record whose CPUID
 * feature bit the boot CPU has, copy the replacement over the original
 * instruction and pad any leftover bytes with K8 NOPs.
 * NOTE(review): declarations of a/i/k/diff and the NOP-length clamp inside
 * the padding loop are not visible in this view.
 */
366 void apply_alternatives(void *start, void *end)
370 for (a = start; (void *)a < end; a++) {
371 if (!boot_cpu_has(a->cpuid))
/* Replacement must never be longer than the slot it patches. */
374 BUG_ON(a->replacementlen > a->instrlen);
375 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
376 diff = a->instrlen - a->replacementlen;
378 /* Pad the rest with nops */
379 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
383 __inline_memcpy(a->instr + i, k8_nops[k], k);
/* Set by the "noreplacement" boot option to skip instruction patching. */
388 static int no_replacement __initdata = 0;
/*
 * Patch the kernel's own alternatives section at boot.
 * NOTE(review): the early-return on no_replacement is not visible here.
 */
390 void __init alternative_instructions(void)
392 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
395 apply_alternatives(__alt_instructions, __alt_instructions_end);
/* __setup handler: body (setting no_replacement, return) not visible. */
398 static int __init noreplacement_setup(char *s)
404 __setup("noreplacement", noreplacement_setup);
406 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
408 #ifdef CONFIG_EDD_MODULE
412 * copy_edd() - Copy the BIOS EDD information
413 * from boot_params into a safe place.
/* Real implementation: snapshot EDD data before boot_params is reclaimed. */
416 static inline void copy_edd(void)
418 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
419 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
420 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
421 edd.edd_info_nr = EDD_NR;
/* Stub used when EDD support is not configured (empty body elided). */
424 static inline void copy_edd(void)
/* BDA word holding the real-mode segment of the Extended BIOS Data Area. */
429 #define EBDA_ADDR_POINTER 0x40E
/*
 * Reserve the 4K EBDA so bootmem never hands it out.
 * NOTE(review): the shift converting the segment value to a physical
 * address (addr <<= 4) is not visible in this view.
 */
430 static void __init reserve_ebda_region(void)
434 * there is a real-mode segmented pointer pointing to the
435 * 4K EBDA area at 0x40E
437 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
440 reserve_bootmem_generic(addr, PAGE_SIZE);
/*
 * Main architecture-specific boot entry: copies boot_params data,
 * parses the early command line, sizes memory from e820, sets up the
 * bootmem allocator, reserves special regions (kernel image, page 0,
 * EBDA, trampoline, initrd), brings up ACPI/APIC table parsing, and
 * registers standard resources.
 * NOTE(review): many lines (local declarations, #endif markers, closing
 * braces, paging_init/check_ioapic calls) are not visible in this view.
 */
443 void __init setup_arch(char **cmdline_p)
445 unsigned long low_mem_size;
446 unsigned long kernel_end;
/* Snapshot data passed by the boot loader in boot_params. */
448 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
449 drive_info = DRIVE_INFO;
450 screen_info = SCREEN_INFO;
451 edid_info = EDID_INFO;
452 aux_device_present = AUX_DEVICE_INFO;
453 saved_video_mode = SAVED_VIDEO_MODE;
455 #ifdef CONFIG_BLK_DEV_RAM
456 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
457 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
458 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
460 setup_memory_region();
463 if (!MOUNT_ROOT_RDONLY)
464 root_mountflags &= ~MS_RDONLY;
/* Record kernel section boundaries for init_mm and /proc/iomem. */
465 init_mm.start_code = (unsigned long) &_text;
466 init_mm.end_code = (unsigned long) &_etext;
467 init_mm.end_data = (unsigned long) &_edata;
468 init_mm.brk = (unsigned long) &_end;
470 code_resource.start = virt_to_phys(&_text);
471 code_resource.end = virt_to_phys(&_etext)-1;
472 data_resource.start = virt_to_phys(&_etext);
473 data_resource.end = virt_to_phys(&_edata)-1;
475 parse_cmdline_early(cmdline_p);
478 * partially used pages are not usable - thus
479 * we are rounding upwards:
481 end_pfn = e820_end_of_ram();
485 init_memory_mapping();
487 #ifdef CONFIG_DISCONTIGMEM
488 numa_initmem_init(0, end_pfn);
490 contig_initmem_init();
493 /* Reserve direct mapping */
494 reserve_bootmem_generic(table_start << PAGE_SHIFT,
495 (table_end - table_start) << PAGE_SHIFT);
/* Reserve the kernel image itself (HIGH_MEMORY up to _end). */
498 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
499 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
502 * reserve physical page 0 - it's a special BIOS page on many boxes,
503 * enabling clean reboots, SMP operation, laptop functions.
505 reserve_bootmem_generic(0, PAGE_SIZE);
507 /* reserve ebda region */
508 reserve_ebda_region();
512 * But first pinch a few for the stack/trampoline stuff
513 * FIXME: Don't need the extra page at 4K, but need to fix
514 * trampoline before removing it. (see the GDT stuff)
516 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
518 /* Reserve SMP trampoline */
519 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
522 #ifdef CONFIG_ACPI_SLEEP
524 * Reserve low memory region for sleep support.
526 acpi_reserve_bootmem();
528 #ifdef CONFIG_X86_LOCAL_APIC
530 * Find and reserve possible boot-time SMP configuration:
534 #ifdef CONFIG_BLK_DEV_INITRD
/* Reserve the initrd if the loader provided one that fits in RAM. */
535 if (LOADER_TYPE && INITRD_START) {
536 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
537 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
539 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
540 initrd_end = initrd_start+INITRD_SIZE;
543 printk(KERN_ERR "initrd extends beyond end of memory "
544 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
545 (unsigned long)(INITRD_START + INITRD_SIZE),
546 (unsigned long)(end_pfn << PAGE_SHIFT));
554 #ifdef CONFIG_ACPI_BOOT
556 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
557 * Must do this after paging_init (due to reliance on fixmap, and thus
558 * the bootmem allocator) but before get_smp_config (to allow parsing
563 #ifdef CONFIG_X86_LOCAL_APIC
565 * get boot-time SMP configuration:
567 if (smp_found_config)
569 init_apic_mappings();
573 * Request address space for all standard RAM and ROM resources
574 * and also for regions reported as reserved by the e820.
577 e820_reserve_resources();
579 request_resource(&iomem_resource, &video_ram_resource);
583 /* request I/O space for devices used on all i[345]86 PCs */
584 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
585 request_resource(&ioport_resource, &standard_io_resources[i]);
588 /* Will likely break when you have unassigned resources with more
589 than 4GB memory and bridges that don't support more than 4GB.
590 Doing it properly would require to use pci_alloc_consistent
/* Start PCI allocations above RAM, rounded to a megabyte. */
592 low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
593 if (low_mem_size > pci_mem_start)
594 pci_mem_start = low_mem_size;
596 #ifdef CONFIG_GART_IOMMU
601 #if defined(CONFIG_VGA_CONSOLE)
602 conswitchp = &vga_con;
603 #elif defined(CONFIG_DUMMY_CONSOLE)
604 conswitchp = &dummy_con;
/*
 * Fill c->x86_model_id (48 chars + NUL) from extended CPUID leaves
 * 0x80000002-0x80000004. Returns whether a name was available.
 * NOTE(review): the "return 0;" / "return 1;" lines and the declaration
 * of v are not visible in this view.
 */
609 static int __init get_model_name(struct cpuinfo_x86 *c)
613 if (cpuid_eax(0x80000000) < 0x80000004)
616 v = (unsigned int *) c->x86_model_id;
617 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
618 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
619 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
620 c->x86_model_id[48] = 0;
/*
 * Read AMD-style cache/TLB information from extended CPUID leaves
 * 0x80000005/6 (L1/L2), power flags from 0x80000007, and address sizes
 * from 0x80000008, printing cache geometry and filling cpuinfo fields.
 * NOTE(review): closing braces and the 0x80000007 level check are not
 * visible in this view.
 */
625 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
627 unsigned int n, dummy, eax, ebx, ecx, edx;
629 n = cpuid_eax(0x80000000);
631 if (n >= 0x80000005) {
632 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
633 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
634 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
/* L1 total = I-cache + D-cache sizes (each in top byte, KB units). */
635 c->x86_cache_size=(ecx>>24)+(edx>>24);
636 /* DTLB and ITLB together, but only 4K */
637 c->x86_tlbsize = ((ebx>>16)&0xff) + (ebx&0xff);
640 if (n >= 0x80000006) {
641 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
642 ecx = cpuid_ecx(0x80000006);
643 c->x86_cache_size = ecx >> 16;
644 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
646 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
647 c->x86_cache_size, ecx & 0xFF);
651 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
652 if (n >= 0x80000008) {
653 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
654 c->x86_virt_bits = (eax >> 8) & 0xff;
655 c->x86_phys_bits = eax & 0xff;
/*
 * AMD-specific CPU setup: clear the non-standard 3DNow bit, detect
 * C-stepping K8s, name the CPU, report caches, count cores and fix up
 * the CPU-to-node mapping for dual-core parts.
 * NOTE(review): declarations of r/level, several braces, and the return
 * are not visible in this view.
 */
660 static int __init init_amd(struct cpuinfo_x86 *c)
665 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
666 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
667 clear_bit(0*32+31, &c->x86_capability);
/* C-stepping K8 detection by family/model/stepping ranges. */
670 level = cpuid_eax(1);
671 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
672 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
674 r = get_model_name(c);
678 /* Should distinguish Models here, but this is only
679 a fallback anyways. */
680 strcpy(c->x86_model_id, "Hammer");
684 display_cacheinfo(c);
686 if (c->cpuid_level >= 0x80000008) {
687 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
/* Only power-of-two core counts are handled below; else assume 1. */
688 if (c->x86_num_cores & (c->x86_num_cores - 1))
689 c->x86_num_cores = 1;
692 /* On a dual core setup the lower bits of apic id
693 distingush the cores. Fix up the CPU<->node mappings
695 Assumes number of cores is a power of two. */
696 if (c->x86_num_cores > 1) {
697 int cpu = c->x86_apicid;
698 cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);
699 printk(KERN_INFO "CPU %d -> Node %d\n",
700 cpu, cpu_to_node[cpu]);
/*
 * Detect HyperThreading siblings via CPUID leaf 1 (EBX[23:16]) and
 * derive the physical package ID for this CPU from its APIC id.
 * NOTE(review): loop bodies updating index_lsb/index_msb, several braces,
 * and the declaration of initial_apic_id are not visible in this view.
 */
708 static void __init detect_ht(struct cpuinfo_x86 *c)
711 u32 eax, ebx, ecx, edx;
712 int index_lsb, index_msb, tmp;
714 int cpu = smp_processor_id();
716 if (!cpu_has(c, X86_FEATURE_HT))
719 cpuid(1, &eax, &ebx, &ecx, &edx);
720 smp_num_siblings = (ebx & 0xff0000) >> 16;
722 if (smp_num_siblings == 1) {
723 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
724 } else if (smp_num_siblings > 1) {
728 * At this point we only support two siblings per
731 if (smp_num_siblings > NR_CPUS) {
732 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
733 smp_num_siblings = 1;
/* Find lowest set bit position of the sibling count... */
736 tmp = smp_num_siblings;
737 while ((tmp & 1) == 0) {
/* ...and highest set bit position, to round up to a power of two. */
741 tmp = smp_num_siblings;
742 while ((tmp & 0x80000000 ) == 0) {
746 if (index_lsb != index_msb )
748 initial_apic_id = hard_smp_processor_id();
/* Package id = APIC id with the sibling bits shifted off. */
749 phys_proc_id[cpu] = initial_apic_id >> index_msb;
751 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* NOTE(review): fragment of struct _cache_table — the struct's opening
 * line and remaining fields (cache_type, size) are not visible here. */
765 unsigned char descriptor;
/* NOTE(review): despite the comment below, the table DOES contain trace
 * cache entries (0x70-0x72, LVL_TRACE) — the comment is stale. Sizes are
 * in KB (trace entries in K-uops). Terminated by a zero descriptor. */
770 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
771 static struct _cache_table cache_table[] __initdata =
773 { 0x06, LVL_1_INST, 8 },
774 { 0x08, LVL_1_INST, 16 },
775 { 0x0a, LVL_1_DATA, 8 },
776 { 0x0c, LVL_1_DATA, 16 },
777 { 0x22, LVL_3, 512 },
778 { 0x23, LVL_3, 1024 },
779 { 0x25, LVL_3, 2048 },
780 { 0x29, LVL_3, 4096 },
781 { 0x2c, LVL_1_DATA, 32 },
782 { 0x30, LVL_1_INST, 32 },
783 { 0x39, LVL_2, 128 },
784 { 0x3b, LVL_2, 128 },
785 { 0x3c, LVL_2, 256 },
786 { 0x41, LVL_2, 128 },
787 { 0x42, LVL_2, 256 },
788 { 0x43, LVL_2, 512 },
789 { 0x44, LVL_2, 1024 },
790 { 0x45, LVL_2, 2048 },
791 { 0x60, LVL_1_DATA, 16 },
792 { 0x66, LVL_1_DATA, 8 },
793 { 0x67, LVL_1_DATA, 16 },
794 { 0x68, LVL_1_DATA, 32 },
795 { 0x70, LVL_TRACE, 12 },
796 { 0x71, LVL_TRACE, 16 },
797 { 0x72, LVL_TRACE, 32 },
798 { 0x79, LVL_2, 128 },
799 { 0x7a, LVL_2, 256 },
800 { 0x7b, LVL_2, 512 },
801 { 0x7c, LVL_2, 1024 },
802 { 0x82, LVL_2, 256 },
803 { 0x83, LVL_2, 512 },
804 { 0x84, LVL_2, 1024 },
805 { 0x85, LVL_2, 2048 },
806 { 0x86, LVL_2, 512 },
807 { 0x87, LVL_2, 1024 },
/*
 * Intel-specific CPU setup: decode CPUID leaf 2 cache descriptors via
 * cache_table[] to accumulate L1I/L1D/L2/L3/trace-cache sizes, report
 * them, then read address sizes from extended leaf 0x80000008.
 *
 * FIX: line "cpuid(2, ...)" previously read "®s[0], ®s[1], ..." —
 * HTML-entity mojibake of "&regs[...]" (matches the cpuid() call pattern
 * used everywhere else in this file, e.g. get_model_name, detect_ht).
 *
 * NOTE(review): several lines (declarations of regs/n/i/j/k, braces,
 * cpuid_level checks) are not visible in this view.
 */
811 static void __init init_intel(struct cpuinfo_x86 *c)
814 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
817 if (c->cpuid_level > 1) {
818 /* supports eax=2 call */
821 unsigned char *dp = (unsigned char *)regs;
823 /* Number of times to iterate */
824 n = cpuid_eax(2) & 0xFF;
826 for ( i = 0 ; i < n ; i++ ) {
827 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
829 /* If bit 31 is set, this is an unknown format */
830 for ( j = 0 ; j < 3 ; j++ ) {
/* NOTE(review): if regs is unsigned this test is always false — the
 * intent is "bit 31 set => zero the register"; declaration not visible
 * here, so left untouched. */
831 if ( regs[j] < 0 ) regs[j] = 0;
834 /* Byte 0 is level count, not a descriptor */
835 for ( j = 1 ; j < 16 ; j++ ) {
836 unsigned char des = dp[j];
839 /* look up this descriptor in the table */
840 while (cache_table[k].descriptor != 0)
842 if (cache_table[k].descriptor == des) {
843 switch (cache_table[k].cache_type) {
845 l1i += cache_table[k].size;
848 l1d += cache_table[k].size;
851 l2 += cache_table[k].size;
854 l3 += cache_table[k].size;
857 trace += cache_table[k].size;
870 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
872 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
874 printk(", L1 D cache: %dK\n", l1d);
878 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
880 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
/* Prefer L2 as the reported cache size; fall back to combined L1. */
882 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
885 n = cpuid_eax(0x80000000);
886 if (n >= 0x80000008) {
887 unsigned eax = cpuid_eax(0x80000008);
888 c->x86_virt_bits = (eax >> 8) & 0xff;
889 c->x86_phys_bits = eax & 0xff;
893 c->x86_cache_alignment = c->x86_clflush_size * 2;
/*
 * Classify the CPU vendor from the CPUID vendor string already stored in
 * c->x86_vendor_id.
 * NOTE(review): the "else" before the UNKNOWN fallback and the closing
 * brace are not visible in this view.
 */
896 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
898 char *v = c->x86_vendor_id;
900 if (!strcmp(v, "AuthenticAMD"))
901 c->x86_vendor = X86_VENDOR_AMD;
902 else if (!strcmp(v, "GenuineIntel"))
903 c->x86_vendor = X86_VENDOR_INTEL;
905 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* NOTE(review): struct appears truncated — other fields and "};" not visible. */
908 struct cpu_model_info {
911 char *model_names[16];
914 /* Do some early cpuid on the boot CPU to get some parameter that are
915 needed before check_bugs. Everything advanced is in identify_cpu
/*
 * Initialise a cpuinfo_x86 to safe defaults, then fill vendor string,
 * family/model/stepping, capability words, clflush size and APIC id
 * from CPUID leaves 0 and 1.
 * NOTE(review): declarations of tfms/misc, the extended-family check
 * guarding the "+=" adjustments, and closing braces are not visible.
 */
917 void __init early_identify_cpu(struct cpuinfo_x86 *c)
921 c->loops_per_jiffy = loops_per_jiffy;
922 c->x86_cache_size = -1;
923 c->x86_vendor = X86_VENDOR_UNKNOWN;
924 c->x86_model = c->x86_mask = 0; /* So far unknown... */
925 c->x86_vendor_id[0] = '\0'; /* Unset */
926 c->x86_model_id[0] = '\0'; /* Unset */
927 c->x86_clflush_size = 64;
928 c->x86_cache_alignment = c->x86_clflush_size;
929 c->x86_num_cores = 1;
/* Boot CPU gets APIC id 0; others default to their cpu_data index. */
930 c->x86_apicid = c == &boot_cpu_data ? 0 : c - cpu_data;
931 memset(&c->x86_capability, 0, sizeof c->x86_capability);
933 /* Get vendor name */
934 cpuid(0x00000000, &c->cpuid_level,
935 (int *)&c->x86_vendor_id[0],
936 (int *)&c->x86_vendor_id[8],
937 (int *)&c->x86_vendor_id[4]);
941 /* Initialize the standard set of capabilities */
942 /* Note that the vendor-specific code below might override */
944 /* Intel-defined flags: level 0x00000001 */
945 if (c->cpuid_level >= 0x00000001) {
947 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
948 &c->x86_capability[0]);
949 c->x86 = (tfms >> 8) & 0xf;
950 c->x86_model = (tfms >> 4) & 0xf;
951 c->x86_mask = tfms & 0xf;
/* Extended family/model adjustment (applies when base family is 0xf). */
953 c->x86 += (tfms >> 20) & 0xff;
954 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* CLFLUSH line size valid only when the CLFSH feature bit (19) is set. */
956 if (c->x86_capability[0] & (1<<19))
957 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
958 c->x86_apicid = misc >> 24;
960 /* Have CPUID level 0 only - unheard of */
966 * This does the hard work of actually picking apart the CPU stuff...
/*
 * Full CPU identification: early fields, AMD/Transmeta-defined feature
 * words, vendor-specific init, idle-routine selection, and (for APs)
 * ANDing capabilities into boot_cpu_data to keep the common feature set.
 * NOTE(review): declarations of i/xlvl, case bodies (init_amd/init_intel
 * calls), breaks, and closing braces are not visible in this view.
 */
968 void __init identify_cpu(struct cpuinfo_x86 *c)
973 early_identify_cpu(c);
975 /* AMD-defined flags: level 0x80000001 */
976 xlvl = cpuid_eax(0x80000000);
977 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
978 if ( xlvl >= 0x80000001 ) {
979 c->x86_capability[1] = cpuid_edx(0x80000001);
980 c->x86_capability[5] = cpuid_ecx(0x80000001);
982 if ( xlvl >= 0x80000004 )
983 get_model_name(c); /* Default name */
986 /* Transmeta-defined flags: level 0x80860001 */
987 xlvl = cpuid_eax(0x80860000);
988 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
989 if ( xlvl >= 0x80860001 )
990 c->x86_capability[2] = cpuid_edx(0x80860001);
994 * Vendor-specific initialization. In this section we
995 * canonicalize the feature flags, meaning if there are
996 * features a certain CPU supports which CPUID doesn't
997 * tell us, CPUID claiming incorrect flags, or other bugs,
998 * we handle them here.
1000 * At the end of this section, c->x86_capability better
1001 * indicate the features this CPU genuinely supports!
1003 switch ( c->x86_vendor ) {
1005 case X86_VENDOR_AMD:
1009 case X86_VENDOR_INTEL:
1013 case X86_VENDOR_UNKNOWN:
1015 display_cacheinfo(c);
1019 select_idle_routine(c);
1023 * On SMP, boot_cpu_data holds the common feature set between
1024 * all CPUs; so make sure that we indicate which features are
1025 * common between the CPUs. The first time this routine gets
1026 * executed, c == &boot_cpu_data.
1028 if ( c != &boot_cpu_data ) {
1029 /* AND the already accumulated flags with these */
1030 for ( i = 0 ; i < NCAPINTS ; i++ )
1031 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
/*
 * Print the model name (or a fallback, elided here) and stepping for a
 * CPU at boot.
 * NOTE(review): the else branch printing family/model and the closing
 * brace are not visible in this view.
 */
1038 void __init print_cpu_info(struct cpuinfo_x86 *c)
1040 if (c->x86_model_id[0])
1041 printk("%s", c->x86_model_id);
1043 if (c->x86_mask || c->cpuid_level >= 0)
1044 printk(" stepping %02x\n", c->x86_mask);
1050 * Get CPU information for use by the procfs.
/*
 * seq_file show callback producing one /proc/cpuinfo record per CPU:
 * identity, cache sizes, feature-flag names, bogomips, address sizes
 * and power-management flags.
 * NOTE(review): table separators, some seq_printf format lines and loop
 * braces are not visible in this view.
 */
1053 static int show_cpuinfo(struct seq_file *m, void *v)
1055 struct cpuinfo_x86 *c = v;
1058 * These flag bits must match the definitions in <asm/cpufeature.h>.
1059 * NULL means this bit is undefined or reserved; either way it doesn't
1060 * have meaning as far as Linux is concerned. Note that it's important
1061 * to realize there is a difference between this table and CPUID -- if
1062 * applications want to get the raw CPUID data, they should access
1063 * /dev/cpu/<cpu_nr>/cpuid instead.
1065 static char *x86_cap_flags[] = {
1067 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1068 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1069 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1070 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
/* AMD-defined flags (capability word 1). */
1073 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1074 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1075 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1076 NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1078 /* Transmeta-defined */
1079 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1080 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1081 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1082 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1084 /* Other (Linux-defined) */
1085 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1088 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1090 /* Intel-defined (#2) */
1091 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
1092 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1093 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1094 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Bit names for c->x86_power (CPUID 0x80000007 EDX). */
1096 static char *x86_power_flags[] = {
1097 "ts", /* temperature sensor */
1098 "fid", /* frequency id control */
1099 "vid", /* voltage id control */
1100 "ttp", /* thermal trip */
/* Skip offline CPUs (return elided in this view). */
1105 if (!cpu_online(c-cpu_data))
1109 seq_printf(m,"processor\t: %u\n"
1111 "cpu family\t: %d\n"
1113 "model name\t: %s\n",
1114 (unsigned)(c-cpu_data),
1115 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1118 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1120 if (c->x86_mask || c->cpuid_level >= 0)
1121 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1123 seq_printf(m, "stepping\t: unknown\n");
1125 if (cpu_has(c,X86_FEATURE_TSC)) {
1126 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1127 cpu_khz / 1000, (cpu_khz % 1000));
/* x86_cache_size is -1 when unknown (see early_identify_cpu). */
1131 if (c->x86_cache_size >= 0)
1132 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1134 #ifdef CONFIG_X86_HT
1136 seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
1137 seq_printf(m, "siblings\t: %d\n", smp_num_siblings);
1143 "fpu_exception\t: yes\n"
1144 "cpuid level\t: %d\n"
/* Emit the name of every set capability bit that has one. */
1151 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1152 if ( test_bit(i, &c->x86_capability) &&
1153 x86_cap_flags[i] != NULL )
1154 seq_printf(m, " %s", x86_cap_flags[i]);
1157 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1158 c->loops_per_jiffy/(500000/HZ),
1159 (c->loops_per_jiffy/(5000/HZ)) % 100);
1161 if (c->x86_tlbsize > 0)
1162 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1163 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1164 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1166 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1167 c->x86_phys_bits, c->x86_virt_bits);
1169 seq_printf(m, "power management:");
/* Named flags where known; raw bit index otherwise. */
1172 for (i = 0; i < 32; i++)
1173 if (c->x86_power & (1 << i)) {
1174 if (i < ARRAY_SIZE(x86_power_flags))
1175 seq_printf(m, " %s", x86_power_flags[i]);
1177 seq_printf(m, " [%d]", i);
1181 if (c->x86_num_cores > 1)
1182 seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
1184 seq_printf(m, "\n\n");
/* seq_file iterator start: one element per possible CPU slot. */
1189 static void *c_start(struct seq_file *m, loff_t *pos)
1191 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* Advance to the next CPU (position increment elided in this view). */
1194 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1197 return c_start(m, pos);
/* Nothing to release. */
1200 static void c_stop(struct seq_file *m, void *v)
/* seq_file operations for /proc/cpuinfo; start/next/stop entries elided. */
1204 struct seq_operations cpuinfo_op = {
1208 .show = show_cpuinfo,