/*
2 * linux/arch/x86-64/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
13 * This file handles the architecture-dependent parts of initialization
 */
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
50 #include <video/edid.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
56 #include <asm/proto.h>
57 #include <asm/setup.h>
58 #include <asm/mach_apic.h>
/*
 * Boot-time global state shared with the rest of the kernel.
 * NOTE(review): this transcription has line-number gaps (the
 * acpi_disabled definition, several struct closers, etc. are missing);
 * verify against upstream arch/x86_64/kernel/setup.c.
 */
65 struct cpuinfo_x86 boot_cpu_data;
/* CR4 feature bits recorded here — presumably replayed on secondary CPUs; confirm consumers. */
67 unsigned long mmu_cr4_features;
68 EXPORT_SYMBOL_GPL(mmu_cr4_features);
71 EXPORT_SYMBOL(acpi_disabled);
72 #ifdef CONFIG_ACPI_BOOT
73 extern int __initdata acpi_ht;
74 extern acpi_interrupt_flags acpi_sci_flags;
75 int __initdata acpi_force = 0;
78 int acpi_numa __initdata;
80 /* For PCI or other memory-mapped resources */
81 unsigned long pci_mem_start = 0x10000000;
83 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
86 unsigned long saved_video_mode;
90 EXPORT_SYMBOL(swiotlb);
/* Legacy BIOS data copied out of the boot parameter page in setup_arch(). */
96 struct drive_info_struct { char dummy[32]; } drive_info;
97 struct screen_info screen_info;
98 struct sys_desc_table_struct {
99 unsigned short length;
100 unsigned char table[0];
103 struct edid_info edid_info;
106 unsigned char aux_device_present;
108 extern int root_mountflags;
/* Linker-provided section boundary symbols (addresses, not chars). */
109 extern char _text, _etext, _edata, _end;
/* Working copy of the kernel command line, built by parse_cmdline_early(). */
111 char command_line[COMMAND_LINE_SIZE];
/*
 * Legacy PC I/O port ranges claimed unconditionally at boot so that
 * drivers cannot accidentally grab them (DMA controllers, PICs, PIT,
 * keyboard controller, FPU error port).
 */
113 struct resource standard_io_resources[] = {
114 { .name = "dma1", .start = 0x00, .end = 0x1f,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "pic1", .start = 0x20, .end = 0x21,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "timer0", .start = 0x40, .end = 0x43,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "timer1", .start = 0x50, .end = 0x53,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "keyboard", .start = 0x60, .end = 0x6f,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
124 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
125 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
126 { .name = "pic2", .start = 0xa0, .end = 0xa1,
127 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
128 { .name = "dma2", .start = 0xc0, .end = 0xdf,
129 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
130 { .name = "fpu", .start = 0xf0, .end = 0xff,
131 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
/* Element count of the array above (array is in scope here, so sizeof works). */
134 #define STANDARD_IO_RESOURCES \
135 (sizeof standard_io_resources / sizeof standard_io_resources[0])
/* Flag shorthands for the iomem resources declared below. */
137 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/*
 * Kernel text/data resources; start/end are filled in by setup_arch()
 * from the _text/_etext/_edata linker symbols.
 * NOTE(review): initializer lines for .start/.end are missing in this
 * transcription — verify against upstream.
 */
139 struct resource data_resource = {
140 .name = "Kernel data",
143 .flags = IORESOURCE_RAM,
145 struct resource code_resource = {
146 .name = "Kernel code",
149 .flags = IORESOURCE_RAM,
152 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
154 static struct resource system_rom_resource = {
155 .name = "System ROM",
158 .flags = IORESOURCE_ROM,
161 static struct resource extension_rom_resource = {
162 .name = "Extension ROM",
165 .flags = IORESOURCE_ROM,
/*
 * Slots for option ROMs found by probe_roms(); only entry 0 has a
 * preset scan start (0xc8000), the rest are filled in as found.
 */
168 static struct resource adapter_rom_resources[] = {
169 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
170 .flags = IORESOURCE_ROM },
171 { .name = "Adapter ROM", .start = 0, .end = 0,
172 .flags = IORESOURCE_ROM },
173 { .name = "Adapter ROM", .start = 0, .end = 0,
174 .flags = IORESOURCE_ROM },
175 { .name = "Adapter ROM", .start = 0, .end = 0,
176 .flags = IORESOURCE_ROM },
177 { .name = "Adapter ROM", .start = 0, .end = 0,
178 .flags = IORESOURCE_ROM },
179 { .name = "Adapter ROM", .start = 0, .end = 0,
180 .flags = IORESOURCE_ROM }
183 #define ADAPTER_ROM_RESOURCES \
184 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
186 static struct resource video_rom_resource = {
190 .flags = IORESOURCE_ROM,
193 static struct resource video_ram_resource = {
194 .name = "Video RAM area",
197 .flags = IORESOURCE_RAM,
/* True if the two bytes at x carry the 0x55AA BIOS ROM signature. */
200 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
/*
 * Sum all bytes of a candidate ROM image; a valid ROM checksums to 0
 * (mod 256). NOTE(review): the accumulation and return statements are
 * missing from this transcription — verify against upstream.
 */
202 static int __init romchecksum(unsigned char *rom, unsigned long length)
204 unsigned char *p, sum = 0;
206 for (p = rom; p < rom + length; p++)
/*
 * Scan the legacy BIOS ROM areas (video BIOS, system BIOS, extension
 * and adapter ROMs) and register the ones that look valid with the
 * iomem resource tree so /proc/iomem reflects them.
 * NOTE(review): several lines (variable decls, loop braces, system-ROM
 * signature check) are missing from this transcription.
 */
211 static void __init probe_roms(void)
213 unsigned long start, length, upper;
/* Video BIOS: scan 2K-aligned addresses below the first adapter slot. */
218 upper = adapter_rom_resources[0].start;
219 for (start = video_rom_resource.start; start < upper; start += 2048) {
220 rom = isa_bus_to_virt(start);
221 if (!romsignature(rom))
224 video_rom_resource.start = start;
226 /* 0 < length <= 0x7f * 512, historically */
227 length = rom[2] * 512;
229 /* if checksum okay, trust length byte */
230 if (length && romchecksum(rom, length))
231 video_rom_resource.end = start + length - 1;
233 request_resource(&iomem_resource, &video_rom_resource);
/* Continue the scan at the next 2K boundary past the video ROM. */
237 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
242 request_resource(&iomem_resource, &system_rom_resource);
243 upper = system_rom_resource.start;
245 /* check for extension rom (ignore length byte!) */
246 rom = isa_bus_to_virt(extension_rom_resource.start);
247 if (romsignature(rom)) {
248 length = extension_rom_resource.end - extension_rom_resource.start + 1;
249 if (romchecksum(rom, length)) {
250 request_resource(&iomem_resource, &extension_rom_resource);
251 upper = extension_rom_resource.start;
255 /* check for adapter roms on 2k boundaries */
256 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
257 rom = isa_bus_to_virt(start);
258 if (!romsignature(rom))
261 /* 0 < length <= 0x7f * 512, historically */
262 length = rom[2] * 512;
264 /* but accept any length that fits if checksum okay */
265 if (!length || start + length > upper || !romchecksum(rom, length))
268 adapter_rom_resources[i].start = start;
269 adapter_rom_resources[i].end = start + length - 1;
270 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* Round down to a 2K boundary; the loop's += 2048 then steps past the ROM. */
272 start = adapter_rom_resources[i++].end & ~2047UL;
/*
 * Early kernel command-line parser: saves the raw line for
 * /proc/cmdline, then recognizes boot options (maxcpus=, acpi=*, mem=,
 * numa=, iommu=, noexec=, apic flags, ...) before the normal __setup
 * machinery runs. Leaves the cleaned-up line in *cmdline_p.
 * NOTE(review): the tokenizing for-loop header, several option bodies
 * and closing braces are missing from this transcription.
 */
276 static __init void parse_cmdline_early (char ** cmdline_p)
278 char c = ' ', *to = command_line, *from = COMMAND_LINE;
281 /* Save unparsed command line copy for /proc/cmdline */
282 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
283 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
291 * If the BIOS enumerates physical processors before logical,
292 * maxcpus=N at enumeration-time can be used to disable HT.
294 else if (!memcmp(from, "maxcpus=", 8)) {
295 extern unsigned int maxcpus;
297 maxcpus = simple_strtoul(from + 8, NULL, 0);
300 #ifdef CONFIG_ACPI_BOOT
301 /* "acpi=off" disables both ACPI table parsing and interpreter init */
302 if (!memcmp(from, "acpi=off", 8))
305 if (!memcmp(from, "acpi=force", 10)) {
306 /* add later when we do DMI horrors: */
311 /* acpi=ht just means: do ACPI MADT parsing
312 at bootup, but don't enable the full ACPI interpreter */
313 if (!memcmp(from, "acpi=ht", 7)) {
318 else if (!memcmp(from, "pci=noacpi", 10))
320 else if (!memcmp(from, "acpi=noirq", 10))
/* acpi_sci= trigger/polarity encodings: 1 = edge/high, 3 = level/low. */
323 else if (!memcmp(from, "acpi_sci=edge", 13))
324 acpi_sci_flags.trigger = 1;
325 else if (!memcmp(from, "acpi_sci=level", 14))
326 acpi_sci_flags.trigger = 3;
327 else if (!memcmp(from, "acpi_sci=high", 13))
328 acpi_sci_flags.polarity = 1;
329 else if (!memcmp(from, "acpi_sci=low", 12))
330 acpi_sci_flags.polarity = 3;
332 /* acpi=strict disables out-of-spec workarounds */
333 else if (!memcmp(from, "acpi=strict", 11)) {
338 if (!memcmp(from, "nolapic", 7) ||
339 !memcmp(from, "disableapic", 11))
342 if (!memcmp(from, "noapic", 6))
343 skip_ioapic_setup = 1;
/* "apic" re-enables the IO-APIC (overrides an earlier "noapic"). */
345 if (!memcmp(from, "apic", 4)) {
346 skip_ioapic_setup = 0;
350 if (!memcmp(from, "mem=", 4))
351 parse_memopt(from+4, &from);
353 #ifdef CONFIG_DISCONTIGMEM
354 if (!memcmp(from, "numa=", 5))
358 #ifdef CONFIG_GART_IOMMU
359 if (!memcmp(from,"iommu=",6)) {
364 if (!memcmp(from,"oops=panic", 10))
367 if (!memcmp(from, "noexec=", 7))
368 nonx_setup(from + 7);
/* Stop copying once the output buffer would overflow. */
374 if (COMMAND_LINE_SIZE <= ++len)
379 *cmdline_p = command_line;
382 #ifndef CONFIG_DISCONTIGMEM
/*
 * Set up the flat (non-NUMA) bootmem allocator: find room for the
 * bootmem bitmap via e820, initialize it, free all usable RAM into it,
 * then reserve the bitmap's own pages.
 */
383 static void __init contig_initmem_init(void)
385 unsigned long bootmap_size, bootmap;
386 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
387 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
389 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
390 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
391 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
/* The bitmap itself must not be handed out as free memory. */
392 reserve_bootmem(bootmap, bootmap_size);
396 /* Use inline assembly to define this because the nops are defined
397 as inline assembly strings in the include files and we cannot
398 get them easily into strings. */
399 asm("\t.data\nk8nops: "
400 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
/*
 * k8_nops[n] points at an n-byte K8-optimal NOP inside the k8nops
 * blob above (entries are laid out back to back, hence the running
 * offsets). Used by apply_alternatives() to pad patched code.
 */
403 extern unsigned char k8nops[];
404 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
410 k8nops + 1 + 2 + 3 + 4,
411 k8nops + 1 + 2 + 3 + 4 + 5,
412 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
413 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
416 /* Replace instructions with better alternatives for this CPU type.
418 This runs before SMP is initialized to avoid SMP problems with
419 self-modifying code. This implies that asymmetric systems where
420 APs have less capabilities than the boot processor are not handled.
421 In this case boot with "noreplacement". */
422 void apply_alternatives(void *start, void *end)
/* Walk the alt_instr records; patch each site whose CPUID feature is present. */
426 for (a = start; (void *)a < end; a++) {
427 if (!boot_cpu_has(a->cpuid))
430 BUG_ON(a->replacementlen > a->instrlen);
431 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
432 diff = a->instrlen - a->replacementlen;
434 /* Pad the rest with nops */
435 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
439 __inline_memcpy(a->instr + i, k8_nops[k], k);
/* Set by the "noreplacement" boot option to skip instruction patching. */
444 static int no_replacement __initdata = 0;
/*
 * Patch the whole kernel's alternatives section (linker-provided
 * __alt_instructions[] bounds). NOTE(review): the no_replacement
 * early-return is missing from this transcription.
 */
446 void __init alternative_instructions(void)
448 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
451 apply_alternatives(__alt_instructions, __alt_instructions_end);
/* __setup handler flips no_replacement; body missing in this transcription. */
454 static int __init noreplacement_setup(char *s)
460 __setup("noreplacement", noreplacement_setup);
462 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
464 #ifdef CONFIG_EDD_MODULE
468 * copy_edd() - Copy the BIOS EDD information
469 * from boot_params into a safe place.
/* Real version: snapshot EDD/MBR-signature data before boot_params is recycled. */
472 static inline void copy_edd(void)
474 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
475 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
476 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
477 edd.edd_info_nr = EDD_NR;
/* Stub when EDD support is not configured. */
480 static inline void copy_edd(void)
/* BDA word at 0x40E holds the real-mode segment of the EBDA. */
485 #define EBDA_ADDR_POINTER 0x40E
/*
 * Reserve the Extended BIOS Data Area so bootmem never hands it out.
 * NOTE(review): the segment->physical shift (addr <<= 4) is missing
 * from this transcription — verify against upstream.
 */
486 static void __init reserve_ebda_region(void)
490 * there is a real-mode segmented pointer pointing to the
491 * 4K EBDA area at 0x40E
493 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
496 reserve_bootmem_generic(addr, PAGE_SIZE);
/*
 * Architecture-specific boot-time setup: copies BIOS data out of the
 * boot parameter page, parses the command line, sizes and maps RAM,
 * brings up bootmem, reserves firmware/trampoline/initrd regions,
 * probes ACPI/SMP tables and registers standard resources.
 * NOTE(review): this transcription has many missing lines (braces,
 * some calls such as e820/ACPI helpers); comments below describe only
 * what is visible here.
 */
499 void __init setup_arch(char **cmdline_p)
501 unsigned long low_mem_size;
502 unsigned long kernel_end;
/* Snapshot boot_params-derived data before that page is reused. */
504 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
505 drive_info = DRIVE_INFO;
506 screen_info = SCREEN_INFO;
507 edid_info = EDID_INFO;
508 aux_device_present = AUX_DEVICE_INFO;
509 saved_video_mode = SAVED_VIDEO_MODE;
510 bootloader_type = LOADER_TYPE;
512 #ifdef CONFIG_BLK_DEV_RAM
513 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
514 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
515 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
517 setup_memory_region();
520 if (!MOUNT_ROOT_RDONLY)
521 root_mountflags &= ~MS_RDONLY;
/* Describe the kernel image to the mm layer via linker symbols. */
522 init_mm.start_code = (unsigned long) &_text;
523 init_mm.end_code = (unsigned long) &_etext;
524 init_mm.end_data = (unsigned long) &_edata;
525 init_mm.brk = (unsigned long) &_end;
527 code_resource.start = virt_to_phys(&_text);
528 code_resource.end = virt_to_phys(&_etext)-1;
529 data_resource.start = virt_to_phys(&_etext);
530 data_resource.end = virt_to_phys(&_edata)-1;
532 parse_cmdline_early(cmdline_p);
534 early_identify_cpu(&boot_cpu_data);
537 * partially used pages are not usable - thus
538 * we are rounding upwards:
540 end_pfn = e820_end_of_ram();
/* Build the direct mapping for all discovered physical memory. */
544 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
546 #ifdef CONFIG_ACPI_BOOT
548 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
549 * Call this early for SRAT node setup.
551 acpi_boot_table_init();
554 #ifdef CONFIG_ACPI_NUMA
556 * Parse SRAT to discover nodes.
561 #ifdef CONFIG_DISCONTIGMEM
562 numa_initmem_init(0, end_pfn);
564 contig_initmem_init();
567 /* Reserve direct mapping */
568 reserve_bootmem_generic(table_start << PAGE_SHIFT,
569 (table_end - table_start) << PAGE_SHIFT);
/* Reserve the kernel image itself (from HIGH_MEMORY up to _end). */
572 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
573 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
576 * reserve physical page 0 - it's a special BIOS page on many boxes,
577 * enabling clean reboots, SMP operation, laptop functions.
579 reserve_bootmem_generic(0, PAGE_SIZE);
581 /* reserve ebda region */
582 reserve_ebda_region();
586 * But first pinch a few for the stack/trampoline stuff
587 * FIXME: Don't need the extra page at 4K, but need to fix
588 * trampoline before removing it. (see the GDT stuff)
590 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
592 /* Reserve SMP trampoline */
593 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
596 #ifdef CONFIG_ACPI_SLEEP
598 * Reserve low memory region for sleep support.
600 acpi_reserve_bootmem();
602 #ifdef CONFIG_X86_LOCAL_APIC
604 * Find and reserve possible boot-time SMP configuration:
608 #ifdef CONFIG_BLK_DEV_INITRD
/* Keep the bootloader-provided initrd only if it lies inside usable RAM. */
609 if (LOADER_TYPE && INITRD_START) {
610 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
611 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
613 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
614 initrd_end = initrd_start+INITRD_SIZE;
617 printk(KERN_ERR "initrd extends beyond end of memory "
618 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
619 (unsigned long)(INITRD_START + INITRD_SIZE),
620 (unsigned long)(end_pfn << PAGE_SHIFT));
629 #ifdef CONFIG_ACPI_BOOT
631 * Read APIC and some other early information from ACPI tables.
636 #ifdef CONFIG_X86_LOCAL_APIC
638 * get boot-time SMP configuration:
640 if (smp_found_config)
642 init_apic_mappings();
646 * Request address space for all standard RAM and ROM resources
647 * and also for regions reported as reserved by the e820.
650 e820_reserve_resources();
652 request_resource(&iomem_resource, &video_ram_resource);
656 /* request I/O space for devices used on all i[345]86 PCs */
657 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
658 request_resource(&ioport_resource, &standard_io_resources[i]);
661 /* Will likely break when you have unassigned resources with more
662 than 4GB memory and bridges that don't support more than 4GB.
663 Doing it properly would require to use pci_alloc_consistent
/* Start PCI MMIO allocations above RAM, rounded up to 1MB. */
665 low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
666 if (low_mem_size > pci_mem_start)
667 pci_mem_start = low_mem_size;
669 #ifdef CONFIG_GART_IOMMU
674 #if defined(CONFIG_VGA_CONSOLE)
675 conswitchp = &vga_con;
676 #elif defined(CONFIG_DUMMY_CONSOLE)
677 conswitchp = &dummy_con;
/*
 * Read the 48-byte brand string via CPUID 0x80000002..4 into
 * c->x86_model_id. Bails out (return value lines missing here) when
 * the extended CPUID level is too low.
 */
682 static int __init get_model_name(struct cpuinfo_x86 *c)
686 if (c->x86_cpuid_level < 0x80000004)
/* Each CPUID leaf fills 16 bytes (4 registers) of the model string. */
689 v = (unsigned int *) c->x86_model_id;
690 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
691 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
692 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
/* Force NUL termination; the brand string need not be terminated by HW. */
693 c->x86_model_id[48] = 0;
/*
 * Report L1/L2 cache and TLB geometry from AMD-style extended CPUID
 * leaves (0x80000005/6/7/8), filling cache size, TLB size, power
 * flags and physical/virtual address widths in *c.
 */
698 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
700 unsigned int n, dummy, eax, ebx, ecx, edx;
702 n = c->x86_cpuid_level;
704 if (n >= 0x80000005) {
705 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
706 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
707 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
708 c->x86_cache_size=(ecx>>24)+(edx>>24);
709 /* On K8 L1 TLB is inclusive, so don't count it */
713 if (n >= 0x80000006) {
714 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
715 ecx = cpuid_ecx(0x80000006);
/* L2 size is ECX[31:16] in KB; TLB entry counts come from EBX halves. */
716 c->x86_cache_size = ecx >> 16;
717 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
719 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
720 c->x86_cache_size, ecx & 0xFF);
724 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
725 if (n >= 0x80000008) {
726 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
727 c->x86_virt_bits = (eax >> 8) & 0xff;
728 c->x86_phys_bits = eax & 0xff;
/*
 * AMD-specific CPU setup: fix up 3DNow feature bit, detect K8 C
 * stepping, read the brand string, count cores and (on NUMA) map
 * cores to nodes. NOTE(review): transcription gaps — variable decls
 * and some conditionals are missing.
 */
733 static int __init init_amd(struct cpuinfo_x86 *c)
741 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
742 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
743 clear_bit(0*32+31, &c->x86_capability);
/* Family/model/stepping from CPUID leaf 1 identifies K8 revision C. */
746 level = cpuid_eax(1);
747 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
748 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
750 r = get_model_name(c);
754 /* Should distinguish Models here, but this is only
755 a fallback anyways. */
756 strcpy(c->x86_model_id, "Hammer");
760 display_cacheinfo(c);
/* Core count from CPUID 0x80000008 ECX[7:0]+1; sanity-check power of two. */
762 if (c->x86_cpuid_level >= 0x80000008) {
763 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
764 if (c->x86_num_cores & (c->x86_num_cores - 1))
765 c->x86_num_cores = 1;
768 /* On a dual core setup the lower bits of apic id
769 distinguish the cores. Fix up the CPU<->node mappings
771 Assumes number of cores is a power of two.
772 When using SRAT use mapping from SRAT. */
774 if (acpi_numa <= 0 && c->x86_num_cores > 1) {
775 cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);
776 if (!node_online(cpu_to_node[cpu]))
777 cpu_to_node[cpu] = first_node(node_online_map);
779 printk(KERN_INFO "CPU %d(%d) -> Node %d\n",
780 cpu, c->x86_num_cores, cpu_to_node[cpu]);
/*
 * Detect Hyper-Threading siblings via CPUID leaf 1 (EBX[23:16]) and
 * derive the physical package id for this CPU.
 * NOTE(review): the lsb/msb loop bodies (index updates, shifts) are
 * missing from this transcription.
 */
787 static void __init detect_ht(struct cpuinfo_x86 *c)
790 u32 eax, ebx, ecx, edx;
791 int index_lsb, index_msb, tmp;
792 int cpu = smp_processor_id();
794 if (!cpu_has(c, X86_FEATURE_HT))
797 cpuid(1, &eax, &ebx, &ecx, &edx);
798 smp_num_siblings = (ebx & 0xff0000) >> 16;
800 if (smp_num_siblings == 1) {
801 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
802 } else if (smp_num_siblings > 1) {
806 * At this point we only support two siblings per
809 if (smp_num_siblings > NR_CPUS) {
810 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
811 smp_num_siblings = 1;
/* Find lowest and highest set-bit positions of the sibling count. */
814 tmp = smp_num_siblings;
815 while ((tmp & 1) == 0) {
819 tmp = smp_num_siblings;
820 while ((tmp & 0x80000000 ) == 0) {
824 if (index_lsb != index_msb )
826 phys_proc_id[cpu] = phys_pkg_id(index_msb);
828 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* Pretend AMD CMP (dual-core) CPUs have no HT siblings; see comment below. */
834 static void __init sched_cmp_hack(struct cpuinfo_x86 *c)
837 /* AMD dual core looks like HT but isn't really. Hide it from the
838 scheduler. This works around problems with the domain scheduler.
839 Also probably gives slightly better scheduling and disables
840 SMT nice which is harmful on dual core.
841 TBD tune the domain scheduler for dual core. */
842 if (c->x86_vendor == X86_VENDOR_AMD && cpu_has(c, X86_FEATURE_CMP_LEGACY))
843 smp_num_siblings = 1;
/*
 * Intel-specific setup: cache info, address widths from CPUID
 * 0x80000008, and a doubled cache alignment (Intel prefetches two
 * cache lines, hence clflush_size * 2).
 */
847 static void __init init_intel(struct cpuinfo_x86 *c)
852 init_intel_cacheinfo(c);
853 n = c->x86_cpuid_level;
854 if (n >= 0x80000008) {
855 unsigned eax = cpuid_eax(0x80000008);
856 c->x86_virt_bits = (eax >> 8) & 0xff;
857 c->x86_phys_bits = eax & 0xff;
861 c->x86_cache_alignment = c->x86_clflush_size * 2;
/* Map the CPUID vendor ID string to an X86_VENDOR_* constant. */
864 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
866 char *v = c->x86_vendor_id;
868 if (!strcmp(v, "AuthenticAMD"))
869 c->x86_vendor = X86_VENDOR_AMD;
870 else if (!strcmp(v, "GenuineIntel"))
871 c->x86_vendor = X86_VENDOR_INTEL;
873 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Lookup table type: per-family model-name strings (16 models max). */
876 struct cpu_model_info {
879 char *model_names[16];
882 /* Do some early cpuid on the boot CPU to get some parameter that are
883 needed before check_bugs. Everything advanced is in identify_cpu
885 void __init early_identify_cpu(struct cpuinfo_x86 *c)
/* Reset every field to a safe default before probing. */
889 c->loops_per_jiffy = loops_per_jiffy;
890 c->x86_cache_size = -1;
891 c->x86_vendor = X86_VENDOR_UNKNOWN;
892 c->x86_model = c->x86_mask = 0; /* So far unknown... */
893 c->x86_vendor_id[0] = '\0'; /* Unset */
894 c->x86_model_id[0] = '\0'; /* Unset */
895 c->x86_clflush_size = 64;
896 c->x86_cache_alignment = c->x86_clflush_size;
897 c->x86_num_cores = 1;
898 c->x86_apicid = c == &boot_cpu_data ? 0 : c - cpu_data;
899 c->x86_cpuid_level = 0;
900 memset(&c->x86_capability, 0, sizeof c->x86_capability);
902 /* Get vendor name */
/* Leaf 0: EAX = max std leaf; EBX/EDX/ECX spell the 12-byte vendor ID. */
903 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
904 (unsigned int *)&c->x86_vendor_id[0],
905 (unsigned int *)&c->x86_vendor_id[8],
906 (unsigned int *)&c->x86_vendor_id[4]);
910 /* Initialize the standard set of capabilities */
911 /* Note that the vendor-specific code below might override */
913 /* Intel-defined flags: level 0x00000001 */
914 if (c->cpuid_level >= 0x00000001) {
916 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
917 &c->x86_capability[0]);
918 c->x86 = (tfms >> 8) & 0xf;
919 c->x86_model = (tfms >> 4) & 0xf;
920 c->x86_mask = tfms & 0xf;
/* Extended family/model fields extend the base values (family 0xf+). */
922 c->x86 += (tfms >> 20) & 0xff;
923 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* Bit 19 = CLFLUSH supported; MISC[15:8] is line size in 8-byte units. */
925 if (c->x86_capability[0] & (1<<19))
926 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
927 c->x86_apicid = misc >> 24;
929 /* Have CPUID level 0 only - unheard of */
935 * This does the hard work of actually picking apart the CPU stuff...
937 void __init identify_cpu(struct cpuinfo_x86 *c)
942 early_identify_cpu(c);
944 /* AMD-defined flags: level 0x80000001 */
/* Probe the extended CPUID range; valid only if EAX looks like 0x8000xxxx. */
945 xlvl = cpuid_eax(0x80000000);
946 c->x86_cpuid_level = xlvl;
947 if ((xlvl & 0xffff0000) == 0x80000000) {
948 if (xlvl >= 0x80000001) {
949 c->x86_capability[1] = cpuid_edx(0x80000001);
950 c->x86_capability[5] = cpuid_ecx(0x80000001);
952 if (xlvl >= 0x80000004)
953 get_model_name(c); /* Default name */
956 /* Transmeta-defined flags: level 0x80860001 */
957 xlvl = cpuid_eax(0x80860000);
958 if ((xlvl & 0xffff0000) == 0x80860000) {
959 /* Don't set x86_cpuid_level here for now to not confuse. */
960 if (xlvl >= 0x80860001)
961 c->x86_capability[2] = cpuid_edx(0x80860001);
965 * Vendor-specific initialization. In this section we
966 * canonicalize the feature flags, meaning if there are
967 * features a certain CPU supports which CPUID doesn't
968 * tell us, CPUID claiming incorrect flags, or other bugs,
969 * we handle them here.
971 * At the end of this section, c->x86_capability better
972 * indicate the features this CPU genuinely supports!
974 switch (c->x86_vendor) {
979 case X86_VENDOR_INTEL:
983 case X86_VENDOR_UNKNOWN:
985 display_cacheinfo(c);
989 select_idle_routine(c);
994 * On SMP, boot_cpu_data holds the common feature set between
995 * all CPUs; so make sure that we indicate which features are
996 * common between the CPUs. The first time this routine gets
997 * executed, c == &boot_cpu_data.
999 if (c != &boot_cpu_data) {
1000 /* AND the already accumulated flags with these */
1001 for (i = 0 ; i < NCAPINTS ; i++)
1002 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1005 #ifdef CONFIG_X86_MCE
/* Register secondary CPUs with their NUMA node. */
1009 if (c != &boot_cpu_data)
1010 numa_add_cpu(c - cpu_data);
/* One-line boot banner: model name (if known) and stepping. */
1015 void __init print_cpu_info(struct cpuinfo_x86 *c)
1017 if (c->x86_model_id[0])
1018 printk("%s", c->x86_model_id);
1020 if (c->x86_mask || c->cpuid_level >= 0)
1021 printk(" stepping %02x\n", c->x86_mask);
1027 * Get CPU information for use by the procfs.
/* seq_file show handler for /proc/cpuinfo; v is a struct cpuinfo_x86 *. */
1030 static int show_cpuinfo(struct seq_file *m, void *v)
1032 struct cpuinfo_x86 *c = v;
1035 * These flag bits must match the definitions in <asm/cpufeature.h>.
1036 * NULL means this bit is undefined or reserved; either way it doesn't
1037 * have meaning as far as Linux is concerned. Note that it's important
1038 * to realize there is a difference between this table and CPUID -- if
1039 * applications want to get the raw CPUID data, they should access
1040 * /dev/cpu/<cpu_nr>/cpuid instead.
1042 static char *x86_cap_flags[] = {
1044 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1045 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1046 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1047 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1050 "pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1051 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1052 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1053 NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1055 /* Transmeta-defined */
1056 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1057 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1058 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1059 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1061 /* Other (Linux-defined) */
1062 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1067 /* Intel-defined (#2) */
1068 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
1069 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1070 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1071 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1073 /* AMD-defined (#2) */
1074 "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
1075 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1076 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1077 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
/* Names for CPUID 0x80000007 EDX power-management bits. */
1079 static char *x86_power_flags[] = {
1080 "ts", /* temperature sensor */
1081 "fid", /* frequency id control */
1082 "vid", /* voltage id control */
1083 "ttp", /* thermal trip */
/* Skip CPUs that are present but not online. */
1088 if (!cpu_online(c-cpu_data))
1092 seq_printf(m,"processor\t: %u\n"
1094 "cpu family\t: %d\n"
1096 "model name\t: %s\n",
1097 (unsigned)(c-cpu_data),
1098 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1101 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1103 if (c->x86_mask || c->cpuid_level >= 0)
1104 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1106 seq_printf(m, "stepping\t: unknown\n");
1108 if (cpu_has(c,X86_FEATURE_TSC)) {
1109 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1110 cpu_khz / 1000, (cpu_khz % 1000));
1114 if (c->x86_cache_size >= 0)
1115 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1118 seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
1119 seq_printf(m, "siblings\t: %d\n", c->x86_num_cores * smp_num_siblings);
1124 "fpu_exception\t: yes\n"
1125 "cpuid level\t: %d\n"
/* Print every known feature-flag name whose capability bit is set. */
1132 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1133 if ( test_bit(i, &c->x86_capability) &&
1134 x86_cap_flags[i] != NULL )
1135 seq_printf(m, " %s", x86_cap_flags[i]);
1138 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1139 c->loops_per_jiffy/(500000/HZ),
1140 (c->loops_per_jiffy/(5000/HZ)) % 100);
1142 if (c->x86_tlbsize > 0)
1143 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1144 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1145 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1147 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1148 c->x86_phys_bits, c->x86_virt_bits);
1150 seq_printf(m, "power management:");
/* Named bits get their string; unknown set bits print as [n]. */
1153 for (i = 0; i < 32; i++)
1154 if (c->x86_power & (1 << i)) {
1155 if (i < ARRAY_SIZE(x86_power_flags))
1156 seq_printf(m, " %s", x86_power_flags[i]);
1158 seq_printf(m, " [%d]", i);
1161 seq_printf(m, "\n");
1163 if (c->x86_num_cores > 1)
1164 seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
1166 seq_printf(m, "\n\n");
/* seq_file iterator: one element per possible CPU slot in cpu_data[]. */
1171 static void *c_start(struct seq_file *m, loff_t *pos)
1173 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* Advance to the next CPU (position increment missing in this transcription). */
1176 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1179 return c_start(m, pos);
/* Nothing to release. */
1182 static void c_stop(struct seq_file *m, void *v)
/* Ops vector wiring /proc/cpuinfo to the iterators above. */
1186 struct seq_operations cpuinfo_op = {
1190 .show = show_cpuinfo,