2 * linux/arch/x86-64/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
13 * This file handles the architecture-dependent parts of initialization
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
44 #include <asm/uaccess.h>
45 #include <asm/system.h>
50 #include <video/edid.h>
53 #include <asm/mpspec.h>
54 #include <asm/mmu_context.h>
55 #include <asm/bootsetup.h>
57 #include <asm/proto.h>
/* CPU data for the boot processor; filled in later by identify_cpu(). */
63 struct cpuinfo_x86 boot_cpu_data;
/* CR4 feature bits to enable; exported (GPL-only) for modules. */
65 unsigned long mmu_cr4_features;
66 EXPORT_SYMBOL_GPL(mmu_cr4_features);
69 EXPORT_SYMBOL(acpi_disabled);
70 #ifdef CONFIG_ACPI_BOOT
71 extern int __initdata acpi_ht;
72 extern acpi_interrupt_flags acpi_sci_flags;
73 /* int __initdata acpi_force = 0; */
76 /* For PCI or other memory-mapped resources */
77 unsigned long pci_mem_start = 0x10000000;
/* Video mode left by the boot loader (copied from SAVED_VIDEO_MODE in setup_arch). */
79 unsigned long saved_video_mode;
82 EXPORT_SYMBOL(swiotlb);
/* Opaque 32-byte BIOS drive parameter block (copied from DRIVE_INFO in setup_arch). */
87 struct drive_info_struct { char dummy[32]; } drive_info;
88 struct screen_info screen_info;
89 struct sys_desc_table_struct {
90 unsigned short length;
/* Zero-length trailing array: variable-length table follows the header. */
91 unsigned char table[0];
94 struct edid_info edid_info;
97 unsigned char aux_device_present;
99 extern int root_mountflags;
/* Linker-script symbols delimiting kernel text/data/end. */
100 extern char _text, _etext, _edata, _end;
/* Parsed boot command line, and the raw copy preserved for /proc/cmdline. */
102 char command_line[COMMAND_LINE_SIZE];
103 char saved_command_line[COMMAND_LINE_SIZE];
/* Legacy PC I/O port ranges claimed at boot so drivers cannot clash with them. */
105 struct resource standard_io_resources[] = {
106 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY | IORESOURCE_IO },
107 { "pic1", 0x20, 0x21, IORESOURCE_BUSY | IORESOURCE_IO },
108 { "timer", 0x40, 0x5f, IORESOURCE_BUSY | IORESOURCE_IO },
109 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY | IORESOURCE_IO },
110 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY | IORESOURCE_IO },
111 { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY | IORESOURCE_IO },
112 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY | IORESOURCE_IO },
113 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY | IORESOURCE_IO }
/* Element count of standard_io_resources[] (array, not pointer, so sizeof works). */
116 #define STANDARD_IO_RESOURCES \
117 (sizeof standard_io_resources / sizeof standard_io_resources[0])
/* Kernel text/data resources; .start/.end are patched in setup_arch(). */
119 struct resource code_resource = { "Kernel code", 0x100000, 0, IORESOURCE_MEM };
120 struct resource data_resource = { "Kernel data", 0, 0, IORESOURCE_MEM };
121 struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY | IORESOURCE_MEM };
/* Shorthand flag set for read-only legacy ROM regions. */
123 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
125 static struct resource system_rom_resource = { "System ROM", 0xf0000, 0xfffff, IORESOURCE_ROM };
126 static struct resource extension_rom_resource = { "Extension ROM", 0xe0000, 0xeffff, IORESOURCE_ROM };
/* Slots for up to six adapter ROMs found by probe_roms(); ends filled in there. */
128 static struct resource adapter_rom_resources[] = {
129 { "Adapter ROM", 0xc8000, 0, IORESOURCE_ROM },
130 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
131 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
132 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
133 { "Adapter ROM", 0, 0, IORESOURCE_ROM },
134 { "Adapter ROM", 0, 0, IORESOURCE_ROM }
/* Element count of adapter_rom_resources[]. */
137 #define ADAPTER_ROM_RESOURCES \
138 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
140 static struct resource video_rom_resource = { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_ROM };
/* A PC option ROM begins with the bytes 0x55 0xaa — read little-endian as 0xaa55. */
142 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
/*
 * Validate a ROM image by summing 'length' bytes (mod 256).  Callers in
 * probe_roms() treat a non-zero return as "checksum okay".
 * NOTE(review): the loop body and return statement are missing from this view.
 */
144 static int __init checksum(unsigned char *rom, unsigned long length)
146 unsigned char *p, sum = 0;
148 for (p = rom; p < rom + length; p++)
/*
 * Scan the legacy ISA ROM areas — video ROM, system BIOS ROM, extension
 * ROM, and adapter ROMs on 2K boundaries — and register every region that
 * carries a valid 0xaa55 signature with the iomem resource tree.
 * NOTE(review): several interior lines (braces, 'continue's, declarations
 * of 'rom' and 'i') are missing from this view.
 */
153 static void __init probe_roms(void)
155 unsigned long start, length, upper;
/* Video ROM: scan from 0xc0000 up to the first adapter ROM slot. */
160 upper = adapter_rom_resources[0].start;
161 for (start = video_rom_resource.start; start < upper; start += 2048) {
162 rom = isa_bus_to_virt(start);
163 if (!romsignature(rom))
166 video_rom_resource.start = start;
168 /* 0 < length <= 0x7f * 512, historically */
/* Byte 2 of a ROM header is its size in 512-byte units. */
169 length = rom[2] * 512;
171 /* if checksum okay, trust length byte */
172 if (length && checksum(rom, length))
173 video_rom_resource.end = start + length - 1;
175 request_resource(&iomem_resource, &video_rom_resource);
/* Continue the adapter scan on the next 2K boundary after the video ROM. */
179 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
184 request_resource(&iomem_resource, &system_rom_resource);
185 upper = system_rom_resource.start;
187 /* check for extension rom (ignore length byte!) */
188 rom = isa_bus_to_virt(extension_rom_resource.start);
189 if (romsignature(rom)) {
190 length = extension_rom_resource.end - extension_rom_resource.start + 1;
191 if (checksum(rom, length)) {
192 request_resource(&iomem_resource, &extension_rom_resource);
/* A valid extension ROM lowers the upper bound for the adapter scan. */
193 upper = extension_rom_resource.start;
197 /* check for adapter roms on 2k boundaries */
198 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
199 rom = isa_bus_to_virt(start);
200 if (!romsignature(rom))
203 /* 0 < length <= 0x7f * 512, historically */
204 length = rom[2] * 512;
206 /* but accept any length that fits if checksum okay */
207 if (!length || start + length > upper || !checksum(rom, length))
210 adapter_rom_resources[i].start = start;
211 adapter_rom_resources[i].end = start + length - 1;
212 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* Round down to the 2K boundary containing the ROM's last byte; the loop's += 2048 then advances past it. */
214 start = adapter_rom_resources[i++].end & ~2047UL;
/*
 * Early boot command line parsing.  Saves the raw line for /proc/cmdline,
 * then handles options that must take effect before the normal __setup()
 * machinery runs: maxcpus=, acpi= variants, acpi_sci=, nolapic/noapic/apic,
 * mem=, numa=, iommu=, oops=panic.  The filtered line is handed back via
 * *cmdline_p.  NOTE(review): the main scan loop's framing, several branch
 * bodies, and closing braces are missing from this view.
 */
218 static __init void parse_cmdline_early (char ** cmdline_p)
220 char c = ' ', *to = command_line, *from = COMMAND_LINE;
223 /* Save unparsed command line copy for /proc/cmdline */
224 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
/* Guarantee NUL termination even if the boot loader filled the whole buffer. */
225 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
233 * If the BIOS enumerates physical processors before logical,
234 * maxcpus=N at enumeration-time can be used to disable HT.
236 else if (!memcmp(from, "maxcpus=", 8)) {
237 extern unsigned int maxcpus;
239 maxcpus = simple_strtoul(from + 8, NULL, 0);
242 #ifdef CONFIG_ACPI_BOOT
243 /* "acpi=off" disables both ACPI table parsing and interpreter init */
244 if (!memcmp(from, "acpi=off", 8))
247 if (!memcmp(from, "acpi=force", 10)) {
248 /* add later when we do DMI horrors: */
249 /* acpi_force = 1; */
253 /* acpi=ht just means: do ACPI MADT parsing
254 at bootup, but don't enable the full ACPI interpreter */
255 if (!memcmp(from, "acpi=ht", 7)) {
258 else if (!memcmp(from, "pci=noacpi", 10))
260 else if (!memcmp(from, "acpi=noirq", 10))
/* SCI trigger/polarity overrides for broken BIOSes (1 = edge/high, 3 = level/low). */
263 else if (!memcmp(from, "acpi_sci=edge", 13))
264 acpi_sci_flags.trigger = 1;
265 else if (!memcmp(from, "acpi_sci=level", 14))
266 acpi_sci_flags.trigger = 3;
267 else if (!memcmp(from, "acpi_sci=high", 13))
268 acpi_sci_flags.polarity = 1;
269 else if (!memcmp(from, "acpi_sci=low", 12))
270 acpi_sci_flags.polarity = 3;
272 /* acpi=strict disables out-of-spec workarounds */
273 else if (!memcmp(from, "acpi=strict", 11)) {
278 if (!memcmp(from, "nolapic", 7) ||
279 !memcmp(from, "disableapic", 11))
282 if (!memcmp(from, "noapic", 6))
283 skip_ioapic_setup = 1;
285 if (!memcmp(from, "apic", 4)) {
286 skip_ioapic_setup = 0;
290 if (!memcmp(from, "mem=", 4))
/* parse_memopt advances 'from' past the consumed argument. */
291 parse_memopt(from+4, &from);
293 #ifdef CONFIG_DISCONTIGMEM
294 if (!memcmp(from, "numa=", 5))
298 #ifdef CONFIG_GART_IOMMU
299 if (!memcmp(from,"iommu=",6)) {
304 if (!memcmp(from,"oops=panic", 10))
/* Stop copying once the output buffer would overflow. */
311 if (COMMAND_LINE_SIZE <= ++len)
316 *cmdline_p = command_line;
319 #ifndef CONFIG_DISCONTIGMEM
/*
 * Bootmem setup for contiguous-memory (non-NUMA) machines: find room for
 * the bootmem bitmap in the e820 map, free all usable RAM into the
 * allocator, then reserve the bitmap itself so it is not handed out.
 * NOTE(review): the failure check before panic() is missing from this view.
 */
320 static void __init contig_initmem_init(void)
322 unsigned long bootmap_size, bootmap;
323 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
324 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
326 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
327 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
328 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
329 reserve_bootmem(bootmap, bootmap_size);
333 /* Use inline assembly to define this because the nops are defined
334 as inline assembly strings in the include files and we cannot
335 get them easily into strings. */
/* Emits the concatenated K8 NOP sequences into .data under the label k8nops. */
336 asm("\t.data\nk8nops: "
337 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
340 extern unsigned char k8nops[];
/* k8_nops[i] points at an i-byte NOP sequence inside k8nops[]
   (offsets are cumulative lengths of the preceding sequences);
   NOTE(review): the first table rows are missing from this view. */
341 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
347 k8nops + 1 + 2 + 3 + 4,
348 k8nops + 1 + 2 + 3 + 4 + 5,
349 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
350 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
353 /* Replace instructions with better alternatives for this CPU type.
355 This runs before SMP is initialized to avoid SMP problems with
356 self modifying code. This implies that assymetric systems where
357 APs have less capabilities than the boot processor are not handled.
358 In this case boot with "noreplacement". */
/*
 * start/end delimit an array of struct alt_instr records; each record's
 * replacement is patched in when the CPU has the required feature bit.
 * NOTE(review): declarations of 'a', 'diff', 'i', 'k' and some loop
 * framing are missing from this view.
 */
359 void apply_alternatives(void *start, void *end)
363 for (a = start; (void *)a < end; a++) {
/* Skip records whose required CPU feature is absent. */
364 if (!boot_cpu_has(a->cpuid))
367 BUG_ON(a->replacementlen > a->instrlen);
368 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
369 diff = a->instrlen - a->replacementlen;
371 /* Pad the rest with nops */
372 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
/* Fill the leftover bytes with multi-byte K8 NOPs, largest first. */
376 __inline_memcpy(a->instr + i, k8_nops[k], k);
/* Set by the "noreplacement" boot option to skip instruction patching. */
381 static int no_replacement __initdata = 0;
/* Patch the kernel's own .altinstructions section for this CPU. */
383 void __init alternative_instructions(void)
385 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
388 apply_alternatives(__alt_instructions, __alt_instructions_end);
/* __setup handler for "noreplacement"; body missing from this view. */
391 static int __init noreplacement_setup(char *s)
397 __setup("noreplacement", noreplacement_setup);
399 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
/* BIOS Enhanced Disk Drive info gathered by the real-mode boot code. */
401 struct edd_info edd[EDDMAXNR];
402 unsigned int edd_disk80_sig;
403 #ifdef CONFIG_EDD_MODULE
404 EXPORT_SYMBOL(eddnr);
406 EXPORT_SYMBOL(edd_disk80_sig);
409 * copy_edd() - Copy the BIOS EDD information
410 * from empty_zero_page into a safe place.
413 static inline void copy_edd(void)
416 memcpy(edd, EDD_BUF, sizeof(edd));
417 edd_disk80_sig = DISK80_SIGNATURE;
/* Without EDD support, copy_edd() compiles away to nothing. */
420 #define copy_edd() do {} while (0)
/*
 * Architecture-specific initialization for x86-64, called once from
 * start_kernel().  Copies boot parameters out of the boot sector area,
 * parses the early command line, sets up the e820 memory map and bootmem,
 * reserves special pages (page 0, trampoline, initrd, ACPI sleep area),
 * wires up ACPI/APIC configuration, and registers standard resources.
 * NOTE(review): many interior lines (braces, #endif's, some calls) are
 * missing from this view.
 */
423 void __init setup_arch(char **cmdline_p)
425 unsigned long low_mem_size;
426 unsigned long kernel_end;
/* Pull boot-loader-provided parameters out of the zero page. */
428 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
429 drive_info = DRIVE_INFO;
430 screen_info = SCREEN_INFO;
431 edid_info = EDID_INFO;
432 aux_device_present = AUX_DEVICE_INFO;
433 saved_video_mode = SAVED_VIDEO_MODE;
435 #ifdef CONFIG_BLK_DEV_RAM
436 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
437 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
438 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
440 setup_memory_region();
443 if (!MOUNT_ROOT_RDONLY)
444 root_mountflags &= ~MS_RDONLY;
/* Describe the kernel image to the initial mm. */
445 init_mm.start_code = (unsigned long) &_text;
446 init_mm.end_code = (unsigned long) &_etext;
447 init_mm.end_data = (unsigned long) &_edata;
448 init_mm.brk = (unsigned long) &_end;
450 code_resource.start = virt_to_phys(&_text);
451 code_resource.end = virt_to_phys(&_etext)-1;
452 data_resource.start = virt_to_phys(&_etext);
453 data_resource.end = virt_to_phys(&_edata)-1;
455 parse_cmdline_early(cmdline_p);
458 * partially used pages are not usable - thus
459 * we are rounding upwards:
461 end_pfn = e820_end_of_ram();
465 init_memory_mapping();
467 #ifdef CONFIG_DISCONTIGMEM
468 numa_initmem_init(0, end_pfn);
470 contig_initmem_init();
473 /* Reserve direct mapping */
474 reserve_bootmem_generic(table_start << PAGE_SHIFT,
475 (table_end - table_start) << PAGE_SHIFT);
/* Keep the kernel image itself out of the bootmem allocator. */
478 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
479 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
482 * reserve physical page 0 - it's a special BIOS page on many boxes,
483 * enabling clean reboots, SMP operation, laptop functions.
485 reserve_bootmem_generic(0, PAGE_SIZE);
489 * But first pinch a few for the stack/trampoline stuff
490 * FIXME: Don't need the extra page at 4K, but need to fix
491 * trampoline before removing it. (see the GDT stuff)
493 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
495 /* Reserve SMP trampoline */
496 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
499 #ifdef CONFIG_ACPI_SLEEP
501 * Reserve low memory region for sleep support.
503 acpi_reserve_bootmem();
505 #ifdef CONFIG_X86_LOCAL_APIC
507 * Find and reserve possible boot-time SMP configuration:
511 #ifdef CONFIG_BLK_DEV_INITRD
512 if (LOADER_TYPE && INITRD_START) {
/* Only keep the initrd if it fits entirely below the end of RAM. */
513 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
514 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
516 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
517 initrd_end = initrd_start+INITRD_SIZE;
520 printk(KERN_ERR "initrd extends beyond end of memory "
521 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
522 (unsigned long)(INITRD_START + INITRD_SIZE),
523 (unsigned long)(end_pfn << PAGE_SHIFT));
531 #ifdef CONFIG_ACPI_BOOT
533 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
534 * Must do this after paging_init (due to reliance on fixmap, and thus
535 * the bootmem allocator) but before get_smp_config (to allow parsing
541 #ifdef CONFIG_X86_LOCAL_APIC
543 * get boot-time SMP configuration:
545 if (smp_found_config)
547 init_apic_mappings();
551 * Request address space for all standard RAM and ROM resources
552 * and also for regions reported as reserved by the e820.
555 e820_reserve_resources();
557 request_resource(&iomem_resource, &vram_resource);
561 /* request I/O space for devices used on all i[345]86 PCs */
562 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
563 request_resource(&ioport_resource, standard_io_resources+i);
566 /* Will likely break when you have unassigned resources with more
567 than 4GB memory and bridges that don't support more than 4GB.
568 Doing it properly would require to use pci_alloc_consistent
/* Start PCI MMIO allocation above RAM, rounded up to a 1MB boundary. */
570 low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
571 if (low_mem_size > pci_mem_start)
572 pci_mem_start = low_mem_size;
574 #ifdef CONFIG_GART_IOMMU
579 #if defined(CONFIG_VGA_CONSOLE)
580 conswitchp = &vga_con;
581 #elif defined(CONFIG_DUMMY_CONSOLE)
582 conswitchp = &dummy_con;
/*
 * Read the 48-character CPU marketing name from extended CPUID leaves
 * 0x80000002..0x80000004 into c->x86_model_id, NUL-terminating it.
 * Bails out when leaf 0x80000004 is unsupported; presumably returns 0 on
 * that path and 1 on success — the return statements are missing from
 * this view, so confirm against the full source.
 */
587 static int __init get_model_name(struct cpuinfo_x86 *c)
591 if (cpuid_eax(0x80000000) < 0x80000004)
/* The name is written as 12 consecutive 32-bit CPUID output words. */
594 v = (unsigned int *) c->x86_model_id;
595 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
596 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
597 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
598 c->x86_model_id[48] = 0;
/*
 * Report AMD-style cache/TLB information from extended CPUID leaves
 * 0x80000005 (L1), 0x80000006 (L2), 0x80000007 (power features) and
 * 0x80000008 (address sizes), filling the corresponding cpuinfo fields.
 * NOTE(review): some guard lines are missing from this view.
 */
603 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
605 unsigned int n, dummy, eax, ebx, ecx, edx;
/* Highest supported extended CPUID leaf. */
607 n = cpuid_eax(0x80000000);
609 if (n >= 0x80000005) {
610 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
611 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
612 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
/* Provisional cache size = L1 I + L1 D; overwritten below if L2 info exists. */
613 c->x86_cache_size=(ecx>>24)+(edx>>24);
614 /* DTLB and ITLB together, but only 4K */
615 c->x86_tlbsize = ((ebx>>16)&0xff) + (ebx&0xff);
618 if (n >= 0x80000006) {
619 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
620 ecx = cpuid_ecx(0x80000006);
621 c->x86_cache_size = ecx >> 16;
622 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
624 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
625 c->x86_cache_size, ecx & 0xFF);
629 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
630 if (n >= 0x80000008) {
631 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
/* EAX[15:8] = virtual address bits, EAX[7:0] = physical address bits. */
632 c->x86_virt_bits = (eax >> 8) & 0xff;
633 c->x86_phys_bits = eax & 0xff;
/*
 * AMD-specific CPU setup: normalize the 3DNow feature bit, flag C-stepping
 * K8 parts, fetch the model name (falling back to "Hammer"), and print
 * cache information.  NOTE(review): declarations of 'level' and 'r' and
 * the return path are missing from this view.
 */
638 static int __init init_amd(struct cpuinfo_x86 *c)
643 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
644 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
645 clear_bit(0*32+31, &c->x86_capability);
/* Identify K8 revision C or later by the raw family/model/stepping word. */
648 level = cpuid_eax(1);
649 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
650 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
652 r = get_model_name(c);
656 /* Should distinguish Models here, but this is only
657 a fallback anyways. */
658 strcpy(c->x86_model_id, "Hammer");
662 display_cacheinfo(c);
/*
 * Detect Hyper-Threading: read the logical-processor count from CPUID
 * leaf 1 EBX[23:16], clamp unsupported values, and derive this CPU's
 * physical package id from its initial APIC id.  NOTE(review): loop
 * bodies updating index_lsb/index_msb and some braces are missing from
 * this view.
 */
666 static void __init detect_ht(struct cpuinfo_x86 *c)
669 u32 eax, ebx, ecx, edx;
670 int index_lsb, index_msb, tmp;
672 int cpu = smp_processor_id();
674 if (!cpu_has(c, X86_FEATURE_HT))
677 cpuid(1, &eax, &ebx, &ecx, &edx);
/* EBX[23:16] = number of logical processors per physical package. */
678 smp_num_siblings = (ebx & 0xff0000) >> 16;
680 if (smp_num_siblings == 1) {
681 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
682 } else if (smp_num_siblings > 1) {
686 * At this point we only support two siblings per
689 if (smp_num_siblings > NR_CPUS) {
690 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
691 smp_num_siblings = 1;
/* Find the lowest set bit position of the sibling count... */
694 tmp = smp_num_siblings;
695 while ((tmp & 1) == 0) {
/* ...and the highest set bit position (scan from bit 31 down). */
699 tmp = smp_num_siblings;
700 while ((tmp & 0x80000000 ) == 0) {
/* Differing positions mean a non-power-of-two count; presumably rounded up — confirm. */
704 if (index_lsb != index_msb )
706 initial_apic_id = ebx >> 24 & 0xff;
/* Package id = APIC id with the sibling bits shifted away. */
707 phys_proc_id[cpu] = initial_apic_id >> index_msb;
709 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* Fields of struct _cache_table (declaration head missing from this view). */
723 unsigned char descriptor;
728 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
/* Intel CPUID leaf 2 descriptor byte -> cache level/type and size in KB
   (trace cache sizes in K-uops).  Terminated by a zero descriptor. */
729 static struct _cache_table cache_table[] __initdata =
731 { 0x06, LVL_1_INST, 8 },
732 { 0x08, LVL_1_INST, 16 },
733 { 0x0a, LVL_1_DATA, 8 },
734 { 0x0c, LVL_1_DATA, 16 },
735 { 0x22, LVL_3, 512 },
736 { 0x23, LVL_3, 1024 },
737 { 0x25, LVL_3, 2048 },
738 { 0x29, LVL_3, 4096 },
739 { 0x2c, LVL_1_DATA, 32 },
740 { 0x30, LVL_1_INST, 32 },
741 { 0x39, LVL_2, 128 },
742 { 0x3b, LVL_2, 128 },
743 { 0x3c, LVL_2, 256 },
744 { 0x41, LVL_2, 128 },
745 { 0x42, LVL_2, 256 },
746 { 0x43, LVL_2, 512 },
747 { 0x44, LVL_2, 1024 },
748 { 0x45, LVL_2, 2048 },
749 { 0x66, LVL_1_DATA, 8 },
750 { 0x67, LVL_1_DATA, 16 },
751 { 0x68, LVL_1_DATA, 32 },
752 { 0x70, LVL_TRACE, 12 },
753 { 0x71, LVL_TRACE, 16 },
754 { 0x72, LVL_TRACE, 32 },
755 { 0x79, LVL_2, 128 },
756 { 0x7a, LVL_2, 256 },
757 { 0x7b, LVL_2, 512 },
758 { 0x7c, LVL_2, 1024 },
759 { 0x82, LVL_2, 256 },
760 { 0x83, LVL_2, 512 },
761 { 0x84, LVL_2, 1024 },
762 { 0x85, LVL_2, 2048 },
763 { 0x86, LVL_2, 512 },
764 { 0x87, LVL_2, 1024 },
/*
 * Intel-specific CPU setup: walk the CPUID leaf 2 cache descriptors,
 * accumulating L1 I/D, L2, L3 and trace cache sizes via cache_table[],
 * then read address sizes from extended leaf 0x80000008 and set the
 * cache alignment to twice the CLFLUSH line size.
 * NOTE(review): several interior lines (declarations of regs/n/i/j/k,
 * braces, break statements) are missing from this view; only the
 * mis-encoded "&regs" tokens on the cpuid() call were repaired.
 */
768 static void __init init_intel(struct cpuinfo_x86 *c)
771 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
774 if (c->cpuid_level > 1) {
775 /* supports eax=2 call */
/* View the four CPUID output registers as 16 descriptor bytes. */
778 unsigned char *dp = (unsigned char *)regs;
780 /* Number of times to iterate */
781 n = cpuid_eax(2) & 0xFF;
783 for ( i = 0 ; i < n ; i++ ) {
/* Fixed: source had "®s[...]" — an HTML-entity mis-encoding of "&regs[...]". */
784 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
786 /* If bit 31 is set, this is an unknown format */
787 for ( j = 0 ; j < 3 ; j++ ) {
/* regs[] is signed, so "< 0" tests bit 31; zero out unknown-format registers. */
788 if ( regs[j] < 0 ) regs[j] = 0;
791 /* Byte 0 is level count, not a descriptor */
792 for ( j = 1 ; j < 16 ; j++ ) {
793 unsigned char des = dp[j];
796 /* look up this descriptor in the table */
797 while (cache_table[k].descriptor != 0)
799 if (cache_table[k].descriptor == des) {
800 switch (cache_table[k].cache_type) {
802 l1i += cache_table[k].size;
805 l1d += cache_table[k].size;
808 l2 += cache_table[k].size;
811 l3 += cache_table[k].size;
814 trace += cache_table[k].size;
827 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
829 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
831 printk(", L1 D cache: %dK\n", l1d);
835 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
837 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
/* Prefer the L2 size; fall back to combined L1 when no L2 was reported. */
839 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
842 n = cpuid_eax(0x80000000);
843 if (n >= 0x80000008) {
844 unsigned eax = cpuid_eax(0x80000008);
/* EAX[15:8] = virtual address bits, EAX[7:0] = physical address bits. */
845 c->x86_virt_bits = (eax >> 8) & 0xff;
846 c->x86_phys_bits = eax & 0xff;
850 c->x86_cache_alignment = c->x86_clflush_size * 2;
/*
 * Map the 12-character CPUID vendor string to an X86_VENDOR_* constant;
 * anything other than AMD or Intel is classed as unknown.
 * NOTE(review): the 'else' line before the UNKNOWN assignment is missing
 * from this view.
 */
853 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
855 char *v = c->x86_vendor_id;
857 if (!strcmp(v, "AuthenticAMD"))
858 c->x86_vendor = X86_VENDOR_AMD;
859 else if (!strcmp(v, "GenuineIntel"))
860 c->x86_vendor = X86_VENDOR_INTEL;
862 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Per-family table of up to 16 model name strings (other fields missing from this view). */
865 struct cpu_model_info {
868 char *model_names[16];
871 /* Do some early cpuid on the boot CPU to get some parameter that are
872 needed before check_bugs. Everything advanced is in identify_cpu
/*
 * Resets every cpuinfo field to an "unknown" default, then fills in the
 * vendor string (CPUID leaf 0), family/model/stepping and the basic
 * capability words (CPUID leaf 1).  NOTE(review): declarations of
 * tfms/misc and some guard lines are missing from this view.
 */
874 void __init early_identify_cpu(struct cpuinfo_x86 *c)
878 c->loops_per_jiffy = loops_per_jiffy;
879 c->x86_cache_size = -1;
880 c->x86_vendor = X86_VENDOR_UNKNOWN;
881 c->x86_model = c->x86_mask = 0; /* So far unknown... */
882 c->x86_vendor_id[0] = '\0'; /* Unset */
883 c->x86_model_id[0] = '\0'; /* Unset */
884 c->x86_clflush_size = 64;
885 c->x86_cache_alignment = c->x86_clflush_size;
886 memset(&c->x86_capability, 0, sizeof c->x86_capability);
888 /* Get vendor name */
/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order — hence the 0/8/4 offsets. */
889 cpuid(0x00000000, &c->cpuid_level,
890 (int *)&c->x86_vendor_id[0],
891 (int *)&c->x86_vendor_id[8],
892 (int *)&c->x86_vendor_id[4]);
896 /* Initialize the standard set of capabilities */
897 /* Note that the vendor-specific code below might override */
899 /* Intel-defined flags: level 0x00000001 */
900 if (c->cpuid_level >= 0x00000001) {
902 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
903 &c->x86_capability[0]);
904 c->x86 = (tfms >> 8) & 0xf;
905 c->x86_model = (tfms >> 4) & 0xf;
906 c->x86_mask = tfms & 0xf;
/* Extended family/model fields — presumably guarded by a family==0xf check on a missing line; confirm. */
908 c->x86 += (tfms >> 20) & 0xff;
909 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* Capability bit 19 = CLFLUSH present; MISC[15:8] is the line size in 8-byte units. */
911 if (c->x86_capability[0] & (1<<19))
912 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
914 /* Have CPUID level 0 only - unheard of */
920 * This does the hard work of actually picking apart the CPU stuff...
/*
 * Full CPU identification: early basics, AMD- and Transmeta-defined
 * capability words, vendor-specific init (init_amd/init_intel), idle
 * routine selection, and on secondary CPUs the intersection of feature
 * flags into boot_cpu_data.  NOTE(review): declarations of i/xlvl,
 * switch-case labels and several braces are missing from this view.
 */
922 void __init identify_cpu(struct cpuinfo_x86 *c)
927 early_identify_cpu(c);
929 /* AMD-defined flags: level 0x80000001 */
930 xlvl = cpuid_eax(0x80000000);
/* A supported extended range reports 0x8000xxxx as its maximum leaf. */
931 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
932 if ( xlvl >= 0x80000001 )
933 c->x86_capability[1] = cpuid_edx(0x80000001);
934 if ( xlvl >= 0x80000004 )
935 get_model_name(c); /* Default name */
938 /* Transmeta-defined flags: level 0x80860001 */
939 xlvl = cpuid_eax(0x80860000);
940 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
941 if ( xlvl >= 0x80860001 )
942 c->x86_capability[2] = cpuid_edx(0x80860001);
946 * Vendor-specific initialization. In this section we
947 * canonicalize the feature flags, meaning if there are
948 * features a certain CPU supports which CPUID doesn't
949 * tell us, CPUID claiming incorrect flags, or other bugs,
950 * we handle them here.
952 * At the end of this section, c->x86_capability better
953 * indicate the features this CPU genuinely supports!
955 switch ( c->x86_vendor ) {
961 case X86_VENDOR_INTEL:
965 case X86_VENDOR_UNKNOWN:
967 display_cacheinfo(c);
971 select_idle_routine(c);
975 * On SMP, boot_cpu_data holds the common feature set between
976 * all CPUs; so make sure that we indicate which features are
977 * common between the CPUs. The first time this routine gets
978 * executed, c == &boot_cpu_data.
980 if ( c != &boot_cpu_data ) {
981 /* AND the already accumulated flags with these */
982 for ( i = 0 ; i < NCAPINTS ; i++ )
983 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
/* Print the model name (when known) and stepping for one CPU at boot. */
990 void __init print_cpu_info(struct cpuinfo_x86 *c)
992 if (c->x86_model_id[0])
993 printk("%s", c->x86_model_id);
995 if (c->x86_mask || c->cpuid_level >= 0)
996 printk(" stepping %02x\n", c->x86_mask);
1002 * Get CPU information for use by the procfs.
/*
 * seq_file .show callback producing one /proc/cpuinfo record per CPU:
 * identity, cache, sibling info, feature flag names, bogomips, TLB and
 * power-management flags.  NOTE(review): some lines (declaration of 'i',
 * a few seq_printf format strings, braces) are missing from this view.
 */
1005 static int show_cpuinfo(struct seq_file *m, void *v)
1007 struct cpuinfo_x86 *c = v;
1010 * These flag bits must match the definitions in <asm/cpufeature.h>.
1011 * NULL means this bit is undefined or reserved; either way it doesn't
1012 * have meaning as far as Linux is concerned. Note that it's important
1013 * to realize there is a difference between this table and CPUID -- if
1014 * applications want to get the raw CPUID data, they should access
1015 * /dev/cpu/<cpu_nr>/cpuid instead.
1017 static char *x86_cap_flags[] = {
/* Intel-defined (word 0) */
1019 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1020 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1021 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1022 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
/* AMD-defined (word 1) */
1025 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1026 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1028 NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1030 /* Transmeta-defined */
1031 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1036 /* Other (Linux-defined) */
1037 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
1038 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1039 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1040 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1042 /* Intel-defined (#2) */
1043 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
1044 "est", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
1045 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1046 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Names for bits of c->x86_power (CPUID 0x80000007 EDX). */
1048 static char *x86_power_flags[] = {
1049 "ts", /* temperature sensor */
1050 "fid", /* frequency id control */
1051 "vid", /* voltage id control */
1052 "ttp", /* thermal trip */
/* Offline CPUs produce no /proc/cpuinfo entry. */
1057 if (!cpu_online(c-cpu_data))
1061 seq_printf(m,"processor\t: %u\n"
1063 "cpu family\t: %d\n"
1065 "model name\t: %s\n",
1066 (unsigned)(c-cpu_data),
1067 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1070 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1072 if (c->x86_mask || c->cpuid_level >= 0)
1073 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1075 seq_printf(m, "stepping\t: unknown\n");
1077 if (cpu_has(c,X86_FEATURE_TSC)) {
1078 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1079 cpu_khz / 1000, (cpu_khz % 1000));
/* x86_cache_size stays -1 when never detected; only print real values. */
1083 if (c->x86_cache_size >= 0)
1084 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1086 #ifdef CONFIG_X86_HT
1088 seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
1089 seq_printf(m, "siblings\t: %d\n", smp_num_siblings);
1095 "fpu_exception\t: yes\n"
1096 "cpuid level\t: %d\n"
/* Emit the name of every set, named capability bit. */
1103 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1104 if ( test_bit(i, &c->x86_capability) &&
1105 x86_cap_flags[i] != NULL )
1106 seq_printf(m, " %s", x86_cap_flags[i]);
1109 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1110 c->loops_per_jiffy/(500000/HZ),
1111 (c->loops_per_jiffy/(5000/HZ)) % 100);
1113 if (c->x86_tlbsize > 0)
1114 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1115 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1116 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1118 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1119 c->x86_phys_bits, c->x86_virt_bits);
1121 seq_printf(m, "power management:");
/* Unnamed power bits are printed by index, e.g. "[5]". */
1124 for (i = 0; i < 32; i++)
1125 if (c->x86_power & (1 << i)) {
1126 if (i < ARRAY_SIZE(x86_power_flags))
1127 seq_printf(m, " %s", x86_power_flags[i]);
1129 seq_printf(m, " [%d]", i);
1133 seq_printf(m, "\n\n");
/* seq_file iterator start: return the cpuinfo record at *pos, or NULL past the last CPU. */
1138 static void *c_start(struct seq_file *m, loff_t *pos)
1140 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* seq_file iterator step: advance to the next record (presumably after a ++*pos on a missing line — confirm). */
1143 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1146 return c_start(m, pos);
/* seq_file iterator stop: nothing to release. */
1149 static void c_stop(struct seq_file *m, void *v)
1153 struct seq_operations cpuinfo_op = {
1157 .show = show_cpuinfo,