2 * linux/arch/x86-64/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
13 * This file handles the architecture-dependent parts of initialization
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/crash_dump.h>
39 #include <linux/root_dev.h>
40 #include <linux/pci.h>
41 #include <linux/acpi.h>
42 #include <linux/kallsyms.h>
43 #include <linux/edd.h>
44 #include <linux/mmzone.h>
45 #include <linux/kexec.h>
46 #include <linux/cpufreq.h>
47 #include <linux/dmi.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/ctype.h>
52 #include <asm/uaccess.h>
53 #include <asm/system.h>
58 #include <video/edid.h>
61 #include <asm/mpspec.h>
62 #include <asm/mmu_context.h>
63 #include <asm/bootsetup.h>
64 #include <asm/proto.h>
65 #include <asm/setup.h>
66 #include <asm/mach_apic.h>
68 #include <asm/swiotlb.h>
69 #include <asm/sections.h>
70 #include <asm/gart-mapping.h>
73 #include <linux/percpu.h>
74 #include <xen/interface/physdev.h>
75 #include "setup_arch_pre.h"
76 #include <asm/hypervisor.h>
77 #include <xen/interface/nmi.h>
78 #include <xen/features.h>
79 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
80 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
81 #include <asm/mach-xen/setup_arch_post.h>
82 #include <xen/interface/memory.h>
/*
 * Boot-time globals shared between this file and the rest of the kernel /
 * the Xen support code.
 *
 * NOTE(review): the embedded listing numbers (84, 85, 87, ...) skip values,
 * so some declarations (e.g. the closing "};" of xen_panic_block and of
 * struct sys_desc_table_struct) appear to have been elided from this copy.
 */
84 extern unsigned long start_pfn;
85 extern struct edid_info edid_info;
/* Shared info page; points at empty_zero_page until the hypervisor maps it. */
87 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
88 EXPORT_SYMBOL(HYPERVISOR_shared_info);
90 extern char hypercall_page[PAGE_SIZE];
91 EXPORT_SYMBOL(hypercall_page);
93 /* Allows setting of maximum possible memory size */
94 unsigned long xen_override_max_pfn;
/* Panic notifier: forwards kernel panics to the hypervisor (see xen_panic_event). */
96 static int xen_panic_event(struct notifier_block *, unsigned long, void *);
97 static struct notifier_block xen_panic_block = {
98 xen_panic_event, NULL, 0 /* try to go last */
/* P2M (pfn -> mfn) translation table and the frame lists describing it. */
101 unsigned long *phys_to_machine_mapping;
102 unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
104 EXPORT_SYMBOL(phys_to_machine_mapping);
/* Per-CPU multicall batching state. */
106 DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
107 DEFINE_PER_CPU(int, nr_multicall_ents);
109 /* Raw start-of-day parameters from the hypervisor. */
110 start_info_t *xen_start_info;
111 EXPORT_SYMBOL(xen_start_info);
118 struct cpuinfo_x86 boot_cpu_data __read_mostly;
120 unsigned long mmu_cr4_features;
123 EXPORT_SYMBOL(acpi_disabled);
125 extern int __initdata acpi_ht;
126 extern acpi_interrupt_flags acpi_sci_flags;
127 int __initdata acpi_force = 0;
130 int acpi_numa __initdata;
132 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
135 unsigned long saved_video_mode;
141 char dmi_alloc_data[DMI_MAX_DATA];
146 struct screen_info screen_info;
147 struct sys_desc_table_struct {
148 unsigned short length;
149 unsigned char table[0];
152 struct edid_info edid_info;
155 extern int root_mountflags;
/* Kernel command line buffer filled in by parse_cmdline_early(). */
157 char command_line[COMMAND_LINE_SIZE];
/*
 * Legacy PC I/O port regions claimed at boot so drivers cannot clash with
 * the motherboard devices (DMA controllers, PICs, PIT, keyboard, FPU).
 * NOTE(review): the closing "};" of the array appears elided in this listing.
 */
159 struct resource standard_io_resources[] = {
160 { .name = "dma1", .start = 0x00, .end = 0x1f,
161 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
162 { .name = "pic1", .start = 0x20, .end = 0x21,
163 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
164 { .name = "timer0", .start = 0x40, .end = 0x43,
165 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
166 { .name = "timer1", .start = 0x50, .end = 0x53,
167 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
168 { .name = "keyboard", .start = 0x60, .end = 0x6f,
169 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
170 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
171 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
172 { .name = "pic2", .start = 0xa0, .end = 0xa1,
173 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
174 { .name = "dma2", .start = 0xc0, .end = 0xdf,
175 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
176 { .name = "fpu", .start = 0xf0, .end = 0xff,
177 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
/* Element count of the array above (sizeof trick only valid on true arrays). */
180 #define STANDARD_IO_RESOURCES \
181 (sizeof standard_io_resources / sizeof standard_io_resources[0])
183 #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
/*
 * Kernel text/data and legacy BIOS ROM resource descriptors. The .start/.end
 * fields of code/data_resource are filled in later by setup_arch(); the ROM
 * resources are populated by probe_roms().
 * NOTE(review): several closing "};", ".start"/".end" initializer lines and
 * matching "#endif" directives are elided in this listing (numbering gaps).
 */
185 struct resource data_resource = {
186 .name = "Kernel data",
189 .flags = IORESOURCE_RAM,
191 struct resource code_resource = {
192 .name = "Kernel code",
195 .flags = IORESOURCE_RAM,
198 #define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
/* ROM probing only makes sense native or in a privileged (dom0) Xen guest. */
200 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
201 static struct resource system_rom_resource = {
202 .name = "System ROM",
205 .flags = IORESOURCE_ROM,
208 static struct resource extension_rom_resource = {
209 .name = "Extension ROM",
212 .flags = IORESOURCE_ROM,
/* Up to six option-ROM slots; only slot 0 has a fixed historical start. */
215 static struct resource adapter_rom_resources[] = {
216 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
217 .flags = IORESOURCE_ROM },
218 { .name = "Adapter ROM", .start = 0, .end = 0,
219 .flags = IORESOURCE_ROM },
220 { .name = "Adapter ROM", .start = 0, .end = 0,
221 .flags = IORESOURCE_ROM },
222 { .name = "Adapter ROM", .start = 0, .end = 0,
223 .flags = IORESOURCE_ROM },
224 { .name = "Adapter ROM", .start = 0, .end = 0,
225 .flags = IORESOURCE_ROM },
226 { .name = "Adapter ROM", .start = 0, .end = 0,
227 .flags = IORESOURCE_ROM }
231 #define ADAPTER_ROM_RESOURCES \
232 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
234 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
235 static struct resource video_rom_resource = {
239 .flags = IORESOURCE_ROM,
243 static struct resource video_ram_resource = {
244 .name = "Video RAM area",
247 .flags = IORESOURCE_RAM,
250 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
/* A ROM image starts with the 0xAA55 signature word. */
251 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
/*
 * Verify a ROM image checksum over 'length' bytes starting at 'rom'.
 * NOTE(review): the loop body that accumulates into 'sum' and the final
 * return (presumably "!sum" — all ROM bytes must sum to 0 mod 256) are
 * elided in this listing; confirm against the original source.
 */
253 static int __init romchecksum(unsigned char *rom, unsigned long length)
255 unsigned char *p, sum = 0;
257 for (p = rom; p < rom + length; p++)
/*
 * Scan legacy BIOS address space for the video ROM, system ROM, extension
 * ROM and adapter (option) ROMs, registering each found region in
 * iomem_resource. ROMs are located on 2K boundaries by their 0xAA55
 * signature; the length byte at rom[2] (in 512-byte units) is trusted only
 * when the checksum verifies.
 * NOTE(review): several lines (local declarations such as 'rom'/'i',
 * 'continue'/'break' statements and closing braces) are elided here.
 */
262 static void __init probe_roms(void)
264 unsigned long start, length, upper;
/* video rom: scan 2K-aligned addresses below the first adapter slot */
269 upper = adapter_rom_resources[0].start;
270 for (start = video_rom_resource.start; start < upper; start += 2048) {
271 rom = isa_bus_to_virt(start);
272 if (!romsignature(rom))
275 video_rom_resource.start = start;
277 /* 0 < length <= 0x7f * 512, historically */
278 length = rom[2] * 512;
280 /* if checksum okay, trust length byte */
281 if (length && romchecksum(rom, length))
282 video_rom_resource.end = start + length - 1;
284 request_resource(&iomem_resource, &video_rom_resource);
/* round the adapter scan start up to the next 2K boundary past the video ROM */
288 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
293 request_resource(&iomem_resource, &system_rom_resource);
294 upper = system_rom_resource.start;
296 /* check for extension rom (ignore length byte!) */
297 rom = isa_bus_to_virt(extension_rom_resource.start);
298 if (romsignature(rom)) {
299 length = extension_rom_resource.end - extension_rom_resource.start + 1;
300 if (romchecksum(rom, length)) {
301 request_resource(&iomem_resource, &extension_rom_resource);
302 upper = extension_rom_resource.start;
306 /* check for adapter roms on 2k boundaries */
307 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
308 rom = isa_bus_to_virt(start);
309 if (!romsignature(rom))
312 /* 0 < length <= 0x7f * 512, historically */
313 length = rom[2] * 512;
315 /* but accept any length that fits if checksum okay */
316 if (!length || start + length > upper || !romchecksum(rom, length))
319 adapter_rom_resources[i].start = start;
320 adapter_rom_resources[i].end = start + length - 1;
321 request_resource(&iomem_resource, &adapter_rom_resources[i]);
/* continue the scan just past this ROM, 2K-aligned */
323 start = adapter_rom_resources[i++].end & ~2047UL;
328 /* Check for full argument with no trailing characters */
/*
 * Returns nonzero when 'p' begins with the exact token 'arg' followed by
 * end-of-string or whitespace (so "acpi=off" does not match "acpi=offx").
 * NOTE(review): the declaration of 'l' (presumably strlen(arg)) is elided
 * from this listing.
 */
329 static int fullarg(char *p, char *arg)
332 return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
/*
 * Early kernel command-line parser: walks COMMAND_LINE and handles options
 * that must take effect before the regular __setup() machinery runs
 * (maxcpus, acpi*, apic/ioapic, mem/memmap, numa, iommu, noexec,
 * crashkernel, elfcorehdr, additional_cpus). The (possibly edited) line is
 * copied into command_line and returned via *cmdline_p.
 * NOTE(review): the main scan loop header, several assignments inside the
 * option branches, and closing braces are elided in this listing — the
 * visible branches show the option names handled, not complete logic.
 */
335 static __init void parse_cmdline_early (char ** cmdline_p)
337 char c = ' ', *to = command_line, *from = COMMAND_LINE;
347 * If the BIOS enumerates physical processors before logical,
348 * maxcpus=N at enumeration-time can be used to disable HT.
350 else if (!memcmp(from, "maxcpus=", 8)) {
351 extern unsigned int maxcpus;
353 maxcpus = simple_strtoul(from + 8, NULL, 0);
357 /* "acpi=off" disables both ACPI table parsing and interpreter init */
358 if (fullarg(from,"acpi=off"))
361 if (fullarg(from, "acpi=force")) {
362 /* add later when we do DMI horrors: */
367 /* acpi=ht just means: do ACPI MADT parsing
368 at bootup, but don't enable the full ACPI interpreter */
369 if (fullarg(from, "acpi=ht")) {
374 else if (fullarg(from, "pci=noacpi"))
376 else if (fullarg(from, "acpi=noirq"))
/* SCI trigger/polarity overrides (values match the MADT flag encodings) */
379 else if (fullarg(from, "acpi_sci=edge"))
380 acpi_sci_flags.trigger = 1;
381 else if (fullarg(from, "acpi_sci=level"))
382 acpi_sci_flags.trigger = 3;
383 else if (fullarg(from, "acpi_sci=high"))
384 acpi_sci_flags.polarity = 1;
385 else if (fullarg(from, "acpi_sci=low"))
386 acpi_sci_flags.polarity = 3;
388 /* acpi=strict disables out-of-spec workarounds */
389 else if (fullarg(from, "acpi=strict")) {
392 #ifdef CONFIG_X86_IO_APIC
393 else if (fullarg(from, "acpi_skip_timer_override"))
394 acpi_skip_timer_override = 1;
399 if (fullarg(from, "disable_timer_pin_1"))
400 disable_timer_pin_1 = 1;
401 if (fullarg(from, "enable_timer_pin_1"))
402 disable_timer_pin_1 = -1;
/* nolapic/disableapic: mask the APIC feature bit entirely */
404 if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
405 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
409 if (fullarg(from, "noapic"))
410 skip_ioapic_setup = 1;
412 if (fullarg(from,"apic")) {
413 skip_ioapic_setup = 0;
418 if (!memcmp(from, "mem=", 4))
419 parse_memopt(from+4, &from);
421 if (!memcmp(from, "memmap=", 7)) {
422 /* exactmap option is for used defined memory */
423 if (!memcmp(from+7, "exactmap", 8)) {
424 #ifdef CONFIG_CRASH_DUMP
425 /* If we are doing a crash dump, we
426 * still need to know the real mem
427 * size before original memory map is
430 saved_max_pfn = e820_end_of_ram();
438 parse_memmapopt(from+7, &from);
444 if (!memcmp(from, "numa=", 5))
448 if (!memcmp(from,"iommu=",6)) {
452 if (fullarg(from,"oops=panic"))
455 if (!memcmp(from, "noexec=", 7))
456 nonx_setup(from + 7);
459 /* crashkernel=size@addr specifies the location to reserve for
460 * a crash kernel. By reserving this memory we guarantee
461 * that linux never set's it up as a DMA target.
462 * Useful for holding code to do something appropriate
463 * after a kernel panic.
465 else if (!memcmp(from, "crashkernel=", 12)) {
466 unsigned long size, base;
467 size = memparse(from+12, &from);
469 base = memparse(from+1, &from);
470 /* FIXME: Do I want a sanity check
471 * to validate the memory range?
473 crashk_res.start = base;
474 crashk_res.end = base + size - 1;
479 #ifdef CONFIG_PROC_VMCORE
480 /* elfcorehdr= specifies the location of elf core header
481 * stored by the crashed kernel. This option will be passed
482 * by kexec loader to the capture kernel.
484 else if(!memcmp(from, "elfcorehdr=", 11))
485 elfcorehdr_addr = memparse(from+11, &from);
488 #if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
489 else if (!memcmp(from, "additional_cpus=", 16))
490 setup_additional_cpus(from+16);
/* stop copying once the destination buffer is full */
497 if (COMMAND_LINE_SIZE <= ++len)
502 printk(KERN_INFO "user-defined physical RAM map:\n");
503 e820_print_map("user");
506 *cmdline_p = command_line;
/*
 * Set up the boot-memory allocator for the contiguous (non-NUMA) case:
 * find space for the bootmem bitmap in the e820 map, initialize it, free
 * the usable RAM into it, then reserve the bitmap itself.
 * Under Xen only the pages actually granted (xen_start_info->nr_pages) are
 * freed; natively all of [0, end_pfn) is freed.
 * NOTE(review): the return type / "static" qualifier line, the panic
 * condition check and the closing brace are elided in this listing.
 */
511 contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
513 unsigned long bootmap_size, bootmap;
515 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
516 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
518 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
519 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
521 e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
523 e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
525 reserve_bootmem(bootmap, bootmap_size);
529 /* Use inline assembly to define this because the nops are defined
530 as inline assembly strings in the include files and we cannot
531 get them easily into strings. */
/* Emit the K8 NOP sequences of length 1..8 back-to-back into .data. */
532 asm("\t.data\nk8nops: "
533 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
/*
 * k8_nops[n] points at the n-byte NOP inside the packed k8nops blob; the
 * offsets are cumulative lengths of the preceding sequences.
 * NOTE(review): entries for lengths 1-4 are elided in this listing.
 */
536 extern unsigned char k8nops[];
537 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
543 k8nops + 1 + 2 + 3 + 4,
544 k8nops + 1 + 2 + 3 + 4 + 5,
545 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
546 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/* Base of the vsyscall page image in the kernel, used by apply_alternatives. */
549 extern char __vsyscall_0;
551 /* Replace instructions with better alternatives for this CPU type.
553 This runs before SMP is initialized to avoid SMP problems with
554 self modifying code. This implies that assymetric systems where
555 APs have less capabilities than the boot processor are not handled.
556 In this case boot with "noreplacement". */
/*
 * Walk the struct alt_instr records in [start, end): for each record whose
 * CPUID feature bit is set, copy the replacement bytes over the original
 * instruction and pad the remainder with K8 NOPs from k8_nops[].
 * NOTE(review): local declarations ('a', 'instr', 'diff', 'i', 'k') and
 * some loop-body lines are elided in this listing.
 */
557 void apply_alternatives(void *start, void *end)
561 for (a = start; (void *)a < end; a++) {
564 if (!boot_cpu_has(a->cpuid))
567 BUG_ON(a->replacementlen > a->instrlen);
569 /* vsyscall code is not mapped yet. resolve it manually. */
570 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
571 instr -= VSYSCALL_START - (unsigned long)&__vsyscall_0;
572 __inline_memcpy(instr, a->replacement, a->replacementlen);
573 diff = a->instrlen - a->replacementlen;
575 /* Pad the rest with nops */
576 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
580 __inline_memcpy(instr + i, k8_nops[k], k);
/* Set by the "noreplacement" boot option to skip instruction patching. */
585 static int no_replacement __initdata = 0;
/*
 * Apply the whole __alt_instructions section at boot.
 * NOTE(review): the check of no_replacement before applying, the bodies of
 * noreplacement_setup, and closing braces are elided in this listing.
 */
587 void __init alternative_instructions(void)
589 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
592 apply_alternatives(__alt_instructions, __alt_instructions_end);
595 static int __init noreplacement_setup(char *s)
601 __setup("noreplacement", noreplacement_setup);
/*
 * BIOS Enhanced Disk Drive (EDD) data capture: copy the EDD info and MBR
 * signatures out of the boot_params area before it is reclaimed. The stub
 * variant below compiles to nothing when EDD support is not configured.
 * NOTE(review): the EXPORT_SYMBOL for 'edd' under CONFIG_EDD_MODULE, the
 * empty stub body and matching #endif lines are elided in this listing.
 */
603 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
605 #ifdef CONFIG_EDD_MODULE
609 * copy_edd() - Copy the BIOS EDD information
610 * from boot_params into a safe place.
613 static inline void copy_edd(void)
615 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
616 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
617 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
618 edd.edd_info_nr = EDD_NR;
621 static inline void copy_edd(void)
/*
 * Locate the Extended BIOS Data Area: the BIOS stores a real-mode segment
 * pointer to it at 0x40E. The address is converted, its first word gives
 * the size in KB, and the result is rounded to whole pages and clamped.
 * NOTE(review): the "ebda_addr <<= 4" segment->linear conversion, the
 * "ebda_size <<= 10" KB->bytes step, the clamp assignment after the 64K
 * check, and the closing brace are elided in this listing — confirm against
 * the original source.
 */
627 #define EBDA_ADDR_POINTER 0x40E
629 unsigned __initdata ebda_addr;
630 unsigned __initdata ebda_size;
632 static void discover_ebda(void)
635 * there is a real-mode segmented pointer pointing to the
636 * 4K EBDA area at 0x40E
638 ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
641 ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
643 /* Round EBDA up to pages */
647 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
648 if (ebda_size > 64*1024)
/*
 * setup_arch() - architecture-dependent boot initialization for x86-64/Xen.
 *
 * Responsibilities visible in this listing: register the Xen panic
 * notifier, capture boot parameters (screen/EDID/video mode/ramdisk),
 * build the memory map and bootmem allocator, reserve special regions
 * (direct mapping tables, kernel image, page 0, EBDA, trampoline, initrd,
 * crash kernel), build the Xen P2M table and its frame lists, do early
 * ACPI/APIC parsing, and claim standard I/O and e820 resources.
 *
 * NOTE(review): this listing is heavily elided — many locals (i, j, k, fpp),
 * #else/#endif pairs, closing braces and whole statements are missing, so
 * the visible structure is indicative, not complete.
 */
653 void __init setup_arch(char **cmdline_p)
655 unsigned long kernel_end;
657 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
658 struct e820entry *machine_e820;
659 struct xen_memory_map memmap;
663 /* Register a call for panic conditions. */
664 atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
666 ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
667 kernel_end = 0; /* dummy */
668 screen_info = SCREEN_INFO;
/* dom0 gets a hand-built VGA text-mode screen_info; domU gets no VGA */
670 if (xen_start_info->flags & SIF_INITDOMAIN) {
671 /* This is drawn from a dump from vgacon:startup in
673 screen_info.orig_video_mode = 3;
674 screen_info.orig_video_isVGA = 1;
675 screen_info.orig_video_lines = 25;
676 screen_info.orig_video_cols = 80;
677 screen_info.orig_video_ega_bx = 3;
678 screen_info.orig_video_points = 16;
680 screen_info.orig_video_isVGA = 0;
682 edid_info = EDID_INFO;
683 saved_video_mode = SAVED_VIDEO_MODE;
684 bootloader_type = LOADER_TYPE;
686 #ifdef CONFIG_BLK_DEV_RAM
687 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
688 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
689 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
694 setup_xen_features();
695 /* ask the hypervisor to emulate writable page tables for us */
696 HYPERVISOR_vm_assist(VMASST_CMD_enable,
697 VMASST_TYPE_writable_pagetables);
/* non-Xen (native) parameter capture path */
701 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
702 screen_info = SCREEN_INFO;
703 edid_info = EDID_INFO;
704 saved_video_mode = SAVED_VIDEO_MODE;
705 bootloader_type = LOADER_TYPE;
707 #ifdef CONFIG_BLK_DEV_RAM
708 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
709 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
710 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
712 #endif /* !CONFIG_XEN */
713 setup_memory_region();
716 if (!MOUNT_ROOT_RDONLY)
717 root_mountflags &= ~MS_RDONLY;
718 init_mm.start_code = (unsigned long) &_text;
719 init_mm.end_code = (unsigned long) &_etext;
720 init_mm.end_data = (unsigned long) &_edata;
721 init_mm.brk = (unsigned long) &_end;
723 code_resource.start = virt_to_phys(&_text);
724 code_resource.end = virt_to_phys(&_etext)-1;
725 data_resource.start = virt_to_phys(&_etext);
726 data_resource.end = virt_to_phys(&_edata)-1;
728 parse_cmdline_early(cmdline_p);
730 early_identify_cpu(&boot_cpu_data);
733 * partially used pages are not usable - thus
734 * we are rounding upwards:
736 end_pfn = e820_end_of_ram();
737 num_physpages = end_pfn; /* for pfn_valid */
745 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
747 #ifdef CONFIG_ACPI_NUMA
749 * Parse SRAT to discover nodes.
755 numa_initmem_init(0, end_pfn);
757 contig_initmem_init(0, end_pfn);
760 /* Reserve direct mapping */
761 reserve_bootmem_generic(table_start << PAGE_SHIFT,
762 (table_end - table_start) << PAGE_SHIFT);
765 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
766 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
769 /* reserve physmap, start info and initial page tables */
770 reserve_bootmem(kernel_end, (table_start<<PAGE_SHIFT)-kernel_end);
773 * reserve physical page 0 - it's a special BIOS page on many boxes,
774 * enabling clean reboots, SMP operation, laptop functions.
776 reserve_bootmem_generic(0, PAGE_SIZE);
778 /* reserve ebda region */
780 reserve_bootmem_generic(ebda_addr, ebda_size);
785 * But first pinch a few for the stack/trampoline stuff
786 * FIXME: Don't need the extra page at 4K, but need to fix
787 * trampoline before removing it. (see the GDT stuff)
789 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
791 /* Reserve SMP trampoline */
792 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
795 #ifdef CONFIG_ACPI_SLEEP
797 * Reserve low memory region for sleep support.
799 acpi_reserve_bootmem();
/* Xen initrd path: module already in memory, only sanity-check the range */
802 #ifdef CONFIG_BLK_DEV_INITRD
803 if (xen_start_info->mod_start) {
804 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
805 /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
806 initrd_start = INITRD_START + PAGE_OFFSET;
807 initrd_end = initrd_start+INITRD_SIZE;
808 initrd_below_start_ok = 1;
810 printk(KERN_ERR "initrd extends beyond end of memory "
811 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
812 (unsigned long)(INITRD_START + INITRD_SIZE),
813 (unsigned long)(end_pfn << PAGE_SHIFT));
818 #else /* CONFIG_XEN */
819 #ifdef CONFIG_BLK_DEV_INITRD
820 if (LOADER_TYPE && INITRD_START) {
821 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
822 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
824 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
825 initrd_end = initrd_start+INITRD_SIZE;
828 printk(KERN_ERR "initrd extends beyond end of memory "
829 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
830 (unsigned long)(INITRD_START + INITRD_SIZE),
831 (unsigned long)(end_pfn << PAGE_SHIFT));
836 #endif /* !CONFIG_XEN */
838 if (crashk_res.start != crashk_res.end) {
839 reserve_bootmem(crashk_res.start,
840 crashk_res.end - crashk_res.start + 1);
845 #ifdef CONFIG_X86_LOCAL_APIC
847 * Find and reserve possible boot-time SMP configuration:
/* Build the P2M table unless the hypervisor translates for us */
855 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
856 /* Make sure we have a large enough P->M table. */
857 phys_to_machine_mapping = alloc_bootmem(
858 end_pfn * sizeof(unsigned long));
859 memset(phys_to_machine_mapping, ~0,
860 end_pfn * sizeof(unsigned long));
861 memcpy(phys_to_machine_mapping,
862 (unsigned long *)xen_start_info->mfn_list,
863 xen_start_info->nr_pages * sizeof(unsigned long));
865 __pa(xen_start_info->mfn_list),
866 PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
867 sizeof(unsigned long))));
870 * Initialise the list of the frames that specify the
871 * list of frames that make up the p2m table. Used by
874 pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
875 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
876 virt_to_mfn(pfn_to_mfn_frame_list_list);
878 fpp = PAGE_SIZE/sizeof(unsigned long);
879 for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
880 if ((j % fpp) == 0) {
883 pfn_to_mfn_frame_list[k] =
884 alloc_bootmem(PAGE_SIZE);
885 pfn_to_mfn_frame_list_list[k] =
886 virt_to_mfn(pfn_to_mfn_frame_list[k]);
889 pfn_to_mfn_frame_list[k][j] =
890 virt_to_mfn(&phys_to_machine_mapping[i]);
892 HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
897 if (xen_start_info->flags & SIF_INITDOMAIN)
900 if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
916 * set this early, so we dont allocate cpu0
917 * if MADT list doesnt list BSP first
918 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
920 cpu_set(0, cpu_present_map);
923 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
924 * Call this early for SRAT node setup.
926 acpi_boot_table_init();
929 * Read APIC and some other early information from ACPI tables.
936 #ifdef CONFIG_X86_LOCAL_APIC
938 * get boot-time SMP configuration:
940 if (smp_found_config)
943 init_apic_mappings();
946 #if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
947 prefill_possible_map();
951 * Request address space for all standard RAM and ROM resources
952 * and also for regions reported as reserved by the e820.
/* dom0: fetch the machine (host) e820 from the hypervisor for resources */
954 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
956 if (xen_start_info->flags & SIF_INITDOMAIN) {
957 machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);
959 memmap.nr_entries = E820MAX;
960 set_xen_guest_handle(memmap.buffer, machine_e820);
962 BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));
964 e820_reserve_resources(machine_e820, memmap.nr_entries);
965 } else if (!(xen_start_info->flags & SIF_INITDOMAIN))
966 e820_reserve_resources(e820.map, e820.nr_map);
967 #elif defined(CONFIG_XEN)
968 e820_reserve_resources(e820.map, e820.nr_map);
971 e820_reserve_resources(e820.map, e820.nr_map);
974 request_resource(&iomem_resource, &video_ram_resource);
978 /* request I/O space for devices used on all i[345]86 PCs */
979 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
980 request_resource(&ioport_resource, &standard_io_resources[i]);
983 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
984 if (xen_start_info->flags & SIF_INITDOMAIN) {
985 e820_setup_gap(machine_e820, memmap.nr_entries);
986 free_bootmem(__pa(machine_e820), PAGE_SIZE);
988 #elif !defined(CONFIG_XEN)
989 e820_setup_gap(e820.map, e820.nr_map);
992 #ifdef CONFIG_GART_IOMMU
/* request I/O privilege level from the hypervisor, then pick a console */
998 struct physdev_set_iopl set_iopl;
1001 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
1003 if (xen_start_info->flags & SIF_INITDOMAIN) {
1004 if (!(xen_start_info->flags & SIF_PRIVILEGED))
1005 panic("Xen granted us console access "
1006 "but not privileged status");
1009 #if defined(CONFIG_VGA_CONSOLE)
1010 conswitchp = &vga_con;
1011 #elif defined(CONFIG_DUMMY_CONSOLE)
1012 conswitchp = &dummy_con;
1016 extern int console_use_vt;
1020 #else /* CONFIG_XEN */
1023 #if defined(CONFIG_VGA_CONSOLE)
1024 conswitchp = &vga_con;
1025 #elif defined(CONFIG_DUMMY_CONSOLE)
1026 conswitchp = &dummy_con;
1030 #endif /* !CONFIG_XEN */
/*
 * Panic-notifier callback (registered via xen_panic_block): tell the
 * hypervisor this domain crashed so it can record/restart it.
 * NOTE(review): the "static int" return-type line, the return statement
 * and closing brace are elided in this listing.
 */
1035 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1037 HYPERVISOR_shutdown(SHUTDOWN_crash);
1038 /* we're never actually going to get here... */
1041 #endif /* !CONFIG_XEN */
/*
 * Read the 48-byte CPU brand string via CPUID leaves 0x80000002-0x80000004
 * into c->x86_model_id and NUL-terminate it. Requires extended CPUID level
 * >= 0x80000004.
 * NOTE(review): the declaration of 'v', the early "return 0" and the final
 * "return 1" / closing brace are elided in this listing.
 */
1044 static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1048 if (c->extended_cpuid_level < 0x80000004)
1051 v = (unsigned int *) c->x86_model_id;
1052 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
1053 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
1054 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
1055 c->x86_model_id[48] = 0;
/*
 * Query AMD-style extended CPUID leaves for cache/TLB geometry, record it
 * in the cpuinfo (x86_cache_size, x86_tlbsize, x86_power, virt/phys address
 * bits) and print L1/L2 summaries.
 * NOTE(review): some TLB-accumulation lines and closing braces are elided
 * in this listing (numbering gaps).
 */
1060 static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1062 unsigned int n, dummy, eax, ebx, ecx, edx;
1064 n = c->extended_cpuid_level;
/* leaf 0x80000005: L1 cache and TLB info */
1066 if (n >= 0x80000005) {
1067 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
1068 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
1069 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
1070 c->x86_cache_size=(ecx>>24)+(edx>>24);
1071 /* On K8 L1 TLB is inclusive, so don't count it */
/* leaf 0x80000006: L2 cache and TLB info */
1075 if (n >= 0x80000006) {
1076 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
1077 ecx = cpuid_ecx(0x80000006);
1078 c->x86_cache_size = ecx >> 16;
1079 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
1081 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
1082 c->x86_cache_size, ecx & 0xFF);
/* leaf 0x80000007: advanced power management (stored for later TSC checks) */
1085 if (n >= 0x80000007)
1086 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
1087 if (n >= 0x80000008) {
1088 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1089 c->x86_virt_bits = (eax >> 8) & 0xff;
1090 c->x86_phys_bits = eax & 0xff;
/*
 * Find an online NUMA node "near" the given APIC id by scanning
 * apicid_to_node[] downward then upward from it; falls back to the first
 * online node.
 * NOTE(review): the loop-local declaration of 'i' and the "return node"
 * statements inside the two loops are elided in this listing.
 */
1095 static int nearby_node(int apicid)
1098 for (i = apicid - 1; i >= 0; i--) {
1099 int node = apicid_to_node[i];
1100 if (node != NUMA_NO_NODE && node_online(node))
1103 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
1104 int node = apicid_to_node[i];
1105 if (node != NUMA_NO_NODE && node_online(node))
1108 return first_node(node_online_map); /* Shouldn't happen */
1113 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
1114 * Assumes number of cores is a power of two.
/*
 * Split the APIC id into socket id (phys_proc_id) and core id (cpu_core_id)
 * for AMD CMP, then map the CPU onto a NUMA node, with fallbacks when the
 * SRAT-derived node is offline.
 * NOTE(review): local declarations ('bits', 'node'), several #ifdef guards
 * and closing braces are elided in this listing.
 */
1116 static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
1119 int cpu = smp_processor_id();
1123 unsigned apicid = hard_smp_processor_id();
/* bits = log2(number of cores per package), rounded up */
1127 while ((1 << bits) < c->x86_max_cores)
1130 /* Low order bits define the core id (index of core in socket) */
1131 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
1132 /* Convert the APIC ID into the socket ID */
1133 phys_proc_id[cpu] = phys_pkg_id(bits);
1136 node = phys_proc_id[cpu];
1137 if (apicid_to_node[apicid] != NUMA_NO_NODE)
1138 node = apicid_to_node[apicid];
1139 if (!node_online(node)) {
1140 /* Two possibilities here:
1141 - The CPU is missing memory and no node was created.
1142 In that case try picking one from a nearby CPU
1143 - The APIC IDs differ from the HyperTransport node IDs
1144 which the K8 northbridge parsing fills in.
1145 Assume they are all increased by a constant offset,
1146 but in the same order as the HT nodeids.
1147 If that doesn't result in a usable node fall back to the
1148 path for the previous case. */
1149 int ht_nodeid = apicid - (phys_proc_id[0] << bits);
1150 if (ht_nodeid >= 0 &&
1151 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
1152 node = apicid_to_node[ht_nodeid];
1153 /* Pick a nearby node */
1154 if (!node_online(node))
1155 node = nearby_node(apicid);
1157 numa_set_node(cpu, node);
1159 printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
1160 cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
/*
 * AMD-specific CPU setup: apply the K8 TLB-flush-filter erratum workaround
 * via MSR_K8_HWCR, fix up the 3DNow feature bit, enable REP-microcode and
 * FXSAVE-leak workaround feature flags for the relevant steppings, read
 * the model name and cache info, and record multi-core topology.
 * NOTE(review): local declarations ('level', 'r'), the HWCR bit-set between
 * the rdmsrl/wrmsrl pair, stepping guards, amd_detect_cmp() call and
 * closing braces are elided in this listing.
 */
1165 static int __init init_amd(struct cpuinfo_x86 *c)
1171 unsigned long value;
1174 * Disable TLB flush filter by setting HWCR.FFDIS on K8
1175 * bit 6 of msr C001_0015
1177 * Errata 63 for SH-B3 steppings
1178 * Errata 122 for all steppings (F+ have it disabled by default)
1181 rdmsrl(MSR_K8_HWCR, value);
1183 wrmsrl(MSR_K8_HWCR, value);
1187 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
1188 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
1189 clear_bit(0*32+31, &c->x86_capability);
1191 /* On C+ stepping K8 rep microcode works well for copy/memset */
1192 level = cpuid_eax(1);
1193 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
1194 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
1196 /* Enable workaround for FXSAVE leak */
1198 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
1200 r = get_model_name(c);
1204 /* Should distinguish Models here, but this is only
1205 a fallback anyways. */
1206 strcpy(c->x86_model_id, "Hammer");
1210 display_cacheinfo(c);
1212 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
1213 if (c->x86_power & (1<<8))
1214 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1216 if (c->extended_cpuid_level >= 0x80000008) {
1217 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
/*
 * Detect Hyper-Threading / multi-core topology from CPUID leaf 1: derive
 * smp_num_siblings, the physical package id and the core id for this CPU.
 * Skipped when HT is absent or the CPU reports CMP_LEGACY.
 * NOTE(review): 'goto'/'return' targets, an "out:" style label and closing
 * braces are elided in this listing (numbering gaps).
 */
1225 static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1228 u32 eax, ebx, ecx, edx;
1229 int index_msb, core_bits;
1230 int cpu = smp_processor_id();
1232 cpuid(1, &eax, &ebx, &ecx, &edx);
1235 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
/* EBX[23:16] of leaf 1 = logical processors per package */
1238 smp_num_siblings = (ebx & 0xff0000) >> 16;
1240 if (smp_num_siblings == 1) {
1241 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1242 } else if (smp_num_siblings > 1 ) {
1244 if (smp_num_siblings > NR_CPUS) {
1245 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
1246 smp_num_siblings = 1;
1250 index_msb = get_count_order(smp_num_siblings);
1251 phys_proc_id[cpu] = phys_pkg_id(index_msb);
1253 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* siblings per core = total siblings / cores per package */
1256 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
1258 index_msb = get_count_order(smp_num_siblings) ;
1260 core_bits = get_count_order(c->x86_max_cores);
1262 cpu_core_id[cpu] = phys_pkg_id(index_msb) &
1263 ((1 << core_bits) - 1);
1265 if (c->x86_max_cores > 1)
1266 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1273 * find out the number of processor cores on the die
/*
 * Intel core count from CPUID leaf 4: EAX[31:26] + 1. Requires
 * cpuid_level >= 4.
 * NOTE(review): the "return 1" fallback, the cpuid_count(4, ...) call
 * filling 'eax' and closing braces are elided in this listing.
 */
1275 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
1279 if (c->cpuid_level < 4)
1288 return ((eax >> 26) + 1);
/*
 * Bind this CPU to the NUMA node recorded for its APIC id by SRAT parsing.
 * Unlike the AMD path, unmapped APIC ids simply fall back (no heuristics).
 * NOTE(review): local 'node' declaration, the fallback assignment after the
 * NUMA_NO_NODE check, #ifdef CONFIG_NUMA guards and closing braces are
 * elided in this listing.
 */
1293 static void srat_detect_node(void)
1297 int cpu = smp_processor_id();
1299 /* Don't do the funky fallback heuristics the AMD version employs
1301 node = apicid_to_node[hard_smp_processor_id()];
1302 if (node == NUMA_NO_NODE)
1304 numa_set_node(cpu, node);
1307 printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
/*
 * Intel-specific CPU setup: cache info, address widths (with the 0F34
 * CPUID workaround), cache alignment, constant-TSC and sync-RDTSC feature
 * bits, core count and NUMA node binding.
 * NOTE(review): local 'n' declaration, some stepping checks and closing
 * braces are elided in this listing.
 */
1311 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1316 init_intel_cacheinfo(c);
1317 n = c->extended_cpuid_level;
1318 if (n >= 0x80000008) {
1319 unsigned eax = cpuid_eax(0x80000008);
1320 c->x86_virt_bits = (eax >> 8) & 0xff;
1321 c->x86_phys_bits = eax & 0xff;
1322 /* CPUID workaround for Intel 0F34 CPU */
1323 if (c->x86_vendor == X86_VENDOR_INTEL &&
1324 c->x86 == 0xF && c->x86_model == 0x3 &&
1326 c->x86_phys_bits = 36;
/* cache line prefetch pulls in pairs of lines on these parts */
1330 c->x86_cache_alignment = c->x86_clflush_size * 2;
1331 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
1332 (c->x86 == 0x6 && c->x86_model >= 0x0e))
1333 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
1334 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
1335 c->x86_max_cores = intel_num_cpu_cores(c);
/*
 * Map the CPUID vendor string to an X86_VENDOR_* constant (AMD, Intel or
 * unknown).
 * NOTE(review): the "else" line before the UNKNOWN assignment and the
 * closing brace are elided in this listing, as are the struct
 * cpu_model_info member lines between 'length' and 'model_names'.
 */
1340 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1342 char *v = c->x86_vendor_id;
1344 if (!strcmp(v, "AuthenticAMD"))
1345 c->x86_vendor = X86_VENDOR_AMD;
1346 else if (!strcmp(v, "GenuineIntel"))
1347 c->x86_vendor = X86_VENDOR_INTEL;
1349 c->x86_vendor = X86_VENDOR_UNKNOWN;
1352 struct cpu_model_info {
1355 char *model_names[16];
1358 /* Do some early cpuid on the boot CPU to get some parameter that are
1359 needed before check_bugs. Everything advanced is in identify_cpu
/*
 * Reset the cpuinfo to defaults, read the vendor string (CPUID leaf 0),
 * then decode family/model/stepping, capability words and CLFLUSH size
 * from CPUID leaf 1. Extended family/model fields are folded in for
 * family 0xf parts.
 * NOTE(review): local 'tfms'/'misc' declarations, the family==0xf guards
 * around the extended-field adjustments, and closing braces are elided in
 * this listing.
 */
1361 void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1365 c->loops_per_jiffy = loops_per_jiffy;
1366 c->x86_cache_size = -1;
1367 c->x86_vendor = X86_VENDOR_UNKNOWN;
1368 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1369 c->x86_vendor_id[0] = '\0'; /* Unset */
1370 c->x86_model_id[0] = '\0'; /* Unset */
1371 c->x86_clflush_size = 64;
1372 c->x86_cache_alignment = c->x86_clflush_size;
1373 c->x86_max_cores = 1;
1374 c->extended_cpuid_level = 0;
1375 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1377 /* Get vendor name */
1378 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1379 (unsigned int *)&c->x86_vendor_id[0],
1380 (unsigned int *)&c->x86_vendor_id[8],
1381 (unsigned int *)&c->x86_vendor_id[4]);
1385 /* Initialize the standard set of capabilities */
1386 /* Note that the vendor-specific code below might override */
1388 /* Intel-defined flags: level 0x00000001 */
1389 if (c->cpuid_level >= 0x00000001) {
1391 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1392 &c->x86_capability[0]);
1393 c->x86 = (tfms >> 8) & 0xf;
1394 c->x86_model = (tfms >> 4) & 0xf;
1395 c->x86_mask = tfms & 0xf;
1397 c->x86 += (tfms >> 20) & 0xff;
1399 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* capability bit 19 = CLFLUSH; misc[15:8] gives line size in 8-byte units */
1400 if (c->x86_capability[0] & (1<<19))
1401 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1403 /* Have CPUID level 0 only - unheard of */
/* initial APIC id from CPUID leaf 1 EBX[31:24] identifies the package */
1408 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
1413 * This does the hard work of actually picking apart the CPU stuff...
/*
 * Full CPU identification: early CPUID, extended (AMD) and Transmeta
 * feature leaves, vendor-specific fixups, then fold this CPU's flags
 * into the common boot_cpu_data feature set.
 */
1415 void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1420 early_identify_cpu(c);
1422 /* AMD-defined flags: level 0x80000001 */
1423 xlvl = cpuid_eax(0x80000000);
1424 c->extended_cpuid_level = xlvl;
/* A valid extended-CPUID range answers 0x8000xxxx in EAX. */
1425 if ((xlvl & 0xffff0000) == 0x80000000) {
1426 if (xlvl >= 0x80000001) {
1427 c->x86_capability[1] = cpuid_edx(0x80000001);
1428 c->x86_capability[6] = cpuid_ecx(0x80000001);
/* Leaves 0x80000002-4 hold the 48-byte processor brand string. */
1430 if (xlvl >= 0x80000004)
1431 get_model_name(c); /* Default name */
1434 /* Transmeta-defined flags: level 0x80860001 */
1435 xlvl = cpuid_eax(0x80860000);
1436 if ((xlvl & 0xffff0000) == 0x80860000) {
1437 /* Don't set x86_cpuid_level here for now to not confuse. */
1438 if (xlvl >= 0x80860001)
1439 c->x86_capability[2] = cpuid_edx(0x80860001);
1442 c->apicid = phys_pkg_id(0);
1445 * Vendor-specific initialization. In this section we
1446 * canonicalize the feature flags, meaning if there are
1447 * features a certain CPU supports which CPUID doesn't
1448 * tell us, CPUID claiming incorrect flags, or other bugs,
1449 * we handle them here.
1451 * At the end of this section, c->x86_capability better
1452 * indicate the features this CPU genuinely supports!
1454 switch (c->x86_vendor) {
1455 case X86_VENDOR_AMD:
1459 case X86_VENDOR_INTEL:
/* Unknown vendor: just report whatever the cache CPUID leaves say. */
1463 case X86_VENDOR_UNKNOWN:
1465 display_cacheinfo(c);
1469 select_idle_routine(c);
1473 * On SMP, boot_cpu_data holds the common feature set between
1474 * all CPUs; so make sure that we indicate which features are
1475 * common between the CPUs. The first time this routine gets
1476 * executed, c == &boot_cpu_data.
1478 if (c != &boot_cpu_data) {
1479 /* AND the already accumulated flags with these */
1480 for (i = 0 ; i < NCAPINTS ; i++)
1481 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1484 #ifdef CONFIG_X86_MCE
/* Machine-check init call elided in this view; boot-CPU only branch. */
1487 if (c == &boot_cpu_data)
1492 numa_add_cpu(smp_processor_id());
/*
 * Boot-time one-liner for a CPU: model name (if CPUID provided one)
 * followed by the stepping.
 */
1497 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1499 if (c->x86_model_id[0])
1500 printk("%s", c->x86_model_id);
/* cpuid_level >= 0 means CPUID worked, so the stepping is meaningful. */
1502 if (c->x86_mask || c->cpuid_level >= 0)
1503 printk(" stepping %02x\n", c->x86_mask);
1509 * Get CPU information for use by the procfs.
/*
 * seq_file ->show handler for /proc/cpuinfo: formats one cpuinfo_x86
 * record (identification, frequency, cache, topology, feature flags,
 * bogomips, power-management bits).
 */
1512 static int show_cpuinfo(struct seq_file *m, void *v)
1514 struct cpuinfo_x86 *c = v;
1517 * These flag bits must match the definitions in <asm/cpufeature.h>.
1518 * NULL means this bit is undefined or reserved; either way it doesn't
1519 * have meaning as far as Linux is concerned. Note that it's important
1520 * to realize there is a difference between this table and CPUID -- if
1521 * applications want to get the raw CPUID data, they should access
1522 * /dev/cpu/<cpu_nr>/cpuid instead.
/* Flag names, indexed by bit position across the x86_capability words
 * (32 names per word, in word order 0,1,2,...). */
1524 static char *x86_cap_flags[] = {
/* Intel-defined: x86_capability[0] (CPUID 1 / EDX). */
1526 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1527 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1528 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1529 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
/* AMD-defined: x86_capability[1] (CPUID 0x80000001 / EDX). */
1532 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1533 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1534 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1535 NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
1537 /* Transmeta-defined */
1538 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1539 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1540 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1541 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1543 /* Other (Linux-defined) */
1544 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1545 "constant_tsc", NULL, NULL,
1546 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1547 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1548 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1550 /* Intel-defined (#2) */
1551 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
1552 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1553 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1554 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1556 /* VIA/Cyrix/Centaur-defined */
1557 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1558 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1559 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1560 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1562 /* AMD-defined (#2) */
1563 "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
1564 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1565 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1566 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Bit names for c->x86_power (advanced power-management features). */
1568 static char *x86_power_flags[] = {
1569 "ts", /* temperature sensor */
1570 "fid", /* frequency id control */
1571 "vid", /* voltage id control */
1572 "ttp", /* thermal trip */
1576 /* nothing */ /* constant_tsc - moved to flags */
/* c - cpu_data == this CPU's index; skip offline CPUs entirely. */
1581 if (!cpu_online(c-cpu_data))
1585 seq_printf(m,"processor\t: %u\n"
1587 "cpu family\t: %d\n"
1589 "model name\t: %s\n",
1590 (unsigned)(c-cpu_data),
1591 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1594 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1596 if (c->x86_mask || c->cpuid_level >= 0)
1597 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1599 seq_printf(m, "stepping\t: unknown\n");
/* Prefer the cpufreq-reported frequency when TSC exists; the
 * fallback path (elided here) presumably uses cpu_khz — verify. */
1601 if (cpu_has(c,X86_FEATURE_TSC)) {
1602 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
1605 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1606 freq / 1000, (freq % 1000));
/* Cache size is -1 when unknown (see early_identify_cpu). */
1610 if (c->x86_cache_size >= 0)
1611 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
/* Topology lines only make sense with more than one sibling/core. */
1614 if (smp_num_siblings * c->x86_max_cores > 1) {
1615 int cpu = c - cpu_data;
1616 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
1617 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
1618 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
1619 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
1625 "fpu_exception\t: yes\n"
1626 "cpuid level\t: %d\n"
/* Emit every known, set capability bit by name. */
1633 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1634 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1635 seq_printf(m, " %s", x86_cap_flags[i]);
1638 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1639 c->loops_per_jiffy/(500000/HZ),
1640 (c->loops_per_jiffy/(5000/HZ)) % 100);
1642 if (c->x86_tlbsize > 0)
1643 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1644 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1645 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1647 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1648 c->x86_phys_bits, c->x86_virt_bits);
1650 seq_printf(m, "power management:");
/* Named bits get their name; unnamed set bits print as " [bit]". */
1653 for (i = 0; i < 32; i++)
1654 if (c->x86_power & (1 << i)) {
1655 if (i < ARRAY_SIZE(x86_power_flags) &&
1657 seq_printf(m, "%s%s",
1658 x86_power_flags[i][0]?" ":"",
1659 x86_power_flags[i]);
1661 seq_printf(m, " [%d]", i);
1665 seq_printf(m, "\n\n");
/* seq_file ->start: return &cpu_data[*pos], or NULL past the last CPU. */
1670 static void *c_start(struct seq_file *m, loff_t *pos)
1672 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* seq_file ->next: re-seek via c_start.  NOTE(review): the increment of
 * *pos is on a line elided from this view — confirm before relying on it. */
1675 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1678 return c_start(m, pos);
/* seq_file ->stop: nothing to release (body elided/empty in this view). */
1681 static void c_stop(struct seq_file *m, void *v)
/* /proc/cpuinfo seq_file operations; .start/.next/.stop initializers
 * are on lines elided from this view. */
1685 struct seq_operations cpuinfo_op = {
1689 .show = show_cpuinfo,
1692 #if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
1693 #include <linux/platform_device.h>
/*
 * Register a "pcspkr" platform device so the PC-speaker input driver
 * can bind to it.  Runs as a device_initcall at boot.
 */
1694 static __init int add_pcspkr(void)
1696 struct platform_device *pd;
/* id -1: there is only ever one PC speaker, so no instance number. */
1699 pd = platform_device_alloc("pcspkr", -1);
1703 ret = platform_device_add(pd);
/* On add failure, drop the reference taken by platform_device_alloc. */
1705 platform_device_put(pd);
1709 device_initcall(add_pcspkr);