fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / arch / x86_64 / kernel / setup-xen.c
1 /*
2  *  linux/arch/x86-64/kernel/setup.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *
6  *  Nov 2001 Dave Jones <davej@suse.de>
7  *  Forked from i386 setup code.
8  */
9
10 /*
11  * This file handles the architecture-dependent parts of initialization
12  */
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/ptrace.h>
21 #include <linux/slab.h>
22 #include <linux/user.h>
23 #include <linux/a.out.h>
24 #include <linux/screen_info.h>
25 #include <linux/ioport.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/initrd.h>
29 #include <linux/highmem.h>
30 #include <linux/bootmem.h>
31 #include <linux/module.h>
32 #include <asm/processor.h>
33 #include <linux/console.h>
34 #include <linux/seq_file.h>
35 #include <linux/crash_dump.h>
36 #include <linux/root_dev.h>
37 #include <linux/pci.h>
38 #include <linux/acpi.h>
39 #include <linux/kallsyms.h>
40 #include <linux/edd.h>
41 #include <linux/mmzone.h>
42 #include <linux/kexec.h>
43 #include <linux/cpufreq.h>
44 #include <linux/dmi.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/ctype.h>
47
48 #include <asm/mtrr.h>
49 #include <asm/uaccess.h>
50 #include <asm/system.h>
51 #include <asm/io.h>
52 #include <asm/smp.h>
53 #include <asm/msr.h>
54 #include <asm/desc.h>
55 #include <video/edid.h>
56 #include <asm/e820.h>
57 #include <asm/dma.h>
58 #include <asm/mpspec.h>
59 #include <asm/mmu_context.h>
60 #include <asm/bootsetup.h>
61 #include <asm/proto.h>
62 #include <asm/setup.h>
63 #include <asm/mach_apic.h>
64 #include <asm/numa.h>
65 #include <asm/sections.h>
66 #include <asm/dmi.h>
67 #ifdef CONFIG_XEN
68 #include <linux/percpu.h>
69 #include <linux/pfn.h>
70 #include <xen/interface/physdev.h>
71 #include "setup_arch_pre.h"
72 #include <asm/hypervisor.h>
73 #include <xen/interface/nmi.h>
74 #include <xen/features.h>
75 #include <xen/xencons.h>
76 #include <asm/mach-xen/setup_arch_post.h>
77 #include <xen/interface/memory.h>
78
extern unsigned long start_pfn;

/* Mapped shared-info page; starts out pointing at the all-zero page and is
 * presumably remapped to the real hypervisor page early in boot —
 * NOTE(review): the remap is not visible in this file, confirm elsewhere. */
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Page used for hypercall trampolines, filled in by the hypervisor. */
extern char hypercall_page[PAGE_SIZE];
EXPORT_SYMBOL(hypercall_page);

/* Allows setting of maximum possible memory size  */
unsigned long xen_override_max_pfn;

static int xen_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block xen_panic_block = {
        xen_panic_event, NULL, 0 /* try to go last */
};

/* Pseudo-physical to machine frame translation table (P2M), plus the
 * two-level list of frames describing it, used by save/restore. */
unsigned long *phys_to_machine_mapping;
unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];

EXPORT_SYMBOL(phys_to_machine_mapping);

/* Per-CPU batch of pending multicalls (up to 8 entries each). */
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
#endif
107
/*
 * Machine setup..
 */

/* CPU data of the boot processor, filled in by early_identify_cpu(). */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

/* CR4 feature bits enabled at boot (value set elsewhere). */
unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/* Video mode saved by the boot loader (see SAVED_VIDEO_MODE below). */
unsigned long saved_video_mode;

/* 
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
#ifdef CONFIG_XEN
/* Host (machine) e820 map, fetched from the hypervisor in dom0. */
struct e820map machine_e820;
#endif

extern int root_mountflags;

/* Kernel command line as handed to init/main.c via *cmdline_p. */
char command_line[COMMAND_LINE_SIZE];
147
/* Legacy PC I/O port ranges claimed on every machine (DMA controllers,
 * PICs, PIT, keyboard controller, FPU error port). */
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

/* Kernel text/data ranges; start/end filled in by setup_arch(). */
struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

/* Fixed legacy ROM windows in the first megabyte. */
static struct resource system_rom_resource = {
        .name = "System ROM",
        .start = 0xf0000,
        .end = 0xfffff,
        .flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
        .name = "Extension ROM",
        .start = 0xe0000,
        .end = 0xeffff,
        .flags = IORESOURCE_ROM,
};

/* Slots for option ROMs found by probe_roms(); only the first has a
 * preset scan start address (0xc8000). */
static struct resource adapter_rom_resources[] = {
        { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM }
};

static struct resource video_rom_resource = {
        .name = "Video ROM",
        .start = 0xc0000,
        .end = 0xc7fff,
        .flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_RAM,
};

/* ROM images begin with the 0x55 0xAA signature word. */
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
230
231 static int __init romchecksum(unsigned char *rom, unsigned long length)
232 {
233         unsigned char *p, sum = 0;
234
235         for (p = rom; p < rom + length; p++)
236                 sum += *p;
237         return sum == 0;
238 }
239
/*
 * Scan the legacy ISA ROM region for the video BIOS, system BIOS,
 * extension ROM and adapter (option) ROMs, and register each one found
 * in the iomem resource tree.  ROMs are located by their 0xAA55
 * signature on 2K boundaries and validated by checksum.
 */
static void __init probe_roms(void)
{
        unsigned long start, length, upper;
        unsigned char *rom;
        int           i;

#ifdef CONFIG_XEN
        /* Nothing to do if not running in dom0. */
        if (!is_initial_xendomain())
                return;
#endif

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        /* Continue scanning after the video ROM, rounded up to 2K. */
        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
             start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                /* Resume the scan just past this ROM (2K-aligned). */
                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}
311
312 #ifdef CONFIG_PROC_VMCORE
313 /* elfcorehdr= specifies the location of elf core header
314  * stored by the crashed kernel. This option will be passed
315  * by kexec loader to the capture kernel.
316  */
317 static int __init setup_elfcorehdr(char *arg)
318 {
319         char *end;
320         if (!arg)
321                 return -EINVAL;
322         elfcorehdr_addr = memparse(arg, &end);
323         return end > arg ? 0 : -EINVAL;
324 }
325 early_param("elfcorehdr", setup_elfcorehdr);
326 #endif
327
328 #ifndef CONFIG_NUMA
329 static void __init
330 contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
331 {
332         unsigned long bootmap_size, bootmap;
333
334         bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
335         bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
336         if (bootmap == -1L)
337                 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
338         bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
339         e820_register_active_regions(0, start_pfn, end_pfn);
340 #ifdef CONFIG_XEN
341         free_bootmem_with_active_regions(0, xen_start_info->nr_pages);
342 #else
343         free_bootmem_with_active_regions(0, end_pfn);
344 #endif
345         reserve_bootmem(bootmap, bootmap_size);
346
347 #endif
348
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
     edd.edd_info_nr = EDD_NR;
}
#else
/* EDD support not configured: nothing to copy. */
static inline void copy_edd(void)
{
}
#endif
371
#ifndef CONFIG_XEN
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

/*
 * Locate the Extended BIOS Data Area so that setup_arch() can reserve
 * it.  Reads the real-mode BDA directly, so this must run while the
 * low 1MB is still identity-mapped (not applicable under Xen).
 */
static void discover_ebda(void)
{
        /*
         * there is a real-mode segmented pointer pointing to the 
         * 4K EBDA area at 0x40E
         */
        ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
        ebda_addr <<= 4;

        /* First word of the EBDA is its size in KiB — assumed layout,
         * standard BIOS convention. */
        ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        /* Clamp: the EBDA cannot legitimately exceed 64KB. */
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}
#endif
398
/*
 * setup_arch - architecture-dependent boot-time initialization for
 * x86-64 (native and Xen paravirtualized).
 *
 * Runs once, early, on the boot CPU.  The sequence is strictly
 * order-dependent: memory map discovery, direct mapping, bootmem
 * setup, bootmem reservations, paging, Xen P2M table construction,
 * ACPI/SMP table parsing, and finally resource registration.
 *
 * @cmdline_p: out-parameter; set to the kernel command line buffer.
 */
void __init setup_arch(char **cmdline_p)
{
        printk(KERN_INFO "Command line: %s\n", saved_command_line);

#ifdef CONFIG_XEN
        /* Register a call for panic conditions. */
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

        ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
        screen_info = SCREEN_INFO;

        if (is_initial_xendomain()) {
                /* This is drawn from a dump from vgacon:startup in
                 * standard Linux. */
                screen_info.orig_video_mode = 3;
                screen_info.orig_video_isVGA = 1;
                screen_info.orig_video_lines = 25;
                screen_info.orig_video_cols = 80;
                screen_info.orig_video_ega_bx = 3;
                screen_info.orig_video_points = 16;
                screen_info.orig_y = screen_info.orig_video_lines - 1;
                /* If the hypervisor passed real VGA console info, let it
                 * override the defaults above. */
                if (xen_start_info->console.dom0.info_size >=
                    sizeof(struct dom0_vga_console_info)) {
                        const struct dom0_vga_console_info *info =
                                (struct dom0_vga_console_info *)(
                                        (char *)xen_start_info +
                                        xen_start_info->console.dom0.info_off);
                        dom0_init_screen_info(info);
                }
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;
        } else
                screen_info.orig_video_isVGA = 0;
#else
        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        screen_info = SCREEN_INFO;
#endif  /* !CONFIG_XEN */

        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_XEN
        setup_xen_features();

        /* Let the hypervisor emulate writes to (read-only) page tables. */
        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_writable_pagetables);

        ARCH_SETUP
#endif

        /* Build the kernel's e820 memory map. */
        setup_memory_region();
        copy_edd();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        check_efer();

#ifndef CONFIG_XEN
        discover_ebda();
#endif

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        zap_low_mappings(0);

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn); 
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT, 
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

#ifdef CONFIG_XEN
        /* reserve physmap, start info and initial page tables */
        reserve_bootmem(__pa_symbol(&_end), (table_start<<PAGE_SHIFT)-__pa_symbol(&_end));
#else
        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
        /* Xen hands the initrd in via start_info; it is already inside
         * the reserved region, hence the commented-out reservation. */
        if (xen_start_info->mod_start) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                        initrd_below_start_ok = 1;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                                "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                (unsigned long)(INITRD_START + INITRD_SIZE),
                                (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#else   /* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                            (unsigned long)(INITRD_START + INITRD_SIZE),
                            (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#endif  /* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
        /* Keep the crash kernel's memory out of the allocator. */
        if (crashk_res.start != crashk_res.end) {
                reserve_bootmem_generic(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
        }
#endif

        paging_init();
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_XEN
        {
                int i, j, k, fpp;

                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Make sure we have a large enough P->M table. */
                        phys_to_machine_mapping = alloc_bootmem_pages(
                                end_pfn * sizeof(unsigned long));
                        /* ~0 marks pfns with no machine frame. */
                        memset(phys_to_machine_mapping, ~0,
                               end_pfn * sizeof(unsigned long));
                        memcpy(phys_to_machine_mapping,
                               (unsigned long *)xen_start_info->mfn_list,
                               xen_start_info->nr_pages * sizeof(unsigned long));
                        /* The hypervisor's copy of the list is no longer
                         * needed; return its pages to bootmem. */
                        free_bootmem(
                                __pa(xen_start_info->mfn_list),
                                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                                sizeof(unsigned long))));

                        /*
                         * Initialise the list of the frames that specify the
                         * list of frames that make up the p2m table. Used by
                         * save/restore.
                         */
                        pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
                        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                                virt_to_mfn(pfn_to_mfn_frame_list_list);

                        /* fpp = p2m entries per frame; walk the p2m table one
                         * frame (fpp entries) at a time, starting a new list
                         * page every fpp frames. */
                        fpp = PAGE_SIZE/sizeof(unsigned long);
                        for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
                                if ((j % fpp) == 0) {
                                        k++;
                                        BUG_ON(k>=fpp);
                                        pfn_to_mfn_frame_list[k] =
                                                alloc_bootmem_pages(PAGE_SIZE);
                                        pfn_to_mfn_frame_list_list[k] =
                                                virt_to_mfn(pfn_to_mfn_frame_list[k]);
                                        j=0;
                                }
                                pfn_to_mfn_frame_list[k][j] =
                                        virt_to_mfn(&phys_to_machine_mapping[i]);
                        }
                        HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
                }

        }

        if (is_initial_xendomain())
                dmi_scan_machine();

#ifdef CONFIG_ACPI
        /* Unprivileged domains have no ACPI hardware access. */
        if (!is_initial_xendomain()) {
                acpi_disabled = 1;
                acpi_ht = 0;
        }
#endif
#endif

#ifndef CONFIG_XEN
#ifdef CONFIG_PCI
        early_quirks();
#endif
#endif

        /*
         * set this early, so we dont allocate cpu0
         * if MADT list doesnt list BSP first
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
#ifndef CONFIG_XEN
        init_apic_mappings();
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
        prefill_possible_map();
#endif

        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
#ifdef CONFIG_XEN
        /* dom0 reserves resources from the *machine* memory map, fetched
         * from the hypervisor, not the guest pseudo-physical one. */
        if (is_initial_xendomain()) {
                struct xen_memory_map memmap;

                memmap.nr_entries = E820MAX;
                set_xen_guest_handle(memmap.buffer, machine_e820.map);

                if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
                        BUG();
                machine_e820.nr_map = memmap.nr_entries;

                e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
        }
#else
        e820_reserve_resources(e820.map, e820.nr_map);
#endif
        e820_mark_nosave_regions();

        request_resource(&iomem_resource, &video_ram_resource);

        {
        unsigned i;
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
        }

#ifdef CONFIG_XEN
        if (is_initial_xendomain())
                e820_setup_gap(machine_e820.map, machine_e820.nr_map);
#else
        e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_XEN
        {
                struct physdev_set_iopl set_iopl;

                /* Allow the kernel (ring 1 under Xen) to use I/O ports. */
                set_iopl.iopl = 1;
                HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

                if (is_initial_xendomain()) {
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
                        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
                        conswitchp = &dummy_con;
#endif
#endif
                } else {
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
                    conswitchp = &dummy_con;
#endif
                }
        }
#else   /* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
774
#ifdef CONFIG_XEN
/*
 * Panic notifier: tell the hypervisor the domain has crashed so it can
 * take its configured crash action.  The shutdown hypercall does not
 * return.
 */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        HYPERVISOR_shutdown(SHUTDOWN_crash);
        /* we're never actually going to get here... */
        return NOTIFY_DONE;
}
#endif /* CONFIG_XEN */
784
785
786 static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
787 {
788         unsigned int *v;
789
790         if (c->extended_cpuid_level < 0x80000004)
791                 return 0;
792
793         v = (unsigned int *) c->x86_model_id;
794         cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
795         cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
796         cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
797         c->x86_model_id[48] = 0;
798         return 1;
799 }
800
801
802 static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
803 {
804         unsigned int n, dummy, eax, ebx, ecx, edx;
805
806         n = c->extended_cpuid_level;
807
808         if (n >= 0x80000005) {
809                 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
810                 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
811                         edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
812                 c->x86_cache_size=(ecx>>24)+(edx>>24);
813                 /* On K8 L1 TLB is inclusive, so don't count it */
814                 c->x86_tlbsize = 0;
815         }
816
817         if (n >= 0x80000006) {
818                 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
819                 ecx = cpuid_ecx(0x80000006);
820                 c->x86_cache_size = ecx >> 16;
821                 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
822
823                 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
824                 c->x86_cache_size, ecx & 0xFF);
825         }
826
827         if (n >= 0x80000007)
828                 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
829         if (n >= 0x80000008) {
830                 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
831                 c->x86_virt_bits = (eax >> 8) & 0xff;
832                 c->x86_phys_bits = eax & 0xff;
833         }
834 }
835
836 #ifdef CONFIG_NUMA
837 static int nearby_node(int apicid)
838 {
839         int i;
840         for (i = apicid - 1; i >= 0; i--) {
841                 int node = apicid_to_node[i];
842                 if (node != NUMA_NO_NODE && node_online(node))
843                         return node;
844         }
845         for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
846                 int node = apicid_to_node[i];
847                 if (node != NUMA_NO_NODE && node_online(node))
848                         return node;
849         }
850         return first_node(node_online_map); /* Shouldn't happen */
851 }
852 #endif
853
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish
 * the cores.  Assumes number of cores is a power of two.
 *
 * Derives cpu_core_id / phys_proc_id from CPUID leaf 0x80000008 and,
 * under CONFIG_NUMA, binds this CPU to a NUMA node.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        unsigned ecx = cpuid_ecx(0x80000008);

        /* ECX[7:0] = number of cores - 1. */
        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                   In that case try picking one from a nearby CPU
                   - The APIC IDs differ from the HyperTransport node IDs
                   which the K8 northbridge parsing fills in.
                   Assume they are all increased by a constant offset,
                   but in the same order as the HT nodeids.
                   If that doesn't result in a usable node fall back to the
                   path for the previous case.  */
                int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
913
/*
 * AMD-specific CPU init: errata workarounds, capability fixups and
 * multi-core topology detection.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        /* Fall back to a generic model string if CPUID supplied none */
        level = get_model_name(c);
        if (!level) {
                switch (c->x86) { 
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break; 
                } 
        } 
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        /* Fix cpuid4 emulation for more */
        num_cache_leaves = 3;

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}
974
/*
 * Detect HyperThreading siblings and derive the physical package id and
 * core id from the initial APIC id (CPUID leaf 1).
 */
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32     eax, ebx, ecx, edx;
        int     index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);


        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        /* AMD CMP_LEGACY: cores masquerade as HT siblings and the ids
           were already set up by amd_detect_cmp(); just print them. */
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        /* EBX[23:16]: logical processors per physical package */
        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                /* Convert siblings per package into threads per core */
                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings) ;

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                               ((1 << core_bits) - 1);
        }
out:
        /* Only report topology when there is more than one logical CPU
           per package */
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
                printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
        }

#endif
}
1021
1022 /*
1023  * find out the number of processor cores on the die
1024  */
1025 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
1026 {
1027         unsigned int eax, t;
1028
1029         if (c->cpuid_level < 4)
1030                 return 1;
1031
1032         cpuid_count(4, 0, &eax, &t, &t, &t);
1033
1034         if (eax & 0x1f)
1035                 return ((eax >> 26) + 1);
1036         else
1037                 return 1;
1038 }
1039
/*
 * Bind the current CPU to a NUMA node using the SRAT-derived
 * apicid_to_node[] table.
 */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
1057
/*
 * Intel-specific CPU init: cache info, perfmon/BTS/PEBS detection,
 * address-width reporting and per-family capability fixups.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9 ) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

        /* Debug Store present: BTS/PEBS usable unless the corresponding
           "unavailable" bits (11/12) are set in MISC_ENABLE */
        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        /* CPUID 0x80000008 EAX: [7:0] physical, [15:8] virtual addr bits */
        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        /* P4 prefetches in pairs of cache lines */
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        /* Family 15 keeps RDTSC ordered; other families may not */
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}
1107
1108 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1109 {
1110         char *v = c->x86_vendor_id;
1111
1112         if (!strcmp(v, "AuthenticAMD"))
1113                 c->x86_vendor = X86_VENDOR_AMD;
1114         else if (!strcmp(v, "GenuineIntel"))
1115                 c->x86_vendor = X86_VENDOR_INTEL;
1116         else
1117                 c->x86_vendor = X86_VENDOR_UNKNOWN;
1118 }
1119
/* Table entry mapping a vendor/family pair to model name strings,
   indexed by the low 4 bits of the model number. */
struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
1125
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        /* Reset everything to known defaults before probing */
        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name: CPUID 0 returns it in EBX, EDX, ECX order */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                /* Decode family/model/stepping; extended family/model
                   fields only apply to the indicated base values */
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                /* CLFLUSH supported: EBX[15:8] is line size in qwords */
                if (c->x86_capability[0] & (1<<19)) 
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        /* Initial APIC id from CPUID 1 EBX[31:24] */
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
1179
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        /* Sanity check: extended levels report 0x8000xxxx in EAX */
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        /* Full APIC id (shift 0 = no masking) */
        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c); 

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0 ; i < NCAPINTS ; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        /* MTRR init differs between the boot CPU and secondaries */
        if (c == &boot_cpu_data)
                mtrr_bp_init();
        else
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
1263  
1264
1265 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1266 {
1267         if (c->x86_model_id[0])
1268                 printk("%s", c->x86_model_id);
1269
1270         if (c->x86_mask || c->cpuid_level >= 0) 
1271                 printk(" stepping %02x\n", c->x86_mask);
1272         else
1273                 printk("\n");
1274 }
1275
/*
 *      Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;

        /* 
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned.  Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static char *x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
                "constant_tsc", NULL, NULL,
                "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        /* Names for the 8000_0007 EDX power management bits */
        static char *x86_power_flags[] = { 
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                NULL,
                /* nothing */   /* constant_tsc - moved to flags */
        };


#ifdef CONFIG_SMP
        /* Skip offline CPUs; v points into cpu_data[] so the difference
           is the CPU number */
        if (!cpu_online(c-cpu_data))
                return 0;
#endif

        seq_printf(m,"processor\t: %u\n"
                     "vendor_id\t: %s\n"
                     "cpu family\t: %d\n"
                     "model\t\t: %d\n"
                     "model name\t: %s\n",
                     (unsigned)(c-cpu_data),
                     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                     c->x86,
                     (int)c->x86_model,
                     c->x86_model_id[0] ? c->x86_model_id : "unknown");
        
        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");
        
        /* Prefer the cpufreq-reported frequency, fall back to cpu_khz */
        if (cpu_has(c,X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                             freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0) 
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
        
#ifdef CONFIG_SMP
        /* Topology lines only make sense with multiple logical CPUs */
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif  

        seq_printf(m,
                "fpu\t\t: yes\n"
                "fpu_exception\t: yes\n"
                "cpuid level\t: %d\n"
                "wp\t\t: yes\n"
                "flags\t\t:",
                   c->cpuid_level);

        /* Emit the name of every set capability bit that has one */
        { 
                int i; 
                for ( i = 0 ; i < 32*NCAPINTS ; i++ )
                        if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                                seq_printf(m, " %s", x86_cap_flags[i]);
        }
                
        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0) 
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
                   c->x86_phys_bits, c->x86_virt_bits);

        /* Named power-management bits; unnamed ones print as [bit] */
        seq_printf(m, "power management:");
        {
                unsigned i;
                for (i = 0; i < 32; i++) 
                        if (c->x86_power & (1 << i)) {
                                if (i < ARRAY_SIZE(x86_power_flags) &&
                                        x86_power_flags[i])
                                        seq_printf(m, "%s%s",
                                                x86_power_flags[i][0]?" ":"",
                                                x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
        }

        seq_printf(m, "\n\n");

        return 0;
}
1437
1438 static void *c_start(struct seq_file *m, loff_t *pos)
1439 {
1440         return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1441 }
1442
1443 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1444 {
1445         ++*pos;
1446         return c_start(m, pos);
1447 }
1448
/* seq_file stop callback: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1452
/* seq_file operations backing /proc/cpuinfo */
struct seq_operations cpuinfo_op = {
        .start =c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};
1459
1460 #if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
1461 #include <linux/platform_device.h>
1462 static __init int add_pcspkr(void)
1463 {
1464         struct platform_device *pd;
1465         int ret;
1466
1467         pd = platform_device_alloc("pcspkr", -1);
1468         if (!pd)
1469                 return -ENOMEM;
1470
1471         ret = platform_device_add(pd);
1472         if (ret)
1473                 platform_device_put(pd);
1474
1475         return ret;
1476 }
1477 device_initcall(add_pcspkr);
1478 #endif