This commit was generated by cvs2svn to compensate for changes in r925,
[linux-2.6.git] / arch / xen / x86_64 / kernel / setup.c
1 /*
2  *  linux/arch/x86-64/kernel/setup.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *
6  *  Nov 2001 Dave Jones <davej@suse.de>
7  *  Forked from i386 setup code.
8  *
9  *  $Id$
10  */
11
12 /*
13  * This file handles the architecture-dependent parts of initialization
14  */
15
16 #include <linux/errno.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/a.out.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/delay.h>
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/initrd.h>
32 #include <linux/highmem.h>
33 #include <linux/bootmem.h>
34 #include <linux/module.h>
35 #include <asm/processor.h>
36 #include <linux/console.h>
37 #include <linux/seq_file.h>
38 #include <linux/root_dev.h>
39 #include <linux/pci.h>
40 #include <linux/acpi.h>
41 #include <linux/kallsyms.h>
42 #include <linux/edd.h>
43 #include <linux/percpu.h>
44 #include <asm/mtrr.h>
45 #include <asm/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/io.h>
48 #include <asm/smp.h>
49 #include <asm/msr.h>
50 #include <asm/desc.h>
51 #include <video/edid.h>
52 #include <asm/e820.h>
53 #include <asm/dma.h>
54 #include <asm/mpspec.h>
55 #include <asm/mmu_context.h>
56 #include <asm/bootsetup.h>
57 #include <asm/proto.h>
58 #include <asm/setup.h>
59 #include <asm/mach_apic.h>
60 #include <asm/numa.h>
61 #include <asm-xen/xen-public/physdev.h>
62 #include "setup_arch_pre.h"
63 #include <asm-xen/hypervisor.h>
64
65 #define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
66 #define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
67
68 #include <asm/mach-xen/setup_arch_post.h>
69
/* First unallocated page frame, as handed to us by the domain builder. */
extern unsigned long start_pfn;

#if 0
/* Disabled fallback stub; the real struct edid_info comes from <video/edid.h>. */
struct edid_info {
        unsigned char dummy[128];
};
#endif

extern struct edid_info edid_info;

/* Allows setting of maximum possible memory size  */
unsigned long xen_override_max_pfn;
/*
 * Machine setup..
 */

/* Data about the boot processor, filled in by early_identify_cpu(). */
struct cpuinfo_x86 boot_cpu_data;

/* Shadow of the CR4 feature bits enabled at boot. */
unsigned long mmu_cr4_features;
EXPORT_SYMBOL_GPL(mmu_cr4_features);

/* Non-zero once ACPI has been disabled (e.g. by "acpi=off"). */
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef  CONFIG_ACPI_BOOT
extern int __initdata acpi_ht;
extern acpi_interrupt_flags     acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

#ifdef CONFIG_SWIOTLB
int swiotlb;
EXPORT_SYMBOL(swiotlb);
#endif

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
struct e820map e820;

unsigned char aux_device_present;

extern int root_mountflags;
/* Linker-provided section boundary symbols (text/data/bss limits). */
extern char _text, _etext, _edata, _end;

/* Working copy of the boot command line, built by parse_cmdline_early(). */
char command_line[COMMAND_LINE_SIZE];
133
/*
 * Legacy PC I/O port ranges that are always present on i[345]86-class
 * machines; claimed in setup_arch() so drivers cannot grab them by accident.
 */
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

/* Number of entries in standard_io_resources[]. */
#define STANDARD_IO_RESOURCES \
        (sizeof standard_io_resources / sizeof standard_io_resources[0])
157
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

/* Kernel text/data resources; start/end filled in at boot (disabled for
 * Xen below, since the kernel may not be physically contiguous). */
struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

/* ROM resources only exist for dom0, which sees the real BIOS ROMs. */
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
static struct resource system_rom_resource = {
        .name = "System ROM",
        .start = 0xf0000,
        .end = 0xfffff,
        .flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
        .name = "Extension ROM",
        .start = 0xe0000,
        .end = 0xeffff,
        .flags = IORESOURCE_ROM,
};

/* Slots filled in by probe_roms(); only the first has a fixed start. */
static struct resource adapter_rom_resources[] = {
        { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM }
};
#endif

/* NOTE(review): defined outside the #ifdef although the array is inside;
 * only referenced from probe_roms(), which is also dom0-only. */
#define ADAPTER_ROM_RESOURCES \
        (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

static struct resource video_rom_resource = {
        .name = "Video ROM",
        .start = 0xc0000,
        .end = 0xc7fff,
        .flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_RAM,
};

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
/* A ROM image starts with the 0xaa55 signature word. */
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
225
226 static int __init romchecksum(unsigned char *rom, unsigned long length)
227 {
228         unsigned char *p, sum = 0;
229
230         for (p = rom; p < rom + length; p++)
231                 sum += *p;
232         return sum == 0;
233 }
234
/*
 * Scan the legacy BIOS area for the video ROM, system ROM, an optional
 * extension ROM and adapter ROMs, registering each one found in
 * iomem_resource.  dom0-only: unprivileged guests do not see real ROMs.
 */
static void __init probe_roms(void)
{
        unsigned long start, length, upper;
        unsigned char *rom;
        int           i;

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        /* Resume scanning at the next 2K boundary past the video ROM. */
        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                /* i only advances when a ROM was actually registered */
                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}
299 #endif
300
/*
 * Point at the empty zero page to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Guest-pseudophysical -> machine frame table, plus the list of frames
 * that hold the table itself (used by the tools for save/restore). */
u32 *phys_to_machine_mapping, *pfn_to_mfn_frame_list;

EXPORT_SYMBOL(phys_to_machine_mapping);

/* Per-CPU batch of queued hypercalls and the current batch length. */
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
union xen_start_info_union xen_start_info_union;
317
318 static __init void parse_cmdline_early (char ** cmdline_p)
319 {
320         char c = ' ', *to = command_line, *from = COMMAND_LINE;
321         int len = 0;
322
323         memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
324         /* Save unparsed command line copy for /proc/cmdline */
325         memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
326         saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
327
328         for (;;) {
329                 if (c != ' ') 
330                         goto next_char; 
331
332 #ifdef  CONFIG_SMP
333                 /*
334                  * If the BIOS enumerates physical processors before logical,
335                  * maxcpus=N at enumeration-time can be used to disable HT.
336                  */
337                 else if (!memcmp(from, "maxcpus=", 8)) {
338                         extern unsigned int maxcpus;
339
340                         maxcpus = simple_strtoul(from + 8, NULL, 0);
341                 }
342 #endif
343 #ifdef CONFIG_ACPI_BOOT
344                 /* "acpi=off" disables both ACPI table parsing and interpreter init */
345                 if (!memcmp(from, "acpi=off", 8))
346                         disable_acpi();
347
348                 if (!memcmp(from, "acpi=force", 10)) { 
349                         /* add later when we do DMI horrors: */
350                         acpi_force = 1;
351                         acpi_disabled = 0;
352                 }
353
354                 /* acpi=ht just means: do ACPI MADT parsing 
355                    at bootup, but don't enable the full ACPI interpreter */
356                 if (!memcmp(from, "acpi=ht", 7)) { 
357                         if (!acpi_force)
358                                 disable_acpi();
359                         acpi_ht = 1; 
360                 }
361                 else if (!memcmp(from, "pci=noacpi", 10)) 
362                         acpi_disable_pci();
363                 else if (!memcmp(from, "acpi=noirq", 10))
364                         acpi_noirq_set();
365
366                 else if (!memcmp(from, "acpi_sci=edge", 13))
367                         acpi_sci_flags.trigger =  1;
368                 else if (!memcmp(from, "acpi_sci=level", 14))
369                         acpi_sci_flags.trigger = 3;
370                 else if (!memcmp(from, "acpi_sci=high", 13))
371                         acpi_sci_flags.polarity = 1;
372                 else if (!memcmp(from, "acpi_sci=low", 12))
373                         acpi_sci_flags.polarity = 3;
374
375                 /* acpi=strict disables out-of-spec workarounds */
376                 else if (!memcmp(from, "acpi=strict", 11)) {
377                         acpi_strict = 1;
378                 }
379 #endif
380
381 #if 0
382                 if (!memcmp(from, "nolapic", 7) ||
383                     !memcmp(from, "disableapic", 11))
384                         disable_apic = 1;
385
386                 if (!memcmp(from, "noapic", 6)) 
387                         skip_ioapic_setup = 1;
388
389                 if (!memcmp(from, "apic", 4)) { 
390                         skip_ioapic_setup = 0;
391                         ioapic_force = 1;
392                 }
393 #endif
394                         
395                 if (!memcmp(from, "mem=", 4))
396                         parse_memopt(from+4, &from); 
397
398 #ifdef CONFIG_DISCONTIGMEM
399                 if (!memcmp(from, "numa=", 5))
400                         numa_setup(from+5); 
401 #endif
402
403 #ifdef CONFIG_GART_IOMMU 
404                 if (!memcmp(from,"iommu=",6)) { 
405                         iommu_setup(from+6); 
406                 }
407 #endif
408
409                 if (!memcmp(from,"oops=panic", 10))
410                         panic_on_oops = 1;
411
412                 if (!memcmp(from, "noexec=", 7))
413                         nonx_setup(from + 7);
414
415         next_char:
416                 c = *(from++);
417                 if (!c)
418                         break;
419                 if (COMMAND_LINE_SIZE <= ++len)
420                         break;
421                 *(to++) = c;
422         }
423         *to = '\0';
424         *cmdline_p = command_line;
425 }
426
427 #ifndef CONFIG_DISCONTIGMEM
428 static void __init contig_initmem_init(void)
429 {
430         unsigned long bootmap_size, bootmap; 
431
432         /*
433          * partially used pages are not usable - thus
434          * we are rounding upwards:
435          */
436
437         bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
438         bootmap = start_pfn;
439         bootmap_size = init_bootmem(bootmap, end_pfn);
440         reserve_bootmem(bootmap, bootmap_size);
441         
442         free_bootmem(start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);   
443         printk("Registering memory for bootmem: from  %lx, size = %lx\n",
444                      start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
445         /* 
446          * This should cover kernel_end
447          */
448 #if 0
449         reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
450                                       bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
451 #endif
452         reserve_bootmem(0, (PFN_PHYS(start_pfn) +
453                             bootmap_size + PAGE_SIZE-1));
454
455
456 #endif
457
/* Use inline assembly to define this because the nops are defined 
   as inline assembly strings in the include files and we cannot 
   get them easily into strings. */
asm("\t.data\nk8nops: " 
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8); 
    
extern unsigned char k8nops[];
/* k8_nops[n] points at an n-byte K8-optimal nop sequence (n = 1..8);
 * the sequences are laid out back-to-back in the k8nops blob above. */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
     NULL,
     k8nops,
     k8nops + 1,
     k8nops + 1 + 2,
     k8nops + 1 + 2 + 3,
     k8nops + 1 + 2 + 3 + 4,
     k8nops + 1 + 2 + 3 + 4 + 5,
     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
}; 
477
478 /* Replace instructions with better alternatives for this CPU type.
479
480    This runs before SMP is initialized to avoid SMP problems with
481    self modifying code. This implies that assymetric systems where
482    APs have less capabilities than the boot processor are not handled. 
483    In this case boot with "noreplacement". */ 
484 void apply_alternatives(void *start, void *end) 
485
486         struct alt_instr *a; 
487         int diff, i, k;
488         for (a = start; (void *)a < end; a++) { 
489                 if (!boot_cpu_has(a->cpuid))
490                         continue;
491
492                 BUG_ON(a->replacementlen > a->instrlen); 
493                 __inline_memcpy(a->instr, a->replacement, a->replacementlen); 
494                 diff = a->instrlen - a->replacementlen; 
495
496                 /* Pad the rest with nops */
497                 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
498                         k = diff;
499                         if (k > ASM_NOP_MAX)
500                                 k = ASM_NOP_MAX;
501                         __inline_memcpy(a->instr + i, k8_nops[k], k); 
502                 } 
503         }
504
505
506 static int no_replacement __initdata = 0; 
507  
508 void __init alternative_instructions(void)
509 {
510         extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
511         if (no_replacement) 
512                 return;
513         apply_alternatives(__alt_instructions, __alt_instructions_end);
514 }
515
516 static int __init noreplacement_setup(char *s)
517
518      no_replacement = 1; 
519      return 0; 
520
521
522 __setup("noreplacement", noreplacement_setup); 
523
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
     edd.edd_info_nr = EDD_NR;
}
#else
/* No-op stub when EDD support is not configured. */
static inline void copy_edd(void)
{
}
#endif
546
#if 0
/* Disabled under Xen: guests have no real-mode BIOS data area to protect.
 * Kept for reference against the native x86-64 setup code. */
#define EBDA_ADDR_POINTER 0x40E
static void __init reserve_ebda_region(void)
{
        unsigned int addr;
        /** 
         * there is a real-mode segmented pointer pointing to the 
         * 4K EBDA area at 0x40E
         */
        addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
        addr <<= 4;
        if (addr)
                reserve_bootmem_generic(addr, PAGE_SIZE);
}
#endif
562
563 /*
564  * Guest physical starts from 0.
565  */
566
567 unsigned long __init xen_end_of_ram(void)
568 {
569         unsigned long max_end_pfn = xen_start_info.nr_pages;
570
571         if ( xen_override_max_pfn <  max_end_pfn)
572                 xen_override_max_pfn = max_end_pfn;
573         
574         return xen_override_max_pfn;
575 }
576
577 static void __init print_memory_map(char *who)
578 {
579         int i;
580
581         for (i = 0; i < e820.nr_map; i++) {
582                 early_printk(" %s: %016Lx - %016Lx ", who,
583                         e820.map[i].addr,
584                         e820.map[i].addr + e820.map[i].size);
585                 switch (e820.map[i].type) {
586                 case E820_RAM:  early_printk("(usable)\n");
587                                 break;
588                 case E820_RESERVED:
589                                 early_printk("(reserved)\n");
590                                 break;
591                 case E820_ACPI:
592                                 early_printk("(ACPI data)\n");
593                                 break;
594                 case E820_NVS:
595                                 early_printk("(ACPI NVS)\n");
596                                 break;
597                 default:        early_printk("type %u\n", e820.map[i].type);
598                                 break;
599                 }
600         }
601 }
602
/*
 * Architecture-specific boot-time initialization for a Xen x86-64 guest:
 * copies start-of-day info from the hypervisor, parses early command-line
 * options, sets up bootmem/paging and the phys-to-machine table, reserves
 * standard resources, and picks a console.  Order of the calls below is
 * significant; do not reorder without checking dependencies.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned long low_mem_size;
        int i, j;
        physdev_op_t op;

#if 0
        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
#else
        /* Xen guests default to booting from the ramdisk device. */
        ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
#endif
        drive_info = DRIVE_INFO;

#ifdef CONFIG_XEN_PHYSDEV_ACCESS
        screen_info = SCREEN_INFO;
#endif
        edid_info = EDID_INFO;
        aux_device_present = AUX_DEVICE_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
/*        register_console(&xen_console); */

#ifdef CONFIG_XEN_PHYSDEV_ACCESS
        /* This is drawn from a dump from vgacon:startup in standard Linux. */
        screen_info.orig_video_mode = 3; 
        screen_info.orig_video_isVGA = 1;
        screen_info.orig_video_lines = 25;
        screen_info.orig_video_cols = 80;
        screen_info.orig_video_ega_bx = 3;
        screen_info.orig_video_points = 16;
#endif       
        ARCH_SETUP
        print_memory_map(machine_specific_memory_setup());

        /*      copy_edd();  */

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
/*      init_mm.brk = (unsigned long) &_end; */
        /* brk starts at the first frame the domain builder left free. */
        init_mm.brk = start_pfn << PAGE_SHIFT;


#if 0  /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
#endif
        parse_cmdline_early(cmdline_p);

        early_identify_cpu(&boot_cpu_data);

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
#if 0
        end_pfn = e820_end_of_ram();
#else
        /* Under Xen the hypervisor, not e820, defines the end of RAM. */
        end_pfn = xen_end_of_ram();
#endif

        check_efer();

        init_memory_mapping(0, (end_pfn << PAGE_SHIFT));

#ifdef CONFIG_ACPI_BOOT
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_DISCONTIGMEM
        numa_initmem_init(0, end_pfn); 
#else
        contig_initmem_init(); 
#endif

        /* Reserve direct mapping and shared info etc. */
//      reserve_bootmem_generic(table_start << PAGE_SHIFT, (table_end + 1 - table_start) << PAGE_SHIFT);

//      reserve_bootmem_generic(0, (table_end + 1) << PAGE_SHIFT);

        /* reserve kernel */
//      kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);

#if 0
        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);
#endif

        /* reserve ebda region */
/*      reserve_ebda_region(); */

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        /* The domain builder passes an initrd via mod_start. */
        if (xen_start_info.mod_start) {
                if (LOADER_TYPE && INITRD_START) {
                        if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                                /* reserve_bootmem_generic(INITRD_START, INITRD_SIZE); */
                                initrd_start = INITRD_START + PAGE_OFFSET;
                                initrd_end = initrd_start+INITRD_SIZE;
                                initrd_below_start_ok = 1;
                        }
                        else {
                                printk(KERN_ERR "initrd extends beyond end of memory "
                                       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                       (unsigned long)(INITRD_START + INITRD_SIZE),
                                       (unsigned long)(end_pfn << PAGE_SHIFT));
                                initrd_start = 0;
                        }
                }
        }
#endif
        paging_init();

        /* Make sure we have a large enough P->M table.
         * NOTE(review): sized with max_pfn while the condition tests
         * end_pfn — confirm the two agree here; also the table is
         * declared u32 * but sized/copied in sizeof(unsigned long)
         * units (8 bytes/entry on x86-64) — verify intended. */
        if (end_pfn > xen_start_info.nr_pages) {
                phys_to_machine_mapping = alloc_bootmem(
                        max_pfn * sizeof(unsigned long));
                memset(phys_to_machine_mapping, ~0,
                        max_pfn * sizeof(unsigned long));
                memcpy(phys_to_machine_mapping,
                        (unsigned long *)xen_start_info.mfn_list,
                        xen_start_info.nr_pages * sizeof(unsigned long));
                /* The hypervisor-provided copy is no longer needed. */
                free_bootmem(
                        __pa(xen_start_info.mfn_list), 
                        PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
                        sizeof(unsigned long))));
        }

        pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);

        /* Record the machine frame backing each page of the P->M table. */
        for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
        {       
             pfn_to_mfn_frame_list[j] = 
                  virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
        }

#if 0
        check_ioapic();
#endif

#ifdef CONFIG_ACPI_BOOT
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
#endif

        /* XXX Disable irqdebug until we have a way to avoid interrupt
         * conflicts. */
/*      noirqdebug_setup(""); */

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
#endif
/*      e820_reserve_resources();  */

        request_resource(&iomem_resource, &video_ram_resource);

        {
        unsigned i;
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < STANDARD_IO_RESOURCES; i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
        }

        /* Will likely break when you have unassigned resources with more
           than 4GB memory and bridges that don't support more than 4GB. 
           Doing it properly would require to use pci_alloc_consistent
           in this case. */
        low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
        if (low_mem_size > pci_mem_start)
                pci_mem_start = low_mem_size;

#ifdef CONFIG_GART_IOMMU
       iommu_hole_init();
#endif

        /* Ask Xen for I/O privilege level 1 so the kernel can touch ports. */
        op.cmd             = PHYSDEVOP_SET_IOPL;
        op.u.set_iopl.iopl = current->thread.io_pl = 1;
        HYPERVISOR_physdev_op(&op);

        if (xen_start_info.flags & SIF_INITDOMAIN) {
                if (!(xen_start_info.flags & SIF_PRIVILEGED))
                        panic("Xen granted us console access "
                              "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        } else {
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
                extern const struct consw xennull_con;
                extern int console_use_vt;
#if defined(CONFIG_VGA_CONSOLE)
                /* disable VGA driver */
                ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
#endif
                conswitchp = &xennull_con;
                console_use_vt = 0;
#endif
        }
}
869
870 static int __init get_model_name(struct cpuinfo_x86 *c)
871 {
872         unsigned int *v;
873
874         if (c->x86_cpuid_level < 0x80000004)
875                 return 0;
876
877         v = (unsigned int *) c->x86_model_id;
878         cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
879         cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
880         cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
881         c->x86_model_id[48] = 0;
882         return 1;
883 }
884
885
/*
 * Query the AMD extended CPUID leaves for L1/L2 cache and TLB geometry,
 * print it, and record cache size / TLB entries / address widths in *c.
 */
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->x86_cpuid_level;

        if (n >= 0x80000005) {
                /* Leaf 0x80000005: L1 D-cache (ECX) and I-cache (EDX). */
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                        edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size=(ecx>>24)+(edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                /* Leaf 0x80000006: L2 cache (ECX) and L2 TLB (EBX). */
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                /* NOTE(review): re-reads ECX that the cpuid() above already
                 * returned — looks redundant; confirm before removing. */
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                c->x86_cache_size, ecx & 0xFF);
        }

        /* Leaf 0x80000007: advanced power management feature bits. */
        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
        /* Leaf 0x80000008: physical/virtual address widths in EAX. */
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
919
920
/*
 * AMD-specific CPU initialization: 3DNow feature-bit cleanup,
 * C-stepping K8 detection, fallback model naming, cache info and
 * (under CONFIG_NUMA) dual-core CPU<->node mapping fixups.
 * Returns 1 if a CPUID brand string was read, 0 otherwise.
 */
static int __init init_amd(struct cpuinfo_x86 *c)
{
	int r;
	int level;
#ifdef CONFIG_NUMA
	int cpu;
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* C-stepping K8? */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_bit(X86_FEATURE_K8_C, &c->x86_capability);

	r = get_model_name(c);
	if (!r) {
		/* No brand-string leaves: fall back to a generic family name. */
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	if (c->x86_cpuid_level >= 0x80000008) {
		/* Core count per package from leaf 0x80000008 ECX[7:0]
		   (field is zero-based, hence the +1). */
		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
		/* Distrust core counts that are not a power of two. */
		if (c->x86_num_cores & (c->x86_num_cores - 1))
			c->x86_num_cores = 1;

#ifdef CONFIG_NUMA
		/* On a dual core setup the lower bits of apic id
		   distingush the cores. Fix up the CPU<->node mappings
		   here based on that.
		   Assumes number of cores is a power of two.
		   When using SRAT use mapping from SRAT. */
		cpu = c->x86_apicid;
		if (acpi_numa <= 0 && c->x86_num_cores > 1) {
			/* Shift out the core-select bits to get the node. */
			cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);
			if (!node_online(cpu_to_node[cpu]))
				cpu_to_node[cpu] = first_node(node_online_map);
		}
		printk(KERN_INFO "CPU %d(%d) -> Node %d\n",
				cpu, c->x86_num_cores, cpu_to_node[cpu]);
#endif
	}

	return r;
}
974
/*
 * Detect HyperThreading siblings (CPUID leaf 1, EBX[23:16]) and
 * derive this CPU's physical package id from its APIC id.
 * Compiled out on non-SMP kernels.
 */
static void __init detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32	eax, ebx, ecx, edx;
	int	index_lsb, index_msb, tmp;
	int	cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	/* EBX[23:16] = number of logical processors per package. */
	cpuid(1, &eax, &ebx, &ecx, &edx);
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		index_lsb = 0;
		index_msb = 31;
		/*
		 * At this point we only support two siblings per
		 * processor package.
		 */
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}
		/* index_lsb: bit position of the lowest set bit of the count. */
		tmp = smp_num_siblings;
		while ((tmp & 1) == 0) {
			tmp >>=1 ;
			index_lsb++;
		}
		/* index_msb: bit position of the highest set bit of the count. */
		tmp = smp_num_siblings;
		while ((tmp & 0x80000000 ) == 0) {
			tmp <<=1 ;
			index_msb--;
		}
		/* Non-power-of-two sibling count: round the width up by one. */
		if (index_lsb != index_msb )
			index_msb++;
		/* Package id = APIC id with the sibling bits shifted out. */
		phys_proc_id[cpu] = phys_pkg_id(index_msb);

		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);
	}
#endif
}
1021
1022 static void __init sched_cmp_hack(struct cpuinfo_x86 *c)
1023 {
1024 #ifdef CONFIG_SMP
1025         /* AMD dual core looks like HT but isn't really. Hide it from the
1026            scheduler. This works around problems with the domain scheduler.
1027            Also probably gives slightly better scheduling and disables
1028            SMT nice which is harmful on dual core.
1029            TBD tune the domain scheduler for dual core. */
1030         if (c->x86_vendor == X86_VENDOR_AMD && cpu_has(c, X86_FEATURE_CMP_LEGACY))
1031                 smp_num_siblings = 1;
1032 #endif
1033 }
1034         
1035 static void __init init_intel(struct cpuinfo_x86 *c)
1036 {
1037         /* Cache sizes */
1038         unsigned n;
1039
1040         init_intel_cacheinfo(c);
1041         n = c->x86_cpuid_level;
1042         if (n >= 0x80000008) {
1043                 unsigned eax = cpuid_eax(0x80000008);
1044                 c->x86_virt_bits = (eax >> 8) & 0xff;
1045                 c->x86_phys_bits = eax & 0xff;
1046         }
1047
1048         if (c->x86 == 15)
1049                 c->x86_cache_alignment = c->x86_clflush_size * 2;
1050 }
1051
1052 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1053 {
1054         char *v = c->x86_vendor_id;
1055
1056         if (!strcmp(v, "AuthenticAMD"))
1057                 c->x86_vendor = X86_VENDOR_AMD;
1058         else if (!strcmp(v, "GenuineIntel"))
1059                 c->x86_vendor = X86_VENDOR_INTEL;
1060         else
1061                 c->x86_vendor = X86_VENDOR_UNKNOWN;
1062 }
1063
/* Lookup-table entry mapping a (vendor, family) pair to model-name
   strings — presumably indexed by CPU model number; no user is
   visible in this file. */
struct cpu_model_info {
	int vendor;		/* X86_VENDOR_* constant */
	int family;		/* CPU family number */
	char *model_names[16];
};
1069
1070 /* Do some early cpuid on the boot CPU to get some parameter that are
1071    needed before check_bugs. Everything advanced is in identify_cpu
1072    below. */
/*
 * Minimal boot-time CPU identification: set sane defaults, read the
 * vendor string (leaf 0) and the family/model/stepping plus basic
 * capability words from leaf 1.  Everything advanced is done later
 * in identify_cpu().
 */
void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	/* Start from "unknown" defaults for every field. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_num_cores = 1;
	/* Boot CPU defaults to apicid 0; others to their cpu_data index. */
	c->x86_apicid = c == &boot_cpu_data ? 0 : c - cpu_data;
	c->x86_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	/* Register order is EBX, EDX, ECX — that is how the 12-byte
	   vendor string is laid out, hence the [0], [8], [4] offsets. */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		/* Family 0xf: real family/model live in the extended fields. */
		if (c->x86 == 0xf) {
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		/* CLFLUSH supported (EDX bit 19): line size is EBX[15:8]
		   in units of 8 bytes. */
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
		c->x86_apicid = misc >> 24;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}
}
1121
1122 /*
1123  * This does the hard work of actually picking apart the CPU stuff...
1124  */
/*
 * Full CPU identification: extended/Transmeta capability words,
 * model name, vendor-specific init, HT detection, and — for
 * secondary CPUs — intersection of capabilities into boot_cpu_data.
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->x86_cpuid_level = xlvl;
	/* 0x8000xxxx in EAX means the extended leaves exist. */
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[5] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);
	sched_cmp_hack(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
#ifdef CONFIG_NUMA
	if (c != &boot_cpu_data)
		numa_add_cpu(c - cpu_data);
#endif
}
1201  
1202
1203 void __init print_cpu_info(struct cpuinfo_x86 *c)
1204 {
1205         if (c->x86_model_id[0])
1206                 printk("%s", c->x86_model_id);
1207
1208         if (c->x86_mask || c->cpuid_level >= 0) 
1209                 printk(" stepping %02x\n", c->x86_mask);
1210         else
1211                 printk("\n");
1212 }
1213
/*
 *	Get CPU information for use by the procfs.
 */

/*
 * seq_file show callback for /proc/cpuinfo: emit one record per
 * online CPU — identity, cache, SMP topology, feature flags,
 * bogomips and power-management bits.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/* 
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		"pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
	};
	static char *x86_power_flags[] = { 
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
	};


#ifdef CONFIG_SMP
	/* Skip records for CPUs that are not online. */
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
	
	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	
	/* cpu_khz is only meaningful when a TSC is present. */
	if (cpu_has(c,X86_FEATURE_TSC)) {
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     cpu_khz / 1000, (cpu_khz % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0) 
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
	
#ifdef CONFIG_SMP
	seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
	seq_printf(m, "siblings\t: %d\n", c->x86_num_cores * smp_num_siblings);
#endif	

	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	/* Emit the name of every set capability bit that has one. */
	{ 
		int i; 
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				seq_printf(m, " %s", x86_cap_flags[i]);
	}
		
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0) 
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
		   c->x86_phys_bits, c->x86_virt_bits);

	/* Known power bits by name, unknown ones by their index. */
	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++) 
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags))
					seq_printf(m, " %s", x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}
	seq_printf(m, "\n");

	if (c->x86_num_cores > 1)
		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);

	/* Blank separator line between per-CPU records. */
	seq_printf(m, "\n\n"); 

	return 0;
}
1358
1359 static void *c_start(struct seq_file *m, loff_t *pos)
1360 {
1361         return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1362 }
1363
1364 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1365 {
1366         ++*pos;
1367         return c_start(m, pos);
1368 }
1369
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1373
/* seq_file operations backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next =	c_next,
	.stop =	c_stop,
	.show =	show_cpuinfo,
};