arch/mips/kernel/setup.c (linux-2.6.6, from ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002  Maciej W. Rozycki
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/utsname.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS];

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
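
/*
 * Note (editorial assumption, not in the original source): generic code
 * appears to test PCI_DMA_BUS_IS_PHYS to decide whether bus (DMA)
 * addresses can be treated as CPU physical addresses; defining it here
 * keeps the symbol available even on PCI-less configurations.
 */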

extern void * __rd_start, * __rd_end;

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype = MACH_UNKNOWN;
unsigned long mips_machgroup = MACH_GROUP_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);
EXPORT_SYMBOL(mips_machgroup);

struct boot_mem_map boot_mem_map;

static char command_line[CL_SIZE];
       char saved_command_line[CL_SIZE];
       char arcs_cmdline[CL_SIZE] = CONFIG_CMDLINE;
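
/*
 * Note (editorial assumption): arcs_cmdline is typically filled in by the
 * board/firmware interface code during prom_init(); CONFIG_CMDLINE only
 * supplies the compile-time default used when the firmware passes nothing.
 */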

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
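
/*
 * Illustrative note (assumption, not from the original file): on MIPS the
 * port I/O accessors are typically implemented as memory accesses at
 * mips_io_port_base + port, with board code installing the real base via
 * set_io_port_base() early during setup.
 */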

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.
 */
unsigned long isa_slot_offset;
EXPORT_SYMBOL(isa_slot_offset);

static struct resource code_resource = { "Kernel code" };
static struct resource data_resource = { "Kernel data" };

void __init add_memory_region(phys_t start, phys_t size, long type)
{
        int x = boot_mem_map.nr_map;
        struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;

        /*
         * Try to merge with previous entry if any.  This is far less than
         * perfect but is sufficient for most real-world cases.
         */
        if (x && prev->addr + prev->size == start && prev->type == type) {
                prev->size += size;
                return;
        }

        if (x == BOOT_MEM_MAP_MAX) {
                printk("Oops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}
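
/*
 * Usage sketch (illustrative, not part of the original source): platform
 * code calls this while translating the firmware memory map, e.g.
 *
 *      add_memory_region(0x00000000, 0x08000000, BOOT_MEM_RAM);
 *      add_memory_region(0x08000000, 0x08000000, BOOT_MEM_RAM);
 *
 * The second call merges with the first because it starts exactly where
 * the previous RAM entry ends, leaving a single 256 MB BOOT_MEM_RAM entry.
 */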

static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(" memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk("(usable)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk("(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk("(reserved)\n");
                        break;
                default:
                        printk("type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}
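
/*
 * Example output (illustrative; exact values depend on the board and on
 * sizeof(unsigned long)):
 *
 *      memory: 08000000 @ 00000000 (usable)
 *      memory: 00020000 @ 1fc00000 (ROM data)
 */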

static inline void parse_cmdline_early(void)
{
        char c = ' ', *to = command_line, *from = saved_command_line;
        unsigned long start_at, mem_size;
        int len = 0;
        int usermem = 0;

        printk("Determined physical RAM map:\n");
        print_memory_map();

        for (;;) {
                /*
                 * "mem=XXX[kKmM]" defines a memory region from
                 * 0 to <XXX>, overriding the determined size.
                 * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
                 * <YYY> to <YYY>+<XXX>, overriding the determined size.
                 */
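                /*
                 * Example (illustrative, not in the original comment):
                 * booting with "mem=64M@16M" registers the range
                 * 0x01000000-0x04ffffff as BOOT_MEM_RAM and discards the
                 * firmware-provided map.
                 */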
                if (c == ' ' && !memcmp(from, "mem=", 4)) {
                        if (to != command_line)
                                to--;
                        /*
                         * If a user specifies memory size, we
                         * blow away any automatically generated
                         * size.
                         */
                        if (usermem == 0) {
                                boot_mem_map.nr_map = 0;
                                usermem = 1;
                        }
                        mem_size = memparse(from + 4, &from);
                        if (*from == '@')
                                start_at = memparse(from + 1, &from);
                        else
                                start_at = 0;
                        add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
                }
                c = *(from++);
                if (!c)
                        break;
                if (CL_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';

        if (usermem) {
                printk("User-defined physical RAM map:\n");
                print_memory_map();
        }
}

#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)

#define MAXMEM          HIGHMEM_START
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)
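
/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12):
 * PFN_UP(0x1001) == 2, PFN_DOWN(0x1fff) == 1 and PFN_PHYS(2) == 0x2000,
 * so PFN_UP rounds a byte address up to the next whole page frame while
 * PFN_DOWN discards any partially covered page.
 */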

static inline void bootmem_init(void)
{
        unsigned long start_pfn;
#ifndef CONFIG_SGI_IP27
        unsigned long bootmap_size, max_low_pfn, first_usable_pfn;
        int i;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long tmp;
        unsigned long *initrd_header;

        tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
        if (tmp < (unsigned long)&_end)
                tmp += PAGE_SIZE;
        initrd_header = (unsigned long *)tmp;
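        /*
         * Note: 0x494E5244 is ASCII "INRD"; the check below looks for an
         * initrd header that the boot loader may have placed just past the
         * kernel image (word 0 = magic, word 1 = size in bytes, payload
         * starting at word 2).
         */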
        if (initrd_header[0] == 0x494E5244) {
                initrd_start = (unsigned long)&initrd_header[2];
                initrd_end = initrd_start + initrd_header[1];
        }
        start_pfn = PFN_UP(CPHYSADDR((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards.
         */
        start_pfn = PFN_UP(CPHYSADDR(&_end));
#endif  /* CONFIG_BLK_DEV_INITRD */

#ifndef CONFIG_SGI_IP27
        /* Find the highest page frame number we have available.  */
        max_pfn = 0;
        first_usable_pfn = -1UL;
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                      + boot_mem_map.map[i].size);

                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                if (start < first_usable_pfn) {
                        if (start > start_pfn) {
                                first_usable_pfn = start;
                        } else if (end > start_pfn) {
                                first_usable_pfn = start_pfn;
                        }
                }
        }
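
        /*
         * At this point (descriptive note, not in the original source)
         * max_pfn holds the highest RAM page frame number seen, and
         * first_usable_pfn is, in effect, the lowest RAM PFN at or above
         * start_pfn, i.e. the first frame past the kernel image (and any
         * appended initrd) where the bootmem bitmap can be placed.
         */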

        /*
         * Determine low and high memory ranges
         */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                       MAXMEM >> 20);
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
        }

#ifdef CONFIG_HIGHMEM
        /*
         * Crude, we really should make a better attempt at detecting
         * highstart_pfn
         */
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > MAXMEM_PFN) {
                highstart_pfn = MAXMEM_PFN;
                printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                       (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
        }
#endif

        /* Initialize the boot-time allocator with low memory only.  */
        bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long curr_pfn, last_pfn, size;

                /*
                 * Register usable (RAM) memory only.
                 */
                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
                if (curr_pfn >= max_low_pfn)
                        continue;
                if (curr_pfn < start_pfn)
                        curr_pfn = start_pfn;

                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
                                    + boot_mem_map.map[i].size);

                if (last_pfn > max_low_pfn)
                        last_pfn = max_low_pfn;

                /*
                 * Only register the lowmem part of the segment with bootmem.
                 */
                size = last_pfn - curr_pfn;
                if (curr_pfn > PFN_DOWN(HIGHMEM_START))
                        continue;
                if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
                        size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
                if (!size)
                        continue;

                /*
                 * ... finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
        }

        /* Reserve the bootmap memory.  */
        reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        /* Board specific code should have set up initrd_start and initrd_end */
        ROOT_DEV = Root_RAM0;
        if (&__rd_start != &__rd_end) {
                initrd_start = (unsigned long)&__rd_start;
                initrd_end = (unsigned long)&__rd_end;
        }
        initrd_below_start_ok = 1;
        if (initrd_start) {
                unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
                printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
                       (void *)initrd_start,
                       initrd_size);

                if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
                        printk("initrd extends beyond end of memory "
                               "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
                               sizeof(long) * 2, CPHYSADDR(initrd_end),
                               sizeof(long) * 2, PFN_PHYS(max_low_pfn));
                        initrd_start = initrd_end = 0;
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD  */
}

static inline void resource_init(void)
{
        int i;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext) - 1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata) - 1;

        /*
         * Request address space for all standard RAM.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= MAXMEM)
                        continue;
                if (end >= MAXMEM)
                        end = MAXMEM - 1;

                res = alloc_bootmem(sizeof(struct resource));
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                res->start = start;
                res->end = end;

                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);

                /*
                 *  We don't know which RAM region contains kernel data,
                 *  so we try it repeatedly and let the resource manager
                 *  test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
}
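
/*
 * Illustrative result (assumed values, not taken from the original source):
 * after resource_init() /proc/iomem contains entries such as
 *
 *      00000000-07ffffff : System RAM
 *        00100000-002abcff : Kernel code
 *        002abd00-0031ffff : Kernel data
 *
 * with the kernel code/data resources nested inside whichever RAM region
 * the request_resource() calls above accepted.
 */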

#undef PFN_UP
#undef PFN_DOWN
#undef PFN_PHYS

#undef MAXMEM
#undef MAXMEM_PFN

static int __initdata earlyinit_debug;

static int __init earlyinit_debug_setup(char *str)
{
        earlyinit_debug = 1;
        return 1;
}
__setup("earlyinit_debug", earlyinit_debug_setup);

extern initcall_t __earlyinitcall_start, __earlyinitcall_end;

static void __init do_earlyinitcalls(void)
{
        initcall_t *call, *start, *end;

        start = &__earlyinitcall_start;
        end = &__earlyinitcall_end;

        for (call = start; call < end; call++) {
                if (earlyinit_debug)
                        printk("calling earlyinitcall 0x%p\n", *call);

                (*call)();
        }
}
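
/*
 * Usage note (editorial): booting with "earlyinit_debug" on the kernel
 * command line sets the flag above, so the address of each early initcall
 * is logged before it runs; the calls themselves come from the
 * __earlyinitcall_start..__earlyinitcall_end range, presumably laid out by
 * the linker script.
 */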

void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        prom_init();
        cpu_report();

#ifdef CONFIG_MIPS32
        /* Disable coprocessors and set FPU for 16/32 FPR register model */
        clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR);
        set_c0_status(ST0_CU0);
#endif
#ifdef CONFIG_MIPS64
        /*
         * On IP27, I am seeing the TS bit set when the kernel is loaded.
         * Maybe because the kernel is in ckseg0 and not xkphys? Clear it
         * anyway ...
         */
        clear_c0_status(ST0_BEV|ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3);
        set_c0_status(ST0_CU0|ST0_KX|ST0_SX|ST0_FR);
#endif

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        /* call board setup routine */
        do_earlyinitcalls();

        strlcpy(command_line, arcs_cmdline, sizeof(command_line));
        strlcpy(saved_command_line, command_line, sizeof(saved_command_line));

        *cmdline_p = command_line;

        parse_cmdline_early();
        bootmem_init();
        paging_init();
        resource_init();
}

int __init fpu_disable(char *s)
{
        cpu_data[0].options &= ~MIPS_CPU_FPU;

        return 1;
}

__setup("nofpu", fpu_disable);
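
/*
 * Usage note (editorial): passing "nofpu" on the command line clears the
 * MIPS_CPU_FPU capability bit for the boot CPU, so later FPU setup treats
 * the machine as FPU-less (presumably falling back to the kernel's FPU
 * emulator where one is configured).
 */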