/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/desc.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int do_test_wp_bit(void);

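/*
 * Physical pages 0x70000000-0x7003ffff must not be handed to the allocator
 * on CPUs affected by the Pentium Pro RAM bug (see ppro_with_ram_bug());
 * callers pass bad_ppro to request this filtering.
 */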
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

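/*
 * Return 1 if the given page frame lies in usable RAM according to the
 * firmware memory map (EFI if enabled, otherwise the BIOS e820 map).
 */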
int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;

		for (i = 0; i < memmap.nr_map; i++) {
			md = &memmap.map[i];
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
		 *	are not. Notably the 640->1Mb area. We need a sanity
		 *	check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

pte_t *kmap_pte;

EXPORT_SYMBOL(kmap_pte);

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

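/*
 * Cache the kernel pte backing the FIX_KMAP_BEGIN fixmap slot; the highmem
 * kmap code uses it to install temporary mappings without re-walking the
 * page tables.
 */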
void __init kmap_init(void)
{
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
}

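/*
 * Give one highmem page to the allocator, unless it is not RAM or falls in
 * the Pentium Pro erratum range, in which case it stays reserved.
 */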
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);
		__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}

EXPORT_SYMBOL_GPL(page_is_ram);

#ifdef CONFIG_HIGHMEM

#ifndef CONFIG_DISCONTIGMEM
void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#else
extern void set_highmem_pages_init(int);
#endif /* !CONFIG_DISCONTIGMEM */
#else
# define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif

unsigned long __PAGE_KERNEL = _PAGE_KERNEL;

#ifndef CONFIG_DISCONTIGMEM
#define remap_numa_kva() do {} while (0)
#else
extern void __init remap_numa_kva(void);
#endif

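/*
 * Ensure a pte page exists for 'address': allocate one from the bootmem
 * allocator and install it in the pmd if nothing is mapped there yet.
 */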
static __init void prepare_pagetables(pgd_t *pgd_base, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(address);
	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
	}
}

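/*
 * Walk [start, end) page by page and pre-build the intermediate page
 * tables, leaving the pte entries themselves to be filled in later.
 */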
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	unsigned long vaddr;

	for (vaddr = start; vaddr != end; vaddr += PAGE_SIZE)
		prepare_pagetables(pgd_base, vaddr);
}

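/*
 * Map the virtual range [start, end) onto physical memory starting at 0
 * (virtual 'start' maps to physical 0).  Large PSE pages are used when the
 * CPU supports them, otherwise ordinary 4K ptes are allocated.
 */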
void setup_identity_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end)
{
	unsigned long vaddr;
	pgd_t *pgd;
	int i, j, k;
	pmd_t *pmd;
	pte_t *pte, *pte_base;

	pgd = pgd_base;

	for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
		vaddr = i*PGDIR_SIZE;
		if (end && (vaddr >= end))
			break;
		pmd = pmd_offset(pgd, 0);
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
			vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
			if (end && (vaddr >= end))
				break;
			if (vaddr < start)
				continue;
			if (cpu_has_pse) {
				unsigned long __pe;

				set_in_cr4(X86_CR4_PSE);
				boot_cpu_data.wp_works_ok = 1;
				__pe = _KERNPG_TABLE + _PAGE_PSE + vaddr - start;
				/* Make it "global" too if supported */
				if (cpu_has_pge) {
					set_in_cr4(X86_CR4_PGE);
#if !defined(CONFIG_X86_SWITCH_PAGETABLES)
					__pe += _PAGE_GLOBAL;
					__PAGE_KERNEL |= _PAGE_GLOBAL;
#endif
				}
				set_pmd(pmd, __pmd(__pe));
				continue;
			}
			if (!pmd_present(*pmd))
				pte_base = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			else
				pte_base = (pte_t *) pte_offset_kernel(pmd, 0);
			pte = pte_base;
			for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
				vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
				if (end && (vaddr >= end))
					break;
				if (vaddr < start)
					continue;
				*pte = mk_pte_phys(vaddr-start, PAGE_KERNEL);
			}
			set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
		}
	}
}

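/*
 * Build the kernel page tables in swapper_pg_dir: the lowmem mapping at
 * PAGE_OFFSET, the fixmap range and, with CONFIG_HIGHMEM, the permanent
 * kmap page table.
 */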
static void __init pagetable_init (void)
{
	unsigned long vaddr, end;
	pgd_t *pgd_base;
#ifdef CONFIG_X86_PAE
	int i;
#endif

	/*
	 * This can be zero as well - no problem, in that case we exit
	 * the loops anyway due to the PTRS_PER_* conditions.
	 */
	end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

	pgd_base = swapper_pg_dir;
#ifdef CONFIG_X86_PAE
	/*
	 * It causes too many problems if there's no proper pmd set up
	 * for all 4 entries of the PGD - so we allocate all of them.
	 * PAE systems will not miss this extra 4-8K anyway ...
	 */
	for (i = 0; i < PTRS_PER_PGD; i++) {
		pmd_t *pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pgd(pgd_base + i, __pgd(__pa(pmd) + 0x1));
	}
#endif
	/*
	 * Set up lowmem-sized identity mappings at PAGE_OFFSET:
	 */
	setup_identity_mappings(pgd_base, PAGE_OFFSET, end);

	/*
	 * Add flat-mode identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. (In the non-PAE
	 * case we already have these mappings through head.S.)
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
#if defined(CONFIG_SMP) && defined(CONFIG_X86_PAE)
	setup_identity_mappings(pgd_base, 0, 16*1024*1024);
#endif
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
	{
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;

		/*
		 * Permanent kmaps:
		 */
		vaddr = PKMAP_BASE;
		fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

		pgd = swapper_pg_dir + pgd_index(vaddr);
		pmd = pmd_offset(pgd, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pkmap_page_table = pte;
	}
#endif
}

#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

/*
 * Clear kernel pagetables in a PMD_SIZE-aligned range.
 */
static void clear_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	int i, j;

	pgd = pgd_base;

	for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
		vaddr = i*PGDIR_SIZE;
		if (end && (vaddr >= end))
			break;
		pmd = pmd_offset(pgd, 0);
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
			vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
			if (end && (vaddr >= end))
				break;
			if (vaddr < start)
				continue;
			pmd_clear(pmd);
		}
	}
	flush_tlb_all();
}

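/*
 * Remove the low identity mappings once they are no longer needed for CPU
 * bringup, saving a copy of swapper_pg_dir for software suspend first.
 */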
void zap_low_mappings(void)
{
	printk("zapping low mappings.\n");

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 */
	clear_mappings(swapper_pg_dir, 0, 16*1024*1024);
}

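/*
 * Size the DMA, NORMAL and (if configured) HIGHMEM zones from the pfn
 * limits and hand the result to free_area_init().
 */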
#ifndef CONFIG_DISCONTIGMEM
void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned int max_dma, high, low;

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	low = max_low_pfn;
	high = highend_pfn;

	if (low < max_dma)
		zones_size[ZONE_DMA] = low;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = high - low;
#endif
	}
	free_area_init(zones_size);
}
#else
extern void zone_sizes_init(void);
#endif /* !CONFIG_DISCONTIGMEM */

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();
	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
	kmap_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

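/*
 * Derive max_mapnr/num_physpages (and highmem_start_page when highmem is
 * configured) from the pfn limits established during early setup.
 */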
#ifndef CONFIG_DISCONTIGMEM
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	highmem_start_page = pfn_to_page(highstart_pfn);
	max_mapnr = num_physpages = highend_pfn;
#else
	max_mapnr = num_physpages = max_low_pfn;
#endif
}
#define __free_all_bootmem() free_all_bootmem()
#else
#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
extern void set_max_mapnr_init(void);
#endif /* !CONFIG_DISCONTIGMEM */

static struct kcore_list kcore_mem, kcore_vmalloc;

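/*
 * Final memory accounting: release bootmem to the page allocator, count
 * reserved pages, register the /proc/kcore regions and print the usual
 * "Memory: ..." summary line.
 */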
void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += __free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	entry_trampoline_setup();
	default_ldt_page = virt_to_page(default_ldt);
	load_LDT(&init_mm.context);
}

kmem_cache_t *pgd_cache, *pmd_cache, *kpmd_cache;

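/*
 * Create the slab caches used to allocate pgds (and pmds when PAE is
 * enabled); the constructor/destructor callbacks initialise newly
 * allocated page-table pages.
 */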
void __init pgtable_cache_init(void)
{
	void (*ctor)(void *, kmem_cache_t *, unsigned long);
	void (*dtor)(void *, kmem_cache_t *, unsigned long);

	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");

		if (TASK_SIZE > PAGE_OFFSET) {
			kpmd_cache = kmem_cache_create("kpmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					kpmd_ctor,
					NULL);
			if (!kpmd_cache)
				panic("pgtable_cache_init(): "
						"cannot create kpmd cache");
		}
	}

	if (PTRS_PER_PMD == 1 || TASK_SIZE <= PAGE_OFFSET)
		ctor = pgd_ctor;
	else
		ctor = NULL;

	if (PTRS_PER_PMD == 1 && TASK_SIZE <= PAGE_OFFSET)
		dtor = pgd_dtor;
	else
		dtor = NULL;

	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				ctor,
				dtor);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

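/*
 * Return the pages holding the __init sections to the page allocator once
 * booting has finished.
 */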
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}

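/*
 * Free the pages that held the initial ramdisk image once it is no longer
 * needed.
 */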
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif