/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        if (pmd_table != pmd_offset(pgd, 0))
                BUG();
#else
        pmd_table = pmd_offset(pgd, 0);
#endif

        return pmd_table;
}
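/*
 * Background note: without PAE, i386 uses a two-level layout (pgd ->
 * pte) and the pmd level is folded into the pgd, so pmd_offset(pgd, 0)
 * simply re-interprets the pgd entry in place. With PAE there are three
 * real levels (pgd -> pmd -> pte), hence the bootmem allocation above.
 */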
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}
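/*
 * Note the different flags in the two helpers above: the PAE top-level
 * entry is installed with bare _PAGE_PRESENT, since the hardware PDPT
 * entry format treats most attribute bits as reserved, while pte pages
 * are hooked up with the full _PAGE_TABLE set (present, writable,
 * user, accessed, dirty).
 */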
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                if (pgd_none(*pgd))
                        one_md_table_init(pgd);

                pmd = pmd_offset(pgd, vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        if (pmd_none(*pmd))
                                one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
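/*
 * Each inner-loop step advances vaddr by PMD_SIZE (4MB without PAE,
 * 2MB with PAE). The termination test is "vaddr != end" rather than
 * "<", so a caller may pass end == 0 to mean "up to the very top of
 * the address space": vaddr wraps to 0 after the last pmd.
 */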
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                /*
                                 * Advance address together with pfn so the
                                 * text check tracks each individual page.
                                 */
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                        }
                }
        }
}
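/*
 * Sizing note: with PSE one pmd entry maps PTRS_PER_PTE small pages in
 * one go, i.e. 1024 * 4KB = 4MB without PAE, or 512 * 4KB = 2MB with
 * PAE. address2 above is the last byte of that range, so a large page
 * is marked executable whenever its first or last byte falls within
 * kernel text.
 */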
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}
extern int is_available_memory(efi_memory_desc_t *);

/* Not static: page_is_ram() is exported to modules below. */
int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;

                for (i = 0; i < memmap.nr_map; i++) {
                        md = &memmap.map[i];
                        if (!is_available_memory(md))
                                continue;
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
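/*
 * The pfn math above is deliberately conservative: the start address is
 * rounded up to a whole page and the end is rounded down (truncated),
 * so a region's partial head and tail pages are never reported as RAM.
 */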
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
 * valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that area
 * contains BIOS code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-RAM areas as well; these contain the PCI
 * mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        /* 256 pages of 4KB = the first megabyte */
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

EXPORT_SYMBOL_GPL(page_is_ram);
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                set_bit(PG_highmem, &page->flags);
                set_page_count(page, 1);
                __free_page(page);
                totalhigh_pages++;
        } else
                SetPageReserved(page);
}

#ifndef CONFIG_DISCONTIGMEM
void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#else
extern void set_highmem_pages_init(int);
#endif /* !CONFIG_DISCONTIGMEM */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */
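/*
 * The no-op stubs above let the generic init paths below call
 * kmap_init(), permanent_kmaps_init() and set_highmem_pages_init()
 * unconditionally, without sprinkling CONFIG_HIGHMEM ifdefs around.
 */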
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifndef CONFIG_DISCONTIGMEM
#define remap_numa_kva() do {} while (0)
#else
extern void __init remap_numa_kva(void);
#endif
static void __init pagetable_init (void)
{
        unsigned long vaddr;
        pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
        int i;
        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

        /* Enable PSE if available */
        if (cpu_has_pse) {
                set_in_cr4(X86_CR4_PSE);
        }

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        page_table_range_init(vaddr, 0, pgd_base);

        permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
#endif
}
#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif
void zap_low_mappings (void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}
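/*
 * PAE note: "1 + __pa(empty_zero_page)" or's _PAGE_PRESENT (bit 0)
 * into the address, so the top-level entry stays valid but points at
 * the all-zero page; the non-PAE case can simply clear the entry.
 */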
#ifndef CONFIG_DISCONTIGMEM
void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma, high, low;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
        high = highend_pfn;

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}
#else
extern void zone_sizes_init(void);
#endif /* !CONFIG_DISCONTIGMEM */
static int disable_nx __initdata = 0;
u64 __supported_pte_mask = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable (disables exec-shield too)
 */
static int __init noexec_setup(char *str)
{
        if (!strncmp(str, "on", 2) && cpu_has_nx) {
                __supported_pte_mask |= _PAGE_NX;
                disable_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 1;
}

__setup("noexec=", noexec_setup);
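/*
 * Usage: boot with "noexec=on" or "noexec=off" on the kernel command
 * line; __setup() registers noexec_setup() as the handler for the
 * "noexec=" prefix, so str points just past the '='.
 */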
#ifdef CONFIG_X86_PAE
int nx_enabled = 0;

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        __flush_tlb_all();
out:
        return ret;
}

#endif /* CONFIG_X86_PAE */
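/*
 * Bit arithmetic above: with PAE, _PAGE_BIT_NX is bit 63 of the 64-bit
 * pte, so set_kernel_exec() toggles bit (_PAGE_BIT_NX - 32) == 31 in
 * pte_high, the upper half of the entry. CPUID leaf 0x80000001 EDX
 * bit 20 is the NX-capability flag checked in set_nx().
 */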
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
        zone_sizes_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
        } else
                printk("Ok.\n");
}
#ifndef CONFIG_DISCONTIGMEM
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = pfn_to_page(highstart_pfn);
        max_mapnr = num_physpages = highend_pfn;
#else
        max_mapnr = num_physpages = max_low_pfn;
#endif
}
#define __free_all_bootmem() free_all_bootmem()
#else
#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
extern void set_max_mapnr_init(void);
#endif /* !CONFIG_DISCONTIGMEM */
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifndef CONFIG_DISCONTIGMEM
        if (!mem_map)
                BUG();
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += __free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}
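/*
 * Unit conversion in the printk above: "<< (PAGE_SHIFT-10)" turns a
 * page count into kilobytes (with 4KB pages, a left shift by 2, i.e.
 * x4), while ">> 10" converts the raw byte sizes of the text, data
 * and init sections to kilobytes.
 */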
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        0,
                                        pmd_ctor,
                                        NULL);
                if (!pmd_cache)
                        panic("pgtable_cache_init(): cannot create pmd cache");
        }
        pgd_cache = kmem_cache_create("pgd",
                                PTRS_PER_PGD*sizeof(pgd_t),
                                PTRS_PER_PGD*sizeof(pgd_t),
                                0,
                                pgd_ctor,
                                PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}
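/*
 * PTRS_PER_PMD > 1 only holds with PAE (three-level paging), so the
 * pmd slab cache exists only there; the pgd cache is always created,
 * and only the folded (non-PAE) case registers a destructor.
 */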
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"     /* write to the RO fixmap page; faults if WP works */
                "       xorl %2,%2      \n"     /* reached only if the write succeeded: flag = 0 */
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"     /* on a fault at 1b, resume at 2b leaving flag = 1 */
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif