/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif
#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */
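/* Dump a summary of memory usage to the console: total, reserved,
   shared and swap-cached page counts, summed over all nodes. */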
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end[];
extern char _stext[];
extern char __init_begin, __init_end;

int after_bootmem;
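/* Allocate a page for a kernel page table.  Uses the bootmem allocator
   before mem_init() has run and get_zeroed_page() afterwards. */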
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");
	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
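/* Install a single kernel PTE mapping vaddr to phys with the given
   protection, allocating any missing intermediate page table levels. */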
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pml4_t *level4;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	level4 = pml4_offset_k(vaddr);
	if (pml4_none(*level4)) {
		printk("PML4 FIXMAP MISSING, it should be set up in head.S!\n");
		return;
	}
	pgd = level3_offset_k(level4, vaddr);
	if (pgd_none(*pgd)) {
		pmd = (pmd_t *) spp_getpage();
		set_pgd(pgd, __pgd(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pgd, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pgd, 0));
			return;
		}
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
unsigned long __initdata table_start, table_end;
extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
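/* Allocate the next page-table page from the range reserved in
   init_memory_mapping() and make it addressable through one of the
   temporary boot PMDs.  Returns the virtual address; the mapping index
   and the page's physical address are passed back via *index and *phys. */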
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}
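/* Tear down the temporary mapping installed by alloc_low_page(). */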
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
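/* Populate one PML4 entry's worth of the direct mapping for the physical
   range [address, end) using 2MB pages.  Ranges without usable memory in
   the e820 map are left unmapped. */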
static void __init phys_pgd_init(pgd_t *pgd, unsigned long address, unsigned long end)
{
	long i = pgd_index(address), j;

	for (pgd += i; i < PTRS_PER_PGD; pgd++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PML4_MASK) + i*PGDIR_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PGD; i++, pgd++)
				set_pgd(pgd, __pgd(0));
			break;
		}
		if (!e820_mapped(paddr, paddr+PGDIR_SIZE, 0)) {
			set_pgd(pgd, __pgd(0));
			continue;
		}
		pmd = alloc_low_page(&map, &pmd_phys);
		set_pgd(pgd, __pgd(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;
			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}
/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(void)
{
	unsigned long adr;
	unsigned long end;
	unsigned long next;
	unsigned long pgds, pmds, tables;

	Dprintk("init_memory_mapping\n");

	end = end_pfn_map << PAGE_SHIFT;

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(pgds * 8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	end += __PAGE_OFFSET; /* turn virtual */

	for (adr = PAGE_OFFSET; adr < end; adr = next) {
		int map;
		unsigned long pgd_phys;
		pgd_t *pgd = alloc_low_page(&map, &pgd_phys);
		next = adr + PML4_SIZE;
		if (next > end)
			next = end;
		phys_pgd_init(pgd, adr-PAGE_OFFSET, next-PAGE_OFFSET);
		set_pml4(init_level4_pgt + pml4_index(adr), mk_kernel_pml4(pgd_phys));
		unmap_low_page(map);
	}
	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
		     table_start << PAGE_SHIFT,
		     table_end << PAGE_SHIFT);
}
extern struct x8664_pda cpu_pda[NR_CPUS];

static unsigned long low_pml4[NR_CPUS];
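/* Exchange the first PML4 entry of each CPU's page table with the copy
   saved in low_pml4[].  This installs or removes the low identity
   mappings that CPUs need for their protected-mode boot entry. */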
void swap_low_mappings(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		unsigned long t;
		if (!cpu_pda[i].level4_pgt)
			continue;
		t = cpu_pda[i].level4_pgt[0];
		cpu_pda[i].level4_pgt[0] = low_pml4[i];
		low_pml4[i] = t;
	}
	flush_tlb_all();
}
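/* Drop the low identity mappings once boot no longer needs them. */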
void zap_low_mappings(void)
{
	swap_low_mappings();
}
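/* Set up the zone sizes (ZONE_DMA below MAX_DMA_ADDRESS, the rest in
   ZONE_NORMAL) and initialize the page allocator for the flat model. */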
#ifndef CONFIG_DISCONTIGMEM
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned int max_dma;

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	if (end_pfn < max_dma)
		zones_size[ZONE_DMA] = end_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = end_pfn - max_dma;
	}
	free_area_init(zones_size);
}
#endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pmd_t *pmd;
		if (!pgd || pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
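/* Check against the e820 map whether a page frame number is backed by
   usable RAM. */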
int page_is_ram(unsigned long pagenr)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;
		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/* !!!FIXME!!! Some BIOSen report areas as RAM that are not,
		   notably the 640K->1MB area. We need a sanity check here. */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
extern int swiotlb_force;
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * On x86-64, access has to be given to the first megabyte of RAM because
 * that area contains BIOS code and data used by X, dosemu and similar apps.
 * Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(page_is_ram);
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
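/* Late memory setup: release the bootmem pages to the page allocator,
   count reserved pages and print the final memory statistics. */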
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_DISCONTIGMEM
	totalram_pages += numa_free_all_bootmem();
	tmp = 0;
	/* should count reserved pages here for all nodes */
#else
	max_mapnr = end_pfn;
	if (!mem_map) BUG();
	totalram_pages += free_all_bootmem();
	for (tmp = 0; tmp < end_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
			reservedpages++;
#endif

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
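/* Free the memory occupied by the __init sections once boot is done. */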
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
#ifdef CONFIG_INIT_DEBUG
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
#endif
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
}
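/* Give the pages holding the initrd image back to the page allocator. */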
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif
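/* Reserve a physical range with the bootmem allocator; on NUMA the
   reservation is made against the node that owns the address. */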
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_DISCONTIGMEM
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}
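/* Walk the kernel page tables and report whether addr is actually
   mapped.  2MB large pages are checked at the pmd level. */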
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pml4_t *pml4 = pml4_offset_k(addr);
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;
	if (pml4_none(*pml4))
		return 0;
	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;
	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
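/* Boot-time registration of the x86-64 specific debug sysctls under
   /proc/sys/debug. */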
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#ifdef CONFIG_CHECKING
	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#endif
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
/* Pseudo VMAs to allow ptrace access for the vsyscall pages.  x86-64 has
   two different ones: one for 32bit and one for 64bit.  Use the
   appropriate one for the target task. */
static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

static struct vm_area_struct gate32_vma = {
	.vm_start = VSYSCALL32_BASE,
	.vm_end = VSYSCALL32_END,
	.vm_page_prot = PAGE_READONLY
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32)) {
		/* lookup code assumes the pages are present. set them up now */
		if (__map_syscall32(tsk->mm, VSYSCALL32_BASE) < 0)
			return NULL;
		return &gate32_vma;
	}
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}