/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

/* Allocate a zeroed page for kernel page tables, from the right pool
   depending on how far along boot we are. */
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

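/*
 * Illustrative usage (a hedged sketch, not part of the original file):
 * callers normally go through the set_fixmap()/set_fixmap_nocache()
 * wrappers from <asm/fixmap.h> rather than calling __set_fixmap()
 * directly. Mapping the local APIC registers at boot, for example, is
 * done along these lines:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 *
 * which expands to __set_fixmap(FIX_APIC_BASE, apic_phys,
 * PAGE_KERNEL_NOCACHE) and lands in set_pte_phys() above.
 */
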
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	/* Before bootmem exists the page cannot be reached through the
	   direct mapping yet, so map it through a temporary 2MB pmd. */
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

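/*
 * Illustrative usage (hedged sketch, not from this file): the main client
 * of this pair is the DMI/SMBIOS scan, via the dmi_ioremap()/dmi_iounmap()
 * aliases in <asm/dmi.h>, roughly:
 *
 *	char *p = dmi_ioremap(0xF0000, 0x10000);
 *	if (p) {
 *		... look for the "_SM_" / "_DMI_" anchors ...
 *		dmi_iounmap(p, 0x10000);
 *	}
 *
 * Both calls must happen before zap_low_mappings(), since the temporary
 * pmds live in the boot page tables set up by head.S.
 */
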
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;
		/* Skip ranges that e820 says contain no RAM at all. */
		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 4KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}

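/*
 * Worked example (illustrative numbers, not from the original source):
 * for end = 4GB the direct mapping needs
 *	puds = 4     -> round_up(4 * 8, 4096)    = one 4K page of pud_t
 *	pmds = 2048  -> round_up(2048 * 8, 4096) = four 4K pages of pmd_t
 * so "tables" is 20KB, carved out of low memory by find_e820_area().
 */
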
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

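/*
 * Worked example (illustrative, not from the original source): on a flat
 * 4GB box with start_pfn = 0 and end_pfn = 0x100000, the cumulative sizes
 * computed above start out as
 *	z[ZONE_DMA]    = 0x1000   (16MB)
 *	z[ZONE_DMA32]  = 0x100000 (4GB)
 *	z[ZONE_NORMAL] = 0x100000
 * and the "remove lower zones" loop turns them into disjoint spans:
 *	z[ZONE_DMA] = 0x1000, z[ZONE_DMA32] = 0xff000, z[ZONE_NORMAL] = 0.
 */
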
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
	       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

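/*
 * Usage note (hedged): the expected caller is the GART IOMMU setup, which
 * punches the graphics aperture out of the direct mapping so speculative
 * CPU prefetches cannot touch it, roughly:
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
 *
 * with both arguments already 2MB aligned as required above.
 */
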
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is supposed to find the node id of a
 *	physical address for the probe interface of sysfs. If ACPI notifies
 *	a hot-add event it can tell the node id by searching the DSDT, but
 *	the probe interface carries no node id, so return 0 for now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size -1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86-64, access has to be given to the first megabyte of RAM because
 * that area contains BIOS code and data regions used by X and dosemu and
 * similar apps. Access has to be given to non-kernel-RAM areas as well;
 * these contain the PCI mmio resources as well as potential bios/acpi data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

EXPORT_SYMBOL_GPL(page_is_ram);

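/*
 * Example of the resulting policy (illustrative): reading the VGA BIOS at
 * physical 0xC0000 through /dev/mem is allowed (page 0xC0 <= 256), and so
 * is PCI mmio space (not E820_RAM), while ordinary kernel RAM above the
 * first megabyte is refused.
 */
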
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Canonical addresses have all-zero or all-one upper bits. */
	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

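/*
 * Illustrative caller (hedged): /proc/kcore probes candidate kernel virtual
 * addresses with this before copying them out, along the lines of:
 *
 *	if (kern_addr_valid(start)) {
 *		if (copy_to_user(buffer, (char *)start, tsz))
 *			return -EFAULT;
 *	}
 */
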
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}