/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>

#include <asm/a.out.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

static int pgt_cache_water[2] = { 25, 50 };

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
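
/*
 * Shrink the page-table quicklists: once the cached-page count
 * (pgtable_cache_size) exceeds the high water mark, free pages back to the
 * page allocator until it drops below the low water mark.
 */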
void
check_pgt_cache (void)
{
	int low, high;

	low = pgt_cache_water[0];
	high = pgt_cache_water[1];

	if (pgtable_cache_size > (u64) high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0));
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
		} while (pgtable_cache_size > (u64) low);
	}
}
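
/*
 * Keep the i-cache coherent with the d-cache for executable pages: flush the
 * page's i-cache lines the first time it is mapped executable and remember
 * that fact in the PG_arch_1 page flag.
 */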
void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
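
/*
 * Place the bottom of the register backing store at STACK_TOP minus the
 * RLIMIT_STACK hard limit, capped at MAX_USER_STACK_SIZE.
 */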
void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}
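
/*
 * Return the pages of the kernel's __init sections to the page allocator once
 * boot-time initialization has finished.
 */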
void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}
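
/*
 * Free the pages holding the initial ramdisk image; the comment below explains
 * why the range has to be trimmed to kernel-page boundaries first.
 */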
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This is like put_dirty_page() but installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	pmd = pmd_alloc(&init_mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(&init_mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	set_pte(pte, mk_pte(page, pgprot));
	pte_unmap(pte);
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}
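
/* Called from mem_init() below, before boot memory is freed up. */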
static void
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
	 * execute-only page to enable privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
#endif
	ia64_patch_gate();
}
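
/*
 * Per-CPU MMU setup: pin the per-CPU data area into the TLB, program the
 * virtually mapped linear page table (VHPT), and record the TLB-flush
 * parameters needed by the MCA handler.
 */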
void
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __init tlb_init (void);
	int cpu;

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif

	cpu = smp_processor_id();

	/* mca handler uses cr.lid as key to pick the right entry */
	ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

	/* insert this percpu data information into our list for MCA recovery purposes */
	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
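
/*
 * Populate kernel page tables for the portion of the virtual mem_map that
 * describes physical memory [start, end).  The (start, end, arg) signature
 * matches the efi_memmap_walk() callbacks used elsewhere in this file.
 */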
int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pgd, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
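
/* Arguments handed from memmap_init() to the virtual_memmap_init() callback. */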
struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}
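
/*
 * Arch-specific memmap_init(): without a virtual mem_map, defer to the generic
 * memmap_init_zone(); otherwise initialize only the struct pages that back
 * actual memory, as described by the EFI memory map.
 */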
void
memmap_init (struct page *start, unsigned long size, int nid,
	     unsigned long zone, unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(start, size, nid, zone, start_pfn);
	else {
		struct memmap_init_callback_data args;

		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}
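
/*
 * With a virtual mem_map, pfns that fall into a memory hole have no backing
 * struct page.  Probe the corresponding mem_map entry with __get_user(), which
 * returns non-zero if the access faults, to decide whether the pfn is valid.
 */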
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
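
/*
 * Memory-descriptor walk callback that records the largest gap seen between
 * the end of one descriptor and the start of the next.
 */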
int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */
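
/*
 * efi_memmap_walk() callback (see mem_init() below) that counts the reserved
 * pages in [start, end) and adds the count to *arg.
 */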
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
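
/*
 * Final memory setup: hand boot memory over to the page allocator, print the
 * memory statistics, size the page-table cache, mark fsyscall entries that
 * lack a light-weight handler, and install the gate page.
 */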
void
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page-table pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
#	define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > (u64) pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();	/* setup gate pages before we free up boot memory... */

#ifdef CONFIG_IA32_SUPPORT
	ia32_boot_gdt_init();
#endif
}