/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>

#include <asm/a.out.h>
#include <asm/bitops.h>
#include <asm/machvec.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

static int pgt_cache_water[2] = { 25, 50 };

struct page *zero_page_memmap_ptr;		/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
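
/*
 * check_pgt_cache() trims the cached page-table pages: once the cache grows past
 * the high water mark (pgt_cache_water[1]), cached pgd/pmd pages are handed back
 * to the page allocator until the cache drops below the low water mark.
 */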
void
check_pgt_cache (void)
{
	int low, high;

	low = pgt_cache_water[0];
	high = pgt_cache_water[1];

	if (pgtable_cache_size > (u64) high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0));
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
		} while (pgtable_cache_size > (u64) low);
	}
}
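
/*
 * Keep the i-cache coherent with the d-cache for executable pages. The flush is
 * done lazily: PG_arch_1 records that the page's i-cache is already coherent, so
 * each page is flushed at most once until the bit is cleared again.
 */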
void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
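
/*
 * Pick the bottom of the register backing store: the RBS grows upward from
 * rbs_bot, which is placed STACK_TOP minus the (16-byte aligned) RLIMIT_STACK
 * maximum, capped at MAX_USER_STACK_SIZE.
 */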
void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
		insert_vm_struct(current->mm, vma);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}
void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}
int page_is_ram(unsigned long pagenr)
{
	/* FIXME: implement with an EFI memory-map walk */
	printk(KERN_DEBUG "page_is_ram called\n");
	return 1;	/* assume RAM until the EFI walk is implemented */
}
/*
 * This installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	{
		pmd = pmd_alloc(&init_mm, pgd, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_map(&init_mm, pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte)) {
			pte_unmap(pte);
			goto out;
		}
		set_pte(pte, mk_pte(page, pgprot));
		pte_unmap(pte);
	}
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}
void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF headers etc. and
	 * once as an execute-only page to enable privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
#endif
	ia64_patch_gate();
}
void __init
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __devinit tlb_init (void);
	int cpu;

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();
	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
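
	/*
	 * Worked example (illustrative, assuming 8-byte PTEs, i.e. pte_bits == 3, and
	 * 16KB pages, i.e. PAGE_SHIFT == 14): mapped_space_bits = 3*(14-3) + 14 = 47.
	 * With impl_va_bits == 51, vmlpt_bits = 51 - 14 + 3 = 40, so the VMLPT occupies
	 * the top 2^40 bytes of the region and pta = 2^61 - 2^40, comfortably above the
	 * 2^47-byte mapped space, i.e. no overlap.
	 */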
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif

	cpu = smp_processor_id();

	/* mca handler uses cr.lid as key to pick the right entry */
	ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

	/* insert this percpu data information into our list for MCA recovery purposes */
	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
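
/*
 * Build the kernel page-table entries that back the virtual mem_map for the
 * physical range [start, end): for every page of vmem_map touched by that range,
 * allocate any missing pgd/pmd/pte levels from node-local bootmem and map a
 * freshly allocated page.
 */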
static int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pgd, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};
static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}
void
memmap_init (struct page *start, unsigned long size, int nid,
	     unsigned long zone, unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(start, size, nid, zone, start_pfn);
	else {
		struct memmap_init_callback_data args;

		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}
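
/*
 * With a virtual mem_map, a pfn is only usable if the struct page describing it is
 * itself backed by a mapped page. ia64_pfn_valid() probes the first and (if the
 * entry crosses a page boundary) the last byte of the mem_map entry with
 * __get_user(), relying on the exception tables to catch a fault on an unmapped
 * hole.
 */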
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
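
/*
 * Counter callback for efi_memmap_walk(): adds the number of reserved pages in
 * [start, end) to the unsigned long pointed to by arg.
 */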
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}
/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);
	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page table pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
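	/*
	 * Illustrative numbers (not from the source): with 4GB free in 16KB pages,
	 * nr_free_pages() is about 262144, so 262144 / PTRS_PER_PGD (2048) + 500 gives
	 * roughly 628 cached page-table pages, well under the 10% cap of ~26214, and the
	 * high water mark is raised from 50 to ~628.
	 */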
#	define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > (u64) pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;
	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
#ifdef CONFIG_IA32_SUPPORT
	ia32_boot_gdt_init();
#endif
}