/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

#define HUGEPGDIR_SHIFT		(HPAGE_SHIFT + PAGE_SHIFT - 3)
#define HUGEPGDIR_SIZE		(1UL << HUGEPGDIR_SHIFT)
#define HUGEPGDIR_MASK		(~(HUGEPGDIR_SIZE-1))

#define HUGEPTE_INDEX_SIZE	9
#define HUGEPGD_INDEX_SIZE	10

#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define PTRS_PER_HUGEPGD	(1 << HUGEPGD_INDEX_SIZE)

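/*
 * With the 16MB huge pages this code assumes (HPAGE_SHIFT == 24) and 4k
 * base pages, each hugepte page holds 1 << 9 == 512 huge PTEs and so maps
 * 512 * 16MB == 8GB, which is exactly HUGEPGDIR_SIZE (1UL << (24+12-3)).
 * The huge page directory has 1 << 10 such entries.
 */
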
static inline int hugepgd_index(unsigned long addr)
{
	return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
}

static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
{
	int index;

	if (! mm->context.huge_pgdir)
		return NULL;
	index = hugepgd_index(addr);
	BUG_ON(index >= PTRS_PER_HUGEPGD);
	return mm->context.huge_pgdir + index;
}

static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr)
{
	int index;

	if (pgd_none(*dir))
		return NULL;
	index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
	return (pte_t *)pgd_page(*dir) + index;
}

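/*
 * The two allocation helpers below are entered with mm->page_table_lock
 * held.  Since kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT)
 * may sleep, they drop the lock around the allocation, retake it, and
 * then re-check whether somebody else populated the entry in the
 * meantime.
 */
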
static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
{
	BUG_ON(! in_hugepage_area(mm->context, addr));

	if (! mm->context.huge_pgdir) {
		pgd_t *new;

		spin_unlock(&mm->page_table_lock);
		/* Don't use pgd_alloc(), because we want __GFP_REPEAT */
		new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
		BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
		spin_lock(&mm->page_table_lock);

		/*
		 * Because we dropped the lock, we should re-check the
		 * entry, as somebody else could have populated it..
		 */
		if (mm->context.huge_pgdir)
			kmem_cache_free(zero_cache, new);
		else
			mm->context.huge_pgdir = new;
	}
	return hugepgd_offset(mm, addr);
}

static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
			    unsigned long addr)
{
	if (! pgd_present(*dir)) {
		pte_t *new;

		spin_unlock(&mm->page_table_lock);
		new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
		BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
		spin_lock(&mm->page_table_lock);
		/*
		 * Because we dropped the lock, we should re-check the
		 * entry, as somebody else could have populated it..
		 */
		if (pgd_present(*dir)) {
			if (new)
				kmem_cache_free(zero_cache, new);
		} else {
			struct page *ptepage;

			if (! new)
				return NULL;
			ptepage = virt_to_page(new);
			ptepage->mapping = (void *) mm;
			ptepage->index = addr & HUGEPGDIR_MASK;
			pgd_populate(mm, dir, new);
		}
	}

	return hugepte_offset(dir, addr);
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	BUG_ON(! in_hugepage_area(mm->context, addr));
	pgd = hugepgd_offset(mm, addr);
	if (! pgd)
		return NULL;
	return hugepte_offset(pgd, addr);
}

static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	BUG_ON(! in_hugepage_area(mm->context, addr));
	pgd = hugepgd_alloc(mm, addr);
	if (! pgd)
		return NULL;
	return hugepte_alloc(mm, pgd, addr);
}

static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *ptep, int write_access)
{
	pte_t entry;

	// mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
	if (write_access) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	set_pte(ptep, entry);
}

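/*
 * Note that set_huge_pte() accounts a whole huge page against the mm's
 * rss: with 16MB huge pages and 4k base pages, HPAGE_SIZE / PAGE_SIZE is
 * 4096 base pages per huge page.
 */
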
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)) )
		return -EINVAL;

	return 0;
}

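/*
 * Flush the SLB entries for the low segments given in the mask.  This
 * runs on every CPU (via on_each_cpu() in open_low_hpage_segs()) so that
 * later SLB misses pick up the new hugepage segment bits from the updated
 * context.
 */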
static void flush_segments(void *parm)
{
	u16 segs = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	for (i = 0; i < 16; i++) {
		if (! (segs & (1U << i)))
			continue;
		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
	}

	asm volatile("isync" : : : "memory");
}

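/*
 * A low (below 4GB) segment can only be converted to hugepages if nothing
 * is mapped in it.  prepare_low_seg_for_htlb() checks that the segment is
 * free of VMAs and then frees any leftover (necessarily empty) normal PTE
 * pages, which would otherwise simply be leaked once the segment is
 * translated through the huge page tables instead.
 */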
static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;
	unsigned long addr;
	struct mmu_gather *tlb;

	BUG_ON(seg >= 16);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	/* Clean up any leftover PTE pages in the region */
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd;
		struct page *page;
		pte_t *pte;
		int i;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (pmd_bad(*pmd)) {
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
			continue;
		}
		pte = (pte_t *)pmd_page_kernel(*pmd);
		/* No VMAs, so there should be no PTEs, check just in case. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			BUG_ON(!pte_none(*pte));
			pte++;
		}
		page = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		pte_free_tlb(tlb, page);
	}
	tlb_finish_mmu(tlb, start, end);
	spin_unlock(&mm->page_table_lock);

	return 0;
}

static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
	unsigned long i;

	newsegs &= ~(mm->context.htlb_segs);
	if (! newsegs)
		return 0; /* The segments we want are already open */

	for (i = 0; i < 16; i++)
		if ((1 << i) & newsegs)
			if (prepare_low_seg_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.htlb_segs |= newsegs;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

	return 0;
}

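/*
 * There are two separate hugepage ranges: 32-bit (TIF_32BIT) tasks use
 * low 256MB segments below 4GB, opened on demand and tracked in
 * mm->context.htlb_segs, while 64-bit tasks use the fixed high range from
 * TASK_HPAGE_BASE to TASK_HPAGE_END.  A range is acceptable only if it
 * lies entirely within one of the two.
 */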
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (within_hugepage_high_range(addr, len))
		return 0;
	else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) {
		int err;
		/* Yes, we need both tests, in case addr+len overflows
		 * 64-bit arithmetic */
		err = open_low_hpage_segs(current->mm,
					  LOW_ESID_MASK(addr, len));
		if (err)
			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
			       " failed (segs: 0x%04hx)\n", addr, len,
			       LOW_ESID_MASK(addr, len));
		return err;
	}

	return -EINVAL;
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int err = -ENOMEM;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto out;

		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;

		ptepage = pte_page(entry);
		get_page(ptepage);
		// dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
		set_pte(dst_pte, entry);

		addr += HPAGE_SIZE;
	}

	err = 0;

 out:
	return err;
}

int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || pte_none(*pte));

			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((start % HPAGE_SIZE) != 0);
	BUG_ON((end % HPAGE_SIZE) != 0);

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t pte;

		ptep = huge_pte_offset(mm, addr);
		if (!ptep || pte_none(*ptep))
			continue;

		pte = *ptep;
		page = pte_page(pte);
		pte_clear(ptep);

		put_page(page);
	}
	// mm->rss -= (end - start) >> PAGE_SHIFT;
	vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
	flush_tlb_pending();
}

void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
			   unsigned long start, unsigned long end)
{
	/* Because the huge pgtables are only 2 level, they can take
	 * at most around 4M, much less than one hugepage which the
	 * process is presumably entitled to use.  So we don't bother
	 * freeing up the pagetables on unmap, and wait until
	 * destroy_context() to clean up the lot. */
}

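/*
 * For reference, the "around 4M" above: at most PTRS_PER_HUGEPGD (1 << 10)
 * hugepte pages of PAGE_SIZE (4k) each, i.e. 4MB, plus one page for the
 * huge pgdir itself.
 */
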
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (! pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (! ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
 out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

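/*
 * All hugepage mappings are populated here at mmap() time; there is no
 * hugepage fault path in this code, which is why follow_hugetlb_page()
 * above merely WARNs when it finds an empty PTE.
 */
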
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}
	start_addr = addr = mm->free_area_cache;

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_END;
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_BASE - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}

static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len)
{
	unsigned long addr = TASK_HPAGE_BASE;
	struct vm_area_struct *vma;

	for (vma = find_vma(current->mm, addr);
	     addr + len <= TASK_HPAGE_END;
	     vma = vma->vm_next) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
		BUG_ON(! within_hugepage_high_range(addr, len));

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);

		/* Because we're in a hugepage region, this alignment
		 * should not skip us over any VMAs */
	}

	return -ENOMEM;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		int lastshift = 0;
		u16 segmask, cursegs = current->mm->context.htlb_segs;

		/* First see if we can do the mapping in the existing
		 * low hpage segments */
		addr = htlb_get_low_area(len, cursegs);
		if (addr != -ENOMEM)
			return addr;

		for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; segmask >>=1) {
			if (segmask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, cursegs | segmask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_segs(current->mm, segmask) == 0)
				return addr;
		}
		printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
		       " enough segments\n");
		return -ENOMEM;
	} else {
		return htlb_get_high_area(len);
	}
}

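/*
 * The 32-bit search above starts from the segment mask that would place
 * the mapping immediately below 4GB (LOW_ESID_MASK(0x100000000UL-len, len))
 * and shifts it right to try progressively lower placements; a candidate
 * address is only returned once the corresponding segments have actually
 * been opened with open_low_hpage_segs().
 */
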
void hugetlb_mm_free_pgd(struct mm_struct *mm)
{
	int i;
	pgd_t *pgdir;

	spin_lock(&mm->page_table_lock);

	pgdir = mm->context.huge_pgdir;
	if (! pgdir)
		goto out;

	mm->context.huge_pgdir = NULL;

	/* cleanup any hugepte pages leftover */
	for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
		pgd_t *pgd = pgdir + i;

		if (! pgd_none(*pgd)) {
			pte_t *pte = (pte_t *)pgd_page(*pgd);
			struct page *ptepage = virt_to_page(pte);

			ptepage->mapping = NULL;

			BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
			kmem_cache_free(zero_cache, pte);
		}
		pgd_clear(pgd);
	}

	BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
	kmem_cache_free(zero_cache, pgdir);

 out:
	spin_unlock(&mm->page_table_lock);
}

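/*
 * This is the deferred cleanup referred to in hugetlb_free_pgtables():
 * it is invoked when the context is destroyed, handing the hugepte pages
 * and the huge pgdir back to zero_cache.  The memcmp() BUG_ON()s verify
 * that the pages really are all-zero again at that point.
 */
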
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	pte_t *ptep;
	unsigned long va, vpn;
	int is_write;
	pte_t old_pte, new_pte;
	unsigned long hpteflags, prpn;
	long slot;
	int err = 1;

	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/* BUG_ON(pte_bad(*ptep)); */

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	is_write = access & _PAGE_RW;
	if (unlikely(is_write && !(pte_val(*ptep) & _PAGE_RW)))
		goto out;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	old_pte = *ptep;
	new_pte = old_pte;

	hpteflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, 1);
		if (pte_val(old_pte) & _PAGE_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;

		if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, 1);
		unsigned long hpte_group;

		prpn = pte_pfn(old_pte);

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* Update the linux pte with the HPTE slot */
		pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
		pte_val(new_pte) |= _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		hpteflags |= _PAGE_COHERENT;

		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
					  hpteflags, 0, 1);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			pte_val(new_pte) |= _PAGE_SECONDARY;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  1, hpteflags, 0, 1);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;

		/*
		 * No need to use ldarx/stdcx here because all who
		 * might be updating the pte will hold the
		 * page_table_lock
		 */
		*ptep = new_pte;
	}

	err = 0;

 out:
	spin_unlock(&mm->page_table_lock);

	return err;
}