/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
/*
 * HugePTE layout:
 *
 * 31 30 ... 15 14 13 12 11 10 9  8  7   6    5    4    3    2    1    0
 * PFN>>12.....  -  -  -  -  -  -  - HASH_IX....  2ND  HASH  RW   -   HG=1
 */
#define HUGEPTE_SHIFT           15
#define _HUGEPAGE_PFN           0xffff8000
#define _HUGEPAGE_BAD           0x00007f00
#define _HUGEPAGE_HASHPTE       0x00000008
#define _HUGEPAGE_SECONDARY     0x00000010
#define _HUGEPAGE_GROUP_IX      0x000000e0
#define _HUGEPAGE_HPTEFLAGS     (_HUGEPAGE_HASHPTE | _HUGEPAGE_SECONDARY | \
                                 _HUGEPAGE_GROUP_IX)
#define _HUGEPAGE_RW            0x00000004

typedef struct {unsigned int val;} hugepte_t;
#define hugepte_val(hugepte)    ((hugepte).val)
#define __hugepte(x)            ((hugepte_t) { (x) } )
#define hugepte_pfn(x)          \
        ((unsigned long)(hugepte_val(x) >> HUGEPTE_SHIFT) << HUGETLB_PAGE_ORDER)
#define mk_hugepte(page,wr)     __hugepte( \
        ((page_to_pfn(page) >> HUGETLB_PAGE_ORDER) << HUGEPTE_SHIFT) \
        | (!!(wr) * _HUGEPAGE_RW) | _PMD_HUGEPAGE )

#define hugepte_bad(x)  ( !(hugepte_val(x) & _PMD_HUGEPAGE) || \
                          (hugepte_val(x) & _HUGEPAGE_BAD) )
#define hugepte_page(x) pfn_to_page(hugepte_pfn(x))
#define hugepte_none(x) (!(hugepte_val(x) & _HUGEPAGE_PFN))
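
/*
 * Worked example (illustrative, assuming 4K base pages and 16MB huge
 * pages, so HUGETLB_PAGE_ORDER == 12): a writable huge page at physical
 * address 0x10000000 has pfn 0x10000, and mk_hugepte() encodes it as
 * (0x10000 >> 12) << 15 == 0x80000, with _HUGEPAGE_RW and _PMD_HUGEPAGE
 * set in the low bits.
 */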
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
                                hugepte_t pte, int local);
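
/*
 * hugepte_update() atomically clears the bits in 'clr' and sets the bits
 * in 'set' in a hugepte using lwarx/stwcx., returning the old value
 * (mirroring pte_update() for normal ptes).
 */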
static inline unsigned int hugepte_update(hugepte_t *p, unsigned int clr,
                                          unsigned int set)
{
        unsigned int old, tmp;

        __asm__ __volatile__(
        "1:     lwarx   %0,0,%3         # pte_update\n\
        andc    %1,%0,%4 \n\
        or      %1,%1,%5 \n\
        stwcx.  %1,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
{
        hugepte_update(ptep, ~_HUGEPAGE_HPTEFLAGS,
                       hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}
static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        BUG_ON(!in_hugepage_area(mm->context, addr));

        pgd = pgd_offset(mm, addr);
        pmd = pmd_alloc(mm, pgd, addr);

        /* We shouldn't find a (normal) PTE page pointer here */
        BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

        return (hugepte_t *)pmd;
}
static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        BUG_ON(!in_hugepage_area(mm->context, addr));

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return NULL;

        pmd = pmd_offset(pgd, addr);

        /* We shouldn't find a (normal) PTE page pointer here */
        BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

        return (hugepte_t *)pmd;
}
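
/*
 * A huge page (16MB on POWER4) is mapped by HUGEPTE_BATCH_SIZE identical
 * hugeptes sitting in consecutive pmd slots, so setup_huge_pte() and
 * teardown_huge_pte() below always operate on the whole batch.
 */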
static void setup_huge_pte(struct mm_struct *mm, struct page *page,
                           hugepte_t *ptep, int write_access)
{
        hugepte_t entry;
        int i;

        // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        entry = mk_hugepte(page, write_access);
        for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
                set_hugepte(ptep+i, entry);
}
static void teardown_huge_pte(hugepte_t *ptep)
{
        int i;

        for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
                pmd_clear((pmd_t *)(ptep+i));
}
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (! (within_hugepage_low_range(addr, len)
               || within_hugepage_high_range(addr, len)) )
                return -EINVAL;

        return 0;
}
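
/*
 * flush_segments() runs on each CPU (via on_each_cpu()) when low segments
 * are switched over to hugepage use: it issues slbie for every segment
 * named in the bitmask so that stale SLB entries are dropped before the
 * segment is used with huge pages.
 */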
static void flush_segments(void *parm)
{
        u16 segs = (unsigned long) parm;
        unsigned long i;

        asm volatile("isync" : : : "memory");

        for (i = 0; i < 16; i++) {
                if (! (segs & (1U << i)))
                        continue;
                asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
        }

        asm volatile("isync" : : : "memory");
}
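
/*
 * prepare_low_seg_for_htlb() checks that a single 256MB low segment can be
 * converted to hugepage use: it fails if any VMA overlaps the segment and
 * otherwise reclaims any empty PTE pages still hanging off the segment's
 * page tables.
 */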
static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
        unsigned long start = seg << SID_SHIFT;
        unsigned long end = (seg+1) << SID_SHIFT;
        struct vm_area_struct *vma;
        unsigned long addr;
        struct mmu_gather *tlb;

        /* Check no VMAs are in the region */
        vma = find_vma(mm, start);
        if (vma && (vma->vm_start < end))
                return -EBUSY;

        /* Clean up any leftover PTE pages in the region */
        spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);
        for (addr = start; addr < end; addr += PMD_SIZE) {
                pgd_t *pgd = pgd_offset(mm, addr);
                pmd_t *pmd;
                struct page *page;
                pte_t *pte;
                int i;

                if (pgd_none(*pgd))
                        continue;
                pmd = pmd_offset(pgd, addr);
                if (!pmd || pmd_none(*pmd))
                        continue;

                pte = (pte_t *)pmd_page_kernel(*pmd);
                /* No VMAs, so there should be no PTEs, check just in case. */
                for (i = 0; i < PTRS_PER_PTE; i++) {
                        BUG_ON(!pte_none(*pte));
                        pte++;
                }

                page = pmd_page(*pmd);
                pmd_clear(pmd);
                dec_page_state(nr_page_table_pages);
                pte_free_tlb(tlb, page);
        }
        tlb_finish_mmu(tlb, start, end);
        spin_unlock(&mm->page_table_lock);

        return 0;
}
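
/*
 * open_low_hpage_segs() converts the low segments named in 'newsegs' to
 * hugepage segments: each one is prepared (and must be empty), the new
 * mask is recorded in mm->context.htlb_segs, and the SLBs on all CPUs are
 * then flushed so that later SLB misses see the change.
 */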
static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
        unsigned long i;

        newsegs &= ~(mm->context.htlb_segs);
        if (! newsegs)
                return 0; /* The segments we want are already open */

        for (i = 0; i < 16; i++)
                if ((1 << i) & newsegs)
                        if (prepare_low_seg_for_htlb(mm, i) != 0)
                                return -EBUSY;

        mm->context.htlb_segs |= newsegs;

        /* the context change must make it to memory before the flush,
         * so that further SLB misses do the right thing. */
        mb();

        on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

        return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
        if (within_hugepage_high_range(addr, len))
                return 0;
        else if ((addr < 0x100000000) && ((addr+len) < 0x100000000)) {
                int err;
                /* Yes, we need both tests, in case addr+len overflows
                 * 64-bit arithmetic */
                err = open_low_hpage_segs(current->mm,
                                          LOW_ESID_MASK(addr, len));
                if (err)
                        printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
                               " failed (segs: 0x%04hx)\n", addr, len,
                               LOW_ESID_MASK(addr, len));
                return err;
        }

        return -EINVAL;
}
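
/*
 * Worked example for LOW_ESID_MASK (illustrative, assuming 256MB segments,
 * i.e. SID_SHIFT == 28): addr = 0x30000000, len = 0x20000000 covers low
 * segments 3 and 4, so LOW_ESID_MASK(addr, len) == 0x0018 and those two
 * segments are the ones opened above.
 */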
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        hugepte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                BUG_ON(! in_hugepage_area(src->context, addr));
                BUG_ON(! in_hugepage_area(dst->context, addr));

                dst_pte = hugepte_alloc(dst, addr);
                if (!dst_pte)
                        return -ENOMEM;

                src_pte = hugepte_offset(src, addr);
                entry = *src_pte;

                if ((addr % HPAGE_SIZE) == 0) {
                        /* This is the first hugepte in a batch */
                        ptepage = hugepte_page(entry);
                        get_page(ptepage);
                        // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                        vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                }
                set_hugepte(dst_pte, entry);

                addr += PMD_SIZE;
        }

        return 0;
}
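
/*
 * follow_hugetlb_page() is the get_user_pages() back-end for hugepage
 * areas: it walks the range a base page at a time and hands back the
 * individual PAGE_SIZE subpages of the (already prefaulted) huge pages.
 */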
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {
                BUG_ON(!in_hugepage_area(mm->context, vaddr));

                if (pages) {
                        hugepte_t *pte;
                        struct page *page;

                        pte = hugepte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || hugepte_none(*pte));

                        page = &hugepte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *position = vaddr;
        *length = remainder;
        return i;
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
        return pmd_hugepage(pmd);
}
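
/*
 * follow_huge_pmd() returns the PAGE_SIZE subpage, within the huge page
 * mapped by *pmd, that contains 'address'.
 */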
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        BUG_ON(! pmd_hugepage(*pmd));

        page = hugepte_page(*(hugepte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);

        return page;
}
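
/*
 * unmap_hugepage_range() tears down all hugeptes in [start, end), flushes
 * any hashed HPTEs, drops the page references and fixes up the RSS
 * accounting. 'local' is set when this CPU is the only one that has used
 * the mm, allowing a cheaper local HPTE invalidation.
 */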
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        hugepte_t *ptep;
        struct page *page;
        int cpu;
        int local = 0;
        cpumask_t tmp;

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON((start % HPAGE_SIZE) != 0);
        BUG_ON((end % HPAGE_SIZE) != 0);

        /* XXX are there races with checking cpu_vm_mask? - Anton */
        cpu = get_cpu();
        tmp = cpumask_of_cpu(cpu);
        if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
                local = 1;

        for (addr = start; addr < end; addr += HPAGE_SIZE) {
                hugepte_t pte;

                BUG_ON(!in_hugepage_area(mm->context, addr));

                ptep = hugepte_offset(mm, addr);
                if (!ptep || hugepte_none(*ptep))
                        continue;

                pte = *ptep;
                page = hugepte_page(pte);
                teardown_huge_pte(ptep);

                if (hugepte_val(pte) & _HUGEPAGE_HASHPTE)
                        flush_hash_hugepage(mm->context, addr,
                                            pte, local);

                put_page(page);
        }
        put_cpu();

        // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
        BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                hugepte_t *pte = hugepte_alloc(mm, addr);
                struct page *page;

                BUG_ON(!in_hugepage_area(mm->context, addr));

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!hugepte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (ret) {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                        unlock_page(page);
                }
                setup_huge_pte(mm, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
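/*
 * The reserved regions in question are: for 32-bit tasks, whichever 256MB
 * low segments have been opened in mm->context.htlb_segs; for 64-bit
 * tasks, the fixed TASK_HPAGE_BASE..TASK_HPAGE_END window. Normal
 * mappings are simply stepped over those ranges below.
 */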
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (((TASK_SIZE - len) >= addr)
                    && (!vma || (addr+len) <= vma->vm_start)
                    && !is_hugepage_only_range(addr,len))
                        return addr;
        }
        start_addr = addr = mm->free_area_cache;

full_search:
        vma = find_vma(mm, addr);
        while (TASK_SIZE - len >= addr) {
                BUG_ON(vma && (addr >= vma->vm_end));

                if (touches_hugepage_low_range(addr, len)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (touches_hugepage_high_range(addr, len)) {
                        addr = TASK_HPAGE_END;
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                addr = vma->vm_end;
                vma = vma->vm_next;
        }

        /* Make sure we didn't miss any holes */
        if (start_addr != TASK_UNMAPPED_BASE) {
                start_addr = addr = TASK_UNMAPPED_BASE;
                goto full_search;
        }
        return -ENOMEM;
}
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long base = mm->mmap_base, addr = addr0;
        int first_time = 1;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* dont allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start)
                    && !is_hugepage_only_range(addr,len))
                        return addr;
        }

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or cant fit in requested address hole */
        addr = (mm->free_area_cache - len) & PAGE_MASK;
        do {
hugepage_recheck:
                if (touches_hugepage_low_range(addr, len)) {
                        addr = (addr & ((~0) << SID_SHIFT)) - len;
                        goto hugepage_recheck;
                } else if (touches_hugepage_high_range(addr, len)) {
                        addr = TASK_HPAGE_BASE - len;
                        goto hugepage_recheck;
                }

                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr+len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end)))
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);

                /* pull free_area_cache down to the first hole */
                if (mm->free_area_cache == vma->vm_end)
                        mm->free_area_cache = vma->vm_start;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                first_time = 0;
                goto try_again;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;

        return addr;
}
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
        unsigned long addr = 0;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        while (addr + len <= 0x100000000UL) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

                if (! __within_hugepage_low_range(addr, len, segmask)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(current->mm, addr);
                        continue;
                }

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;

                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Depending on segmask this might not be a confirmed
                 * hugepage region, so the ALIGN could have skipped
                 * over some addresses; look up the vma again */
                vma = find_vma(current->mm, addr);
        }

        return -ENOMEM;
}
static unsigned long htlb_get_high_area(unsigned long len)
{
        unsigned long addr = TASK_HPAGE_BASE;
        struct vm_area_struct *vma;

        for (vma = find_vma(current->mm, addr);
             addr + len <= TASK_HPAGE_END;
             vma = vma->vm_next) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
                BUG_ON(! within_hugepage_high_range(addr, len));

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;

                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Because we're in a hugepage region, this alignment
                 * should not skip us over any VMAs */
        }

        return -ENOMEM;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;

        if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
                return -EINVAL;

        if (test_thread_flag(TIF_32BIT)) {
                int lastshift = 0;
                u16 segmask, cursegs = current->mm->context.htlb_segs;

                /* First see if we can do the mapping in the existing
                 * low hpage segments */
                addr = htlb_get_low_area(len, cursegs);
                if (addr != -ENOMEM)
                        return addr;
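
                /*
                 * Otherwise widen the candidate segment mask step by step:
                 * start from the mask for the highest possible placement
                 * (just below 4GB) and shift it right one segment at a
                 * time.  Illustrative example: for len = 0x20000000 (512MB)
                 * the masks tried are 0xc000, 0x6000, 0x3000, ... down to
                 * 0x0003.
                 */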
                for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
                     ! lastshift; segmask >>= 1) {
                        if (segmask & 1)
                                lastshift = 1;

                        addr = htlb_get_low_area(len, cursegs | segmask);
                        if ((addr != -ENOMEM)
                            && open_low_hpage_segs(current->mm, segmask) == 0)
                                return addr;
                }
                printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
                       " enough segments\n");
                return -ENOMEM;
        } else {
                return htlb_get_high_area(len);
        }
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
                   unsigned long ea, unsigned long vsid, int local)
{
        hugepte_t *ptep;
        unsigned long va, vpn;
        int is_write;
        hugepte_t old_pte, new_pte;
        unsigned long hpteflags, prpn, flags;
        long slot;

        /* We have to find the first hugepte in the batch, since
         * that's the one that will store the HPTE flags */
        ea &= HPAGE_MASK;
        ptep = hugepte_offset(mm, ea);

        /* Search the Linux page table for a match with va */
        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> HPAGE_SHIFT;

        /*
         * If no pte found or not present, send the problem up to
         * do_page_fault
         */
        if (unlikely(!ptep || hugepte_none(*ptep)))
                return 1;

        BUG_ON(hugepte_bad(*ptep));

        /*
         * Check the user's access rights to the page. If access should be
         * prevented then send the problem up to do_page_fault.
         */
        is_write = access & _PAGE_RW;
        if (unlikely(is_write && !(hugepte_val(*ptep) & _HUGEPAGE_RW)))
                return 1;

        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

        spin_lock_irqsave(&mm->page_table_lock, flags);

        old_pte = *ptep;
        new_pte = old_pte;

        hpteflags = 0x2 | (! (hugepte_val(new_pte) & _HUGEPAGE_RW));
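
        /*
         * hpteflags is the HPTE PP value: 0x2 (user read/write) when the
         * hugepte allows writes, 0x3 (user read-only) otherwise.
         */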
        /* Check if pte already has an hpte (case 2) */
        if (unlikely(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(vpn, 1);
                if (hugepte_val(old_pte) & _HUGEPAGE_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
                slot += (hugepte_val(old_pte) & _HUGEPAGE_GROUP_IX) >> 5;

                if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
                        hugepte_val(old_pte) &= ~_HUGEPAGE_HPTEFLAGS;
        }

        if (likely(!(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, 1);
                unsigned long hpte_group;

                prpn = hugepte_pfn(old_pte);

repeat:
                hpte_group = ((hash & htab_data.htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;

                /* Update the linux pte with the HPTE slot */
                hugepte_val(new_pte) &= ~_HUGEPAGE_HPTEFLAGS;
                hugepte_val(new_pte) |= _HUGEPAGE_HASHPTE;

                /* Add in WIMG bits */
                /* XXX We should store these in the pte */
                hpteflags |= _PAGE_COHERENT;

                slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
                                          hpteflags, 0, 1);

                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
                        hugepte_val(new_pte) |= _HUGEPAGE_SECONDARY;
                        hpte_group = ((~hash & htab_data.htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, va, prpn,
                                                  1, hpteflags, 0, 1);
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }

                if (unlikely(slot == -2))
                        panic("hash_huge_page: pte_insert failed\n");

                hugepte_val(new_pte) |= (slot<<5) & _HUGEPAGE_GROUP_IX;

                /*
                 * No need to use ldarx/stdcx here because all who
                 * might be updating the pte will hold the
                 * page_table_lock or the hash_table_lock
                 * (we hold both)
                 */
                *ptep = new_pte;
        }

        spin_unlock_irqrestore(&mm->page_table_lock, flags);

        return 0;
}
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
                                hugepte_t pte, int local)
{
        unsigned long vsid, vpn, va, hash, slot;

        BUG_ON(hugepte_bad(pte));
        BUG_ON(!in_hugepage_area(context, ea));

        vsid = get_vsid(context.id, ea);

        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> HPAGE_SHIFT;
        hash = hpt_hash(vpn, 1);
        if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
        slot += (hugepte_val(pte) & _HUGEPAGE_GROUP_IX) >> 5;

        ppc_md.hpte_invalidate(slot, va, 1, local);
}