/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/rmap.h>
/* HugePTE layout:
 *
 * 31 30 ... 15 14 ... 8  7  6  5   4    3    2  1  0
 * PFN>>12.....  -  ... -  HASH_IX  2ND  HASH RW -  HG=1
 */
#define HUGEPTE_SHIFT		15
#define _HUGEPAGE_PFN		0xffff8000
#define _HUGEPAGE_BAD		0x00007f00
#define _HUGEPAGE_HASHPTE	0x00000008
#define _HUGEPAGE_SECONDARY	0x00000010
#define _HUGEPAGE_GROUP_IX	0x000000e0
#define _HUGEPAGE_HPTEFLAGS	(_HUGEPAGE_HASHPTE | _HUGEPAGE_SECONDARY | \
				 _HUGEPAGE_GROUP_IX)
#define _HUGEPAGE_RW		0x00000004
typedef struct {unsigned int val;} hugepte_t;
#define hugepte_val(hugepte)	((hugepte).val)
#define __hugepte(x)		((hugepte_t) { (x) })
#define hugepte_pfn(x)		\
	((unsigned long)(hugepte_val(x)>>HUGEPTE_SHIFT) << HUGETLB_PAGE_ORDER)
#define mk_hugepte(page,wr)	__hugepte( \
	((page_to_pfn(page)>>HUGETLB_PAGE_ORDER) << HUGEPTE_SHIFT) \
	| (!!(wr) * _HUGEPAGE_RW) | _PMD_HUGEPAGE)

#define hugepte_bad(x)	( !(hugepte_val(x) & _PMD_HUGEPAGE) || \
			  (hugepte_val(x) & _HUGEPAGE_BAD) )
#define hugepte_page(x)	pfn_to_page(hugepte_pfn(x))
#define hugepte_none(x)	(!(hugepte_val(x) & _HUGEPAGE_PFN))
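
/*
 * Worked example (illustrative only, assuming 16MB huge pages on a 4KB
 * base page, i.e. HUGETLB_PAGE_ORDER == 12, and taking _PMD_HUGEPAGE to
 * be the HG bit, bit 0, in the layout above): a writable huge page at
 * physical address 0x1000000 has pfn 0x1000, so mk_hugepte() yields
 * ((0x1000 >> 12) << 15) | _HUGEPAGE_RW | _PMD_HUGEPAGE == 0x8005, and
 * hugepte_pfn() recovers (0x8005 >> 15) << 12 == 0x1000 again.
 */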
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local);
static inline unsigned int hugepte_update(hugepte_t *p, unsigned int clr,
					  unsigned int set)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# pte_update\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc");

	return old;
}
static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
{
	hugepte_update(ptep, ~_HUGEPAGE_HPTEFLAGS,
		       hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}
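
/*
 * For reference, hugepte_update() above is semantically equivalent to the
 * following non-atomic sketch (illustration only, not used by the code);
 * the lwarx/stwcx. loop simply makes the read-modify-write atomic against
 * concurrent updates of the hash flags:
 *
 *	unsigned int old = hugepte_val(*p);
 *	hugepte_val(*p) = (old & ~clr) | set;
 *	return old;
 */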
static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));
	return (hugepte_t *)pmd;
}
static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));
	return (hugepte_t *)pmd;
}
static void setup_huge_pte(struct mm_struct *mm, struct page *page,
			   hugepte_t *ptep, int write_access)
{
	hugepte_t entry;
	int i;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	entry = mk_hugepte(page, write_access);
	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		set_hugepte(ptep+i, entry);
}
static void teardown_huge_pte(hugepte_t *ptep)
{
	int i;
	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		pmd_clear((pmd_t *)(ptep+i));
}
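
/*
 * Note: one huge page is mapped by HUGEPTE_BATCH_SIZE consecutive
 * pmd-level entries (presumably HPAGE_SIZE / PMD_SIZE of them, defined
 * in asm/page.h), all carrying the same hugepte value, which is why
 * setup_huge_pte() and teardown_huge_pte() always walk the whole batch.
 */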
/* This function checks for proper alignment of input addr and len parameters. */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)))
		return -EINVAL;
	return 0;
}
static void flush_segments(void *parm)
{
	u16 segs = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < 16; i++) {
		if (! (segs & (1U << i)))
			continue;
		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
	}
	asm volatile("isync" : : : "memory");
}
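
/*
 * Low hugepage segments: for 32-bit tasks the 4GB address space splits
 * into sixteen segments of (1 << SID_SHIFT) bytes (256MB each), and
 * mm->context.htlb_segs is a bitmask, bit i covering segment i, of the
 * segments converted to huge pages.  flush_segments() runs on every CPU
 * (via on_each_cpu() below) and uses slbie to drop any stale SLB entries
 * for the newly converted segments.
 */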
static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;
	unsigned long addr;
	struct mmu_gather *tlb;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	/* Clean up any leftover PTE pages in the region */
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd;
		struct page *page;
		pte_t *pte;
		int i;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (!pmd || pmd_none(*pmd))
			continue;

		pte = (pte_t *)pmd_page_kernel(*pmd);
		/* No VMAs, so there should be no PTEs, check just in case. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			BUG_ON(!pte_none(*pte));
			pte++;
		}

		page = pmd_page(*pmd);
		pmd_clear(pmd);
		pgtable_remove_rmap(page);
		pte_free_tlb(tlb, page);
	}
	tlb_finish_mmu(tlb, start, end);
	spin_unlock(&mm->page_table_lock);

	return 0;
}
static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
	unsigned long i;

	newsegs &= ~(mm->context.htlb_segs);
	if (! newsegs)
		return 0; /* The segments we want are already open */

	for (i = 0; i < 16; i++)
		if ((1 << i) & newsegs)
			if (prepare_low_seg_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.htlb_segs |= newsegs;
	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

	return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (within_hugepage_high_range(addr, len))
		return 0;
	else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) {
		int err;
		/* Yes, we need both tests, in case addr+len overflows
		 * 64-bit arithmetic */
		err = open_low_hpage_segs(current->mm,
					  LOW_ESID_MASK(addr, len));
		if (err)
			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
			       " failed (segs: 0x%04hx)\n", addr, len,
			       LOW_ESID_MASK(addr, len));
		return err;
	}

	return -EINVAL;
}
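
/*
 * Illustrative example (not from the original source): for a hypothetical
 * request at addr 0x30000000 with len 0x20000000, the range touches the
 * two 256MB segments 3 and 4, so LOW_ESID_MASK(addr, len) is taken here
 * to be 0x0018, and open_low_hpage_segs() above would try to convert
 * exactly those two segments.
 */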
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	hugepte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		BUG_ON(! in_hugepage_area(src->context, addr));
		BUG_ON(! in_hugepage_area(dst->context, addr));

		dst_pte = hugepte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;

		src_pte = hugepte_offset(src, addr);
		entry = *src_pte;

		if ((addr % HPAGE_SIZE) == 0) {
			/* This is the first hugepte in a batch */
			ptepage = hugepte_page(entry);
			get_page(ptepage);
			dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		}
		set_hugepte(dst_pte, entry);
		addr += PMD_SIZE;
	}
	return 0;
}
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		BUG_ON(!in_hugepage_area(mm->context, vaddr));
		if (pages) {
			hugepte_t *pte = hugepte_offset(mm, vaddr);
			struct page *page;

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || hugepte_none(*pte));
			page = &hugepte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			WARN_ON(!PageCompound(page));
			get_page(page);
			pages[i] = page;
		}
		if (vmas)
			vmas[i] = vma;
		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	*length = remainder;
	*position = vaddr;
	return i;
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
	return pmd_hugepage(pmd);
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	BUG_ON(! pmd_hugepage(*pmd));
	page = hugepte_page(*(hugepte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	hugepte_t *ptep;
	struct page *page;
	int local = 0;
	cpumask_t tmp;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((start % HPAGE_SIZE) != 0);
	BUG_ON((end % HPAGE_SIZE) != 0);

	/* XXX are there races with checking cpu_vm_mask? - Anton */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		hugepte_t pte;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		ptep = hugepte_offset(mm, addr);
		if (!ptep || hugepte_none(*ptep))
			continue;

		pte = *ptep;
		page = hugepte_page(pte);
		teardown_huge_pte(ptep);
		if (hugepte_val(pte) & _HUGEPAGE_HASHPTE)
			flush_hash_hugepage(mm->context, addr,
					    pte, local);
		put_page(page);
	}

	mm->rss -= (end - start) >> PAGE_SHIFT;
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		hugepte_t *pte = hugepte_alloc(mm, addr);
		struct page *page;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!hugepte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (ret) {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		setup_huge_pte(mm, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
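
/*
 * Note: there is no hugepage fault handler in this scheme -- every page of
 * a hugetlb mapping is allocated and wired up here, when the mapping is
 * created, under mm->page_table_lock, so later lookups such as
 * follow_hugetlb_page() normally find the hugeptes already present.
 */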
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}
	start_addr = addr = mm->free_area_cache;

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_END;
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		goto full_search;
	}
	return -ENOMEM;
}
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
static unsigned long htlb_get_high_area(unsigned long len)
{
	unsigned long addr = TASK_HPAGE_BASE;
	struct vm_area_struct *vma;

	for (vma = find_vma(current->mm, addr);
	     addr + len <= TASK_HPAGE_END;
	     vma = vma->vm_next) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
		BUG_ON(! within_hugepage_high_range(addr, len));

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Because we're in a hugepage region, this alignment
		 * should not skip us over any VMAs */
	}

	return -ENOMEM;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		int lastshift = 0;
		u16 segmask, cursegs = current->mm->context.htlb_segs;

		/* First see if we can do the mapping in the existing
		 * low hpage segments */
		addr = htlb_get_low_area(len, cursegs);
		if (addr != -ENOMEM)
			return addr;

		for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; segmask >>=1) {
			if (segmask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, cursegs | segmask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_segs(current->mm, segmask) == 0)
				return addr;
		}
		printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
		       " enough segments\n");
		return -ENOMEM;
	} else {
		return htlb_get_high_area(len);
	}
}
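
/*
 * Summary of the lookup above: 64-bit tasks simply take a slice of the
 * dedicated high hugepage range (TASK_HPAGE_BASE..TASK_HPAGE_END).  32-bit
 * tasks first try the already-open low segments; failing that, the loop
 * starts from the topmost segments that could hold the mapping
 * (LOW_ESID_MASK(0x100000000UL-len, len)) and slides the candidate mask
 * downwards (segmask >>= 1) until a placement both fits and can be opened
 * with open_low_hpage_segs().
 */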
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	hugepte_t *ptep;
	unsigned long va, vpn;
	int is_write;
	hugepte_t old_pte, new_pte;
	unsigned long hpteflags, prpn, flags;
	long slot;

	/* We have to find the first hugepte in the batch, since
	 * that's the one that will store the HPTE flags */
	ea &= HPAGE_MASK;
	ptep = hugepte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || hugepte_none(*ptep)))
		return 1;

	BUG_ON(hugepte_bad(*ptep));

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	is_write = access & _PAGE_RW;
	if (unlikely(is_write && !(hugepte_val(*ptep) & _HUGEPAGE_RW)))
		return 1;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	spin_lock_irqsave(&mm->page_table_lock, flags);

	old_pte = *ptep;
	new_pte = old_pte;

	hpteflags = 0x2 | (! (hugepte_val(new_pte) & _HUGEPAGE_RW));

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, 1);
		if (hugepte_val(old_pte) & _HUGEPAGE_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		slot += (hugepte_val(old_pte) & _HUGEPAGE_GROUP_IX) >> 5;

		if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
			hugepte_val(old_pte) &= ~_HUGEPAGE_HPTEFLAGS;
	}

	if (likely(!(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, 1);
		unsigned long hpte_group;

		prpn = hugepte_pfn(old_pte);

repeat:
		hpte_group = ((hash & htab_data.htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* Update the linux pte with the HPTE slot */
		hugepte_val(new_pte) &= ~_HUGEPAGE_HPTEFLAGS;
		hugepte_val(new_pte) |= _HUGEPAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		hpteflags |= _PAGE_COHERENT;

		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
					  hpteflags, 0, 1);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hugepte_val(new_pte) |= _HUGEPAGE_SECONDARY;
			hpte_group = ((~hash & htab_data.htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  1, hpteflags, 0, 1);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		hugepte_val(new_pte) |= (slot<<5) & _HUGEPAGE_GROUP_IX;

		/*
		 * No need to use ldarx/stdcx here because all who
		 * might be updating the pte will hold the
		 * page_table_lock or the hash_table_lock
		 * (we hold both)
		 */
		*ptep = new_pte;
	}

	spin_unlock_irqrestore(&mm->page_table_lock, flags);

	return 0;
}
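
/*
 * Illustrative example: if the HPTE lands in slot 3 of its group, the
 * stored flags become _HUGEPAGE_HASHPTE | (3 << 5), i.e. GROUP_IX == 3.
 * flush_hash_hugepage() below reverses this, recomputing the group from
 * hpt_hash(vpn, 1) (complemented when _HUGEPAGE_SECONDARY is set) and
 * adding back ((pte & _HUGEPAGE_GROUP_IX) >> 5) to find the slot to
 * invalidate.
 */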
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local)
{
	unsigned long vsid, vpn, va, hash, slot;

	BUG_ON(hugepte_bad(pte));
	BUG_ON(!in_hugepage_area(context, ea));

	vsid = get_vsid(context.id, ea);

	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> LARGE_PAGE_SHIFT;
	hash = hpt_hash(vpn, 1);
	if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
	slot += (hugepte_val(pte) & _HUGEPAGE_GROUP_IX) >> 5;

	ppc_md.hpte_invalidate(slot, va, 1, local);
}