/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);
	pmd_t *pmd = NULL;

	if (pud)
		pmd = pmd_alloc(mm, pud, addr);
	return (pte_t *) pmd;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return (pte_t *) pmd;
}

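/*
 * Note: on i386 a huge page is mapped by a single PMD entry (with the
 * PSE bit set), so both helpers above hand back the pmd slot cast to
 * a pte_t *; there is no lower page-table level to walk for huge
 * mappings.
 */
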
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *page_table, int write_access)
{
	pte_t entry;

	/* mm->rss += (HPAGE_SIZE / PAGE_SIZE); */
	vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	set_pte(page_table, entry);
}

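/*
 * Note the ordering above: for a writable mapping the entry is made
 * dirty and writable immediately, so the huge page never takes a
 * write fault to set the dirty bit later; hugetlb mappings are
 * prefaulted and bypass the normal fault path.
 */
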
/* Check for proper alignment of the input addr and len parameters. */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

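/*
 * Worked example with hypothetical values: with 4 MB huge pages
 * (non-PAE i386, HPAGE_SHIFT == 22), ~HPAGE_MASK is 0x003fffff, so
 * addr = 0x40000000 with len = 0x00800000 (two huge pages) passes,
 * while len = 0x00700000 leaves low bits set and draws -EINVAL.
 */
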
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		/* dst->rss += (HPAGE_SIZE / PAGE_SIZE); */
		vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;
}

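/*
 * Note that fork() shares hugetlb pages rather than copying them: the
 * child gets the same pte entry and get_page() only raises the page's
 * reference count. There is no copy-on-write for huge pages in this
 * implementation.
 */
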
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr / PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte = huge_pte_offset(mm, vaddr);
			struct page *page;

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || pte_none(*pte));

			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}
		if (vmas)
			vmas[i] = vma;
		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	*length = remainder;
	*position = vaddr;
	return i;
}

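/*
 * follow_hugetlb_page() is the get_user_pages() back end for hugetlb
 * VMAs: the indexing expression picks the vpfn-th 4 KB subpage out of
 * the compound huge page, e.g. with 4 MB huge pages an offset of
 * 0x5000 into the huge page yields subpage 5 of 1024.
 */
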
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct vm_area_struct *vma;
	struct page *page;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

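/*
 * In the live (#else) branch, pmd_huge() keys off the PSE bit: a pmd
 * with _PAGE_PSE set maps a whole huge page directly. The pointer
 * arithmetic in follow_huge_pmd() then selects the 4 KB subpage, e.g.
 * for address 0x40005000 inside a 4 MB huge page, (0x5000 >> 12) == 5
 * steps past the compound head.
 */
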
void unmap_hugepage_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = ptep_get_and_clear(huge_pte_offset(mm, address));
		if (pte_none(pte))
			continue;
		page = pte_page(pte);
		put_page(page);
	}
	/* mm->rss -= (end - start) >> PAGE_SHIFT; */
	vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
	flush_tlb_range(vma, start, end);
}

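/*
 * RSS is accounted in 4 KB units throughout, so tearing down e.g. one
 * 4 MB huge page subtracts (4 MB >> PAGE_SHIFT) == 1024 pages; the
 * matching vx_rsspages_add() in set_huge_pte() added the same amount.
 */
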
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		if (!pte_none(*pte)) {
			pmd_t *pmd = (pmd_t *) pte;

			page = pmd_page(*pmd);
			pmd_clear(pmd);
			mm->nr_ptes--;
			dec_page_state(nr_page_table_pages);
			page_cache_release(page);
		}

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			unlock_page(page);
			if (ret) {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

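/*
 * hugetlb_prefault() is why the fault-path code above may warn on
 * empty ptes: every huge page backing the VMA is looked up (or
 * allocated and charged against the hugetlbfs quota) and mapped at
 * mmap() time, under mm->page_table_lock, so no hugetlb fault can
 * occur later.
 */
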
/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	start_addr = mm->free_area_cache;

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

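/*
 * The bottom-up walk only ever tests HPAGE_SIZE-aligned candidates:
 * ALIGN() rounds up, so e.g. with 4 MB huge pages a vma ending at
 * 0x40123000 makes the next candidate 0x40400000. mm->free_area_cache
 * caches the end of the last successful search as a starting hint.
 */
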
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
				(!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}

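/*
 * Masking with HPAGE_MASK rounds candidates *down*, which is what a
 * top-down search wants: e.g. with free_area_cache at 0xbfe00000 and
 * len = 0x400000, (0xbfe00000 - 0x400000) & HPAGE_MASK gives the
 * aligned candidate 0xbf800000.
 */
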
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
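
/*
 * Usage sketch (userspace, illustrative only; the path is
 * hypothetical): mapping a file on a mounted hugetlbfs instance
 * reaches hugetlb_get_unmapped_area() above via
 * file->f_op->get_unmapped_area:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0755);
 *	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * len must be a multiple of HPAGE_SIZE or the len & ~HPAGE_MASK check
 * above fails the mmap() with -EINVAL.
 */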