/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
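
/*
 * On IA-32 a huge page is mapped by a single page-directory entry with
 * the PSE bit set (4 MB pages without PAE, 2 MB with PAE), so there is
 * no PTE level below it.  The huge_pte_* helpers below therefore work
 * on pmds and only cast the result to pte_t * for the generic hugetlb
 * code.
 */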
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_alloc(mm, pgd, addr);

	return (pte_t *) pmd;
}
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_offset(pgd, addr);

	return (pte_t *) pmd;
}
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *page_table, int write_access)
{
	pte_t entry;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);
	set_pte(page_table, entry);
}
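
/*
 * For a writable mapping the entry built above ends up with
 * _PAGE_PRESENT, _PAGE_RW, _PAGE_DIRTY, _PAGE_ACCESSED and _PAGE_PSE
 * set.  Note that rss is charged in base pages: one huge page counts
 * as HPAGE_SIZE / PAGE_SIZE (1024 on non-PAE, 512 on PAE) entries.
 */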
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
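
/*
 * Worked examples, assuming a non-PAE kernel (HPAGE_SIZE == 4 MB, so
 * ~HPAGE_MASK == 0x3fffff):
 *
 *	is_aligned_hugepage_range(0x40000000, 0x00800000) ->  0
 *	is_aligned_hugepage_range(0x40001000, 0x00800000) -> -EINVAL
 *	is_aligned_hugepage_range(0x40000000, 0x00401000) -> -EINVAL
 */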
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;
}
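
/*
 * Unlike the normal fork path there is no copy-on-write here: the
 * child maps the very same huge page and get_page() just bumps its
 * reference count, which matches hugetlb semantics where the pages
 * are shared, prefaulted mappings of a hugetlbfs file.
 */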
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte = huge_pte_offset(mm, vaddr);
			struct page *page;

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || pte_none(*pte));

			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}
		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	*length = remainder;
	*position = vaddr;
	return i;
}
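
/*
 * The indexing above picks the base page inside the huge page: with
 * 4 MB huge pages (non-PAE) there are HPAGE_SIZE/PAGE_SIZE == 1024
 * base pages per huge page, so for vaddr == 0x40003000, vpfn == 0x40003
 * and vpfn % 1024 == 3, i.e. mem_map entry 3 of the compound page.
 */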
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *pte;
	struct page *page;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[(address/PAGE_SIZE) % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else
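
/*
 * Production versions: huge pages are reached only through their pmd,
 * so follow_huge_addr() always fails and the generic follow_page()
 * path falls through to pmd_huge()/follow_huge_pmd() instead.
 */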
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
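
/*
 * A worked example for follow_huge_pmd(), assuming a non-PAE kernel:
 * for address == 0x40c03000 the huge page starts at 0x40c00000, so
 * (address & ~HPAGE_MASK) >> PAGE_SHIFT == 3 and the caller gets
 * mem_map entry 3 within the huge page.
 */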
void unmap_hugepage_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = ptep_get_and_clear(huge_pte_offset(mm, address));
		if (pte_none(pte))
			continue;
		page = pte_page(pte);
		put_page(page);
	}
	mm->rss -= (end - start) >> PAGE_SHIFT;
	flush_tlb_range(vma, start, end);
}
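
/*
 * put_page() drops the reference taken when the huge page was mapped;
 * when the last reference goes, the page returns to the hugetlb pool
 * rather than the buddy allocator.  The rss adjustment is in base
 * pages, mirroring set_huge_pte(), and one ranged TLB flush at the
 * end covers every entry cleared by the loop.
 */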
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		/* tear down any ordinary page table occupying the slot */
		if (!pte_none(*pte)) {
			pmd_t *pmd = (pmd_t *) pte;

			page = pmd_page(*pmd);
			pmd_clear(pmd);
			dec_page_state(nr_page_table_pages);
			page_cache_release(page);
		}

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			unlock_page(page);
			if (ret) {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
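
/*
 * Note on locking: mm->page_table_lock is held across the whole
 * prefault loop, which is why add_to_page_cache() is called with
 * GFP_ATOMIC above: no allocation on this path may sleep while the
 * spinlock is held.
 */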