/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

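/*
 * sparc64 has no huge entries at the pmd or pgd level: a huge page is
 * mapped by a run of (1 << HUGETLB_PAGE_ORDER) consecutive base PTEs,
 * each tagged with _PAGE_SZHUGE.  These two helpers walk the regular
 * page tables down to the PTE level; huge_pte_alloc() allocates any
 * missing intermediate levels, huge_pte_offset() only looks them up.
 */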
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pud = pud_offset(pgd, addr);
                if (pud) {
                        pmd = pmd_alloc(mm, pud, addr);
                        if (pmd)
                                pte = pte_alloc_map(mm, pmd, addr);
                }
        }
        return pte;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pud = pud_offset(pgd, addr);
                if (pud) {
                        pmd = pmd_offset(pud, addr);
                        if (pmd)
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

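/*
 * _PAGE_SZHUGE selects the huge page size field of the sparc64 TTE.
 * With the 8KB base pages and 4MB huge pages of this kernel,
 * HUGETLB_PAGE_ORDER works out to 9, i.e. 512 PTEs per huge page.
 */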
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

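/*
 * Instantiate a huge page mapping: build the first PTE from the target
 * page and the VMA protections, then replicate it across all base-page
 * slots, stepping the physical address by PAGE_SIZE each time.  The
 * vx_rsspages_*() calls are the Linux-VServer accounting wrappers for
 * the direct mm->rss updates kept alongside as comments.
 */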
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        unsigned long i;
        pte_t entry;

        // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                       vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(page_table, entry);
                page_table++;

                pte_val(entry) += PAGE_SIZE;
        }
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

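/*
 * Share the parent's huge pages with the child at fork(): for each
 * huge page, take one page reference and replicate the parent's PTEs
 * into the child's page tables.  There is no copy-on-write here;
 * hugetlb mappings are shared as-is.
 */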
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int i;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                BUG_ON(!src_pte || pte_none(*src_pte));
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        set_pte(dst_pte, entry);
                        pte_val(entry) += PAGE_SIZE;
                        dst_pte++;
                }
                // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

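/*
 * Back end of get_user_pages() for hugetlb VMAs.  Because hugetlb
 * areas are prefaulted, every PTE in the range must already be
 * present; the walk hands back one base page (and/or VMA pointer)
 * per PAGE_SIZE step, updating *position and *length for the caller.
 */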
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        while (vaddr < vma->vm_end && remainder) {
                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        BUG_ON(!pte || pte_none(*pte));

                        page = pte_page(*pte);
                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }
                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;
        return i;
}

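/*
 * Generic hugetlb hooks that don't apply on sparc64: huge pages live
 * at the PTE level here, so there are no huge PMD entries to follow
 * and no special huge address region, and these remain stubs.
 */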
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

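/*
 * Tear down a huge-page-aligned range: drop the page reference once
 * per huge page, clear all of its base PTEs, then flush the TLB for
 * the whole range in one go.
 */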
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;
        int i;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                BUG_ON(!pte);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        pte_clear(pte);
                        pte++;
                }
        }
        // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

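/*
 * Prefault the whole mapping at mmap() time.  Each huge page is looked
 * up in (or added to) the hugetlbfs page cache, charged against the
 * filesystem quota, and wired into the page tables.  The page cache
 * insertion uses GFP_ATOMIC because the loop runs under
 * mm->page_table_lock.
 */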
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}