/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
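
/*
 * Find, allocating intermediate levels as needed, the PTE that maps the
 * huge page containing @addr.  htlbpage_to_page() first rescales the
 * hugetlb-region virtual address so the page table is indexed densely,
 * one PTE per huge page.
 */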
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pmd = pmd_alloc(mm, pgd, taddr);
        if (pmd)
                pte = pte_alloc_map(mm, pmd, taddr);
        return pte;
}
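
/*
 * Read-only counterpart of huge_pte_alloc(): walk the page table for
 * @addr and return its PTE, or NULL if an intermediate level is not
 * present.
 */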
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pmd = pmd_offset(pgd, taddr);
                if (pmd_present(*pmd))
                        pte = pte_offset_map(pmd, taddr);
        }
        return pte;
}
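
/*
 * On ia64 the mapping size is implied by the region register, so making
 * a PTE "huge" only requires setting the present bit.
 */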
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
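
/*
 * Install @page at @page_table and account HPAGE_SIZE/PAGE_SIZE pages
 * of RSS via vx_rsspages_add(), a virtualized accounting wrapper from
 * the Linux-VServer patch set that replaces the plain mm->rss update
 * kept as a comment in the body.
 */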
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
              struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * Check that the given [addr, addr+len) range is properly huge-page
 * aligned and falls inside the dedicated huge page region.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return -EINVAL;
        return 0;
}
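
/*
 * fork() support for hugetlb VMAs: huge pages are shared, not copied.
 * Each parent PTE is installed in the child after taking an extra
 * reference on the underlying page.
 */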
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;
nomem:
        return -ENOMEM;
}
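
/*
 * get_user_pages() back-end for hugetlb mappings: hands back the
 * constituent PAGE_SIZE pages.  The inner loop (back1) stays within one
 * huge page; the PTE is re-read only when the walk crosses into the
 * next huge page.
 */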
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *st, int *length, int i)
{
        pte_t *ptep, pte;
        unsigned long start = *st;
        unsigned long pstart;
        int len = *length;
        struct page *page;

        do {
                pstart = start & HPAGE_MASK;
                ptep = huge_pte_offset(mm, start);
                pte = *ptep;

back1:
                page = pte_page(pte);
                if (pages) {
                        page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
                        get_page(page);
                        pages[i] = page;
                }
                if (vmas)
                        vmas[i] = vma;
                i++;
                len--;
                start += PAGE_SIZE;
                if (((start & HPAGE_MASK) == pstart) && len &&
                                (start < vma->vm_end))
                        goto back1;
        } while (len && start < vma->vm_end);
        *length = len;
        *st = start;
        return i;
}
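
/*
 * Translate a single address in the huge page region to its base page,
 * taking a reference on it.  Only meaningful for mms that have used
 * hugetlb mappings.
 */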
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (!mm->used_hugetlb)
                return ERR_PTR(-EINVAL);
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        get_page(page);
        return page;
}
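
/*
 * ia64 maps huge pages through a dedicated address region rather than
 * through huge PMD entries, so the PMD-based hooks are stubs.
 */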
int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
        return NULL;
}

/*
 * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
 * are hugetlb region specific.
 */
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
{
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
        unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;

        if (!prev) {
                prev = mm->mmap;
                if (!prev)
                        goto no_mmaps;
                if (prev->vm_end > start) {
                        if (last > prev->vm_start)
                                last = prev->vm_start;
                        goto no_mmaps;
                }
        }
        for (;;) {
                struct vm_area_struct *next = prev->vm_next;

                if (next) {
                        if (next->vm_start < start) {
                                prev = next;
                                continue;
                        }
                        if (last > next->vm_start)
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
                        first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
                break;
        }
no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
        /*
         * If the PGD bits are not consecutive in the virtual address, the
         * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
         */
        start_index = pgd_index(htlbpage_to_page(first));
        end_index = pgd_index(htlbpage_to_page(last));

        if (end_index > start_index)
                clear_page_tables(tlb, start_index, end_index - start_index);
}
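
/*
 * Tear down all huge mappings in [start, end): release each mapped
 * page, clear its PTE, then fix up the RSS accounting and flush the TLB
 * once for the whole range.
 */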
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                if (!pte || pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                pte_clear(pte);
        }
        // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}
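
/*
 * hugetlb memory is prefaulted at mmap() time rather than faulted on
 * demand: for each huge-page slot in the VMA, consult the page cache
 * first; only on a miss charge the filesystem quota, allocate a fresh
 * huge page, and insert it.
 */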
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret)
                                unlock_page(page);
                        else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
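
/*
 * Pick an unmapped, HPAGE_SIZE-aligned range in the huge page region,
 * scanning VMAs upward from the requested address (or from the region
 * base if the hint is unusable).
 */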
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        /* This code assumes that REGION_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
        else
                addr = ALIGN(addr, HPAGE_SIZE);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
        }
}
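
/*
 * Handler for the "hugepagesz=" boot parameter.  The requested size
 * must be a power of two that the TLB supports (queried via PAL) and
 * that the page allocator can provide; it takes effect by reprogramming
 * the huge region's region register.
 */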
static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || (size & (size-1)) || !(tr_pages & size) ||
                size <= PAGE_SIZE ||
                size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot cpu already executed ia64_mmu_init() and programmed
         * the region register with HPAGE_SHIFT_DEFAULT; override it here
         * with the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);
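
/*
 * Usage sketch (assuming the CPU's PAL reports support for the size):
 * booting with "hugepagesz=64M" switches the huge page region from the
 * HPAGE_SHIFT_DEFAULT size to 64 MB pages; the value is validated
 * against the PAL page-size mask above.
 */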