patch-2.6.6-vs1.9.0
[linux-2.6.git] arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;

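/*
 * Look up, allocating intermediate page-table levels as needed, the pte
 * slot that maps the huge page containing addr.  htlbpage_to_page()
 * rescales the huge page region address so that each huge page occupies
 * a single slot in the ordinary three-level page table.
 */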
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pmd = pmd_alloc(mm, pgd, taddr);
        if (pmd)
                pte = pte_alloc_map(mm, pmd, taddr);
        return pte;
}

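/*
 * Lookup-only counterpart of huge_pte_alloc(): return the pte for addr,
 * or NULL if the intermediate page-table levels are not present.
 */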
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pmd = pmd_offset(pgd, taddr);
                if (pmd_present(*pmd))
                        pte = pte_offset_map(pmd, taddr);
        }

        return pte;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

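/*
 * Install the pte for one huge page and charge HPAGE_SIZE worth of small
 * pages to the mm's RSS.  vx_rsspages_add() is the Linux-VServer
 * accounting hook that replaces the direct mm->rss update (left
 * commented out below).
 */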
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
              struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
        return;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return -EINVAL;

        return 0;
}

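/*
 * fork() support: share the huge pages of a hugetlb VMA with the child.
 * Each source pte is copied into a freshly allocated destination pte,
 * the page reference count is raised, and the child's RSS is charged.
 */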
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;
nomem:
        return -ENOMEM;
}

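/*
 * get_user_pages() back end for hugetlb VMAs.  The range is walked in
 * PAGE_SIZE steps; the back1 label short-circuits the page-table walk
 * while successive small pages still fall inside the same huge page.
 */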
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *st, int *length, int i)
{
        pte_t *ptep, pte;
        unsigned long start = *st;
        unsigned long pstart;
        int len = *length;
        struct page *page;

        do {
                pstart = start & HPAGE_MASK;
                ptep = huge_pte_offset(mm, start);
                pte = *ptep;

back1:
                page = pte_page(pte);
                if (pages) {
                        page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
                        get_page(page);
                        pages[i] = page;
                }
                if (vmas)
                        vmas[i] = vma;
                i++;
                len--;
                start += PAGE_SIZE;
                if (((start & HPAGE_MASK) == pstart) && len &&
                                (start < vma->vm_end))
                        goto back1;
        } while (len && start < vma->vm_end);
        *length = len;
        *st = start;
        return i;
}

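/*
 * Translate a user address inside the huge page region into its struct
 * page.  The pmd_huge()/follow_huge_pmd() stubs below stay empty on
 * ia64: huge pages live in their own region rather than being marked at
 * the pmd level, so the generic follow_page() path never sees them.
 */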
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (! mm->used_hugetlb)
                return ERR_PTR(-EINVAL);
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
        return NULL;
}

/*
 * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
 * are hugetlb region specific.
 */
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
{
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
        unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;

        if (!prev) {
                prev = mm->mmap;
                if (!prev)
                        goto no_mmaps;
                if (prev->vm_end > start) {
                        if (last > prev->vm_start)
                                last = prev->vm_start;
                        goto no_mmaps;
                }
        }
        for (;;) {
                struct vm_area_struct *next = prev->vm_next;

                if (next) {
                        if (next->vm_start < start) {
                                prev = next;
                                continue;
                        }
                        if (last > next->vm_start)
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
                        first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
                break;
        }
no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
        /*
         * If the PGD bits are not consecutive in the virtual address, the
         * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
         */

        start_index = pgd_index(htlbpage_to_page(first));
        end_index = pgd_index(htlbpage_to_page(last));

        if (end_index > start_index) {
                clear_page_tables(tlb, start_index, end_index - start_index);
        }
}

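/*
 * Tear down a hugetlb mapping: for every huge page in [start, end) drop
 * the page reference, clear the pte, uncharge the RSS, and flush the TLB
 * for the range.  start and end must be HPAGE_SIZE aligned.
 */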
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                pte_clear(pte);
        }
        // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

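/*
 * Populate an entire hugetlb mapping at mmap() time.  For each huge page
 * in the VMA, look the backing page up in the page cache (allocating and
 * inserting it, with a hugetlbfs quota charge, if it is not there yet)
 * and install its pte under mm->page_table_lock.
 */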
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        unlock_page(page);
                        if (ret) {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

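/*
 * Pick an HPAGE_SIZE aligned, unused range inside the huge page region
 * by a linear walk of the VMA list.  Hints outside the region or
 * misaligned hints are ignored and the search restarts at
 * HPAGE_REGION_BASE.
 */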
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        /* This code assumes that REGION_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
        else
                addr = ALIGN(addr, HPAGE_SIZE);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
        }
}

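/*
 * "hugepagesz=" boot parameter.  The requested size must be a power of
 * two, larger than PAGE_SIZE, smaller than the MAX_ORDER allocation
 * limit, and one of the page sizes PAL reports as supported; the huge
 * page region register is then reprogrammed with the new shift.
 */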
static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || (size & (size-1)) || !(tr_pages & size) ||
                size <= PAGE_SIZE ||
                size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot cpu already executed ia64_mmu_init() with
         * HPAGE_SHIFT_DEFAULT; override the huge page region register
         * here with the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);