arch/i386/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

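/*
 * On i386 a huge page is mapped by a single pmd entry, so the "pte"
 * returned here is really the pmd slot covering addr.  Allocate the
 * intermediate pud/pmd levels if they are not present yet.
 */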
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        pmd = pmd_alloc(mm, pud, addr);
        return (pte_t *) pmd;
}

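/*
 * Same walk as huge_pte_alloc(), but without allocating: return the
 * pmd slot for addr as a pte pointer, for lookups only.
 */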
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        return (pte_t *) pmd;
}

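/*
 * Build and install a huge pte for "page" at page_table (really a pmd
 * slot): account the huge page against the rss, make the entry young,
 * mark it dirty and writable when write access was requested, and flag
 * it as a large (PSE) mapping before setting it.
 */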
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        /* mm->rss += (HPAGE_SIZE / PAGE_SIZE); */
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * Check that both addr and len are aligned to the huge page size.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

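/*
 * Duplicate the huge page mappings of a vma from src into dst at fork
 * time.  The huge pages themselves are shared: each copied mapping just
 * takes an extra reference and duplicates the pmd entry.
 */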
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                /* dst->rss += (HPAGE_SIZE / PAGE_SIZE); */
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

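/*
 * get_user_pages() back end for hugetlb vmas: hand back the individual
 * PAGE_SIZE constituents of each huge page, taking a reference on each
 * page returned.  hugetlb mappings are prefaulted, so the lookups are
 * expected to succeed.
 */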
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {

                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || pte_none(*pte));

                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[(address / PAGE_SIZE) % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

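/*
 * On i386 huge pages are recognised during the normal page table walk:
 * follow_huge_addr() always fails, pmd_huge() tests the PSE bit set by
 * mk_pte_huge(), and follow_huge_pmd() returns the PAGE_SIZE page
 * within the huge page that maps the given address.
 */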
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

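/*
 * Tear down the huge page mappings in [start, end): clear each huge
 * pte, drop the reference on the mapped page, fix up the rss
 * accounting and flush the TLB for the range.
 */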
void unmap_hugepage_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = ptep_get_and_clear(huge_pte_offset(mm, address));
                if (pte_none(pte))
                        continue;
                page = pte_page(pte);
                put_page(page);
        }
        /* mm->rss -= (end - start) >> PAGE_SHIFT; */
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

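/*
 * Prefault an entire hugetlbfs mapping at mmap() time.  For each huge
 * page slot this finds (or allocates and inserts) the backing page in
 * the page cache, charging the filesystem quota, frees any ordinary
 * page table page that previously occupied the pmd, and installs the
 * huge pte.
 */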
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }

                if (!pte_none(*pte)) {
                        pmd_t *pmd = (pmd_t *) pte;

                        page = pmd_page(*pmd);
                        pmd_clear(pmd);
                        mm->nr_ptes--;
                        dec_page_state(nr_page_table_pages);
                        page_cache_release(page);
                }

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
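/*
 * Bottom-up search for a free, HPAGE_SIZE-aligned range of len bytes,
 * starting from the cached free_area_cache hint and restarting once
 * from TASK_UNMAPPED_BASE before giving up.
 */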
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        start_addr = mm->free_area_cache;

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

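/*
 * Top-down variant: search downwards from mmap_base for an
 * HPAGE_SIZE-aligned hole.  If the cached hint leaves no room, retry
 * from the base, and as a last resort fall back to the bottom-up
 * allocator.
 */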
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                                (!prev_vma || (addr >= prev_vma->vm_end)))
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                else
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end)
                                mm->free_area_cache = vma->vm_start;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;

        return addr;
}

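/*
 * Arch hook used when placing a hugetlbfs mapping: validate the length,
 * honour an explicit hint if the aligned range is free, and otherwise
 * pick the bottom-up or top-down search to match the mm's normal
 * get_unmapped_area() policy.
 */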
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */