arch/i386/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

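/*
 * On i386 a huge page is mapped by a single PMD entry (4MB pages
 * without PAE, 2MB with PAE), so the "huge pte" is really the pmd
 * entry itself.  Both helpers below therefore stop the walk at the
 * pmd level and return it cast to pte_t *.
 */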
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);
	return (pte_t *) pmd;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pmd = pmd_offset(pgd, addr);
	return (pte_t *) pmd;
}

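/*
 * Install a huge pte: mark it young, and writable/dirty when the
 * mapping allows writes.  mk_pte_huge() sets _PAGE_PSE so the entry
 * maps a whole huge page at the pmd level.
 */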
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t *page_table, int write_access)
{
	pte_t entry;

	/* accounting for mm->rss += (HPAGE_SIZE / PAGE_SIZE) */
	vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
	if (write_access) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);
	set_pte(page_table, entry);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

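/*
 * Copy the huge-page range of a hugetlb vma at fork() time.  The
 * pages were prefaulted and are shared rather than copied: each pte
 * is duplicated in the child and the page's refcount is raised.
 */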
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		/* accounting for dst->rss += (HPAGE_SIZE / PAGE_SIZE) */
		vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}

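/*
 * Back end of get_user_pages() for hugetlb vmas: hand back the
 * constituent PAGE_SIZE pages of the prefaulted huge pages covering
 * [*position, *position + *length * PAGE_SIZE).
 */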
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {

		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || pte_none(*pte));

			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *pte;
	struct page *page;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

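/*
 * Real implementations: a huge pmd carries the PSE bit, and
 * follow_huge_pmd() resolves an address within such a mapping to the
 * corresponding small page inside the compound huge page.
 */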
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

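/*
 * Tear down the huge ptes in [start, end): clear each entry, drop
 * the page reference taken at prefault time, and flush the TLB.
 * Callers are expected to hold mm->page_table_lock.
 */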
void unmap_hugepage_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = ptep_get_and_clear(huge_pte_offset(mm, address));
		if (pte_none(pte))
			continue;
		page = pte_page(pte);
		put_page(page);
	}
	/* accounting for mm->rss -= (end - start) >> PAGE_SHIFT */
	vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
	flush_tlb_range(vma, start, end);
}

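/*
 * Prefault the whole vma at mmap() time: for every huge-page slot,
 * look the page up in (or add it to) the file's page cache, charge
 * the hugetlbfs quota, and install the huge pte.  Runs under
 * mm->page_table_lock; there is no later fault path for hugetlb.
 */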
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		if (!pte_none(*pte)) {
			/* a normal page table occupies this pmd: release it */
			pmd_t *pmd = (pmd_t *) pte;

			page = pmd_page(*pmd);
			pmd_clear(pmd);
			dec_page_state(nr_page_table_pages);
			page_cache_release(page);
		}

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
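/*
 * Bottom-up search for a free, huge-page-aligned range of len bytes,
 * starting at mm->free_area_cache and retrying once from
 * TASK_UNMAPPED_BASE before giving up.
 */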
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	start_addr = mm->free_area_cache;

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

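/*
 * Top-down variant: walk down from mm->free_area_cache looking for a
 * huge-page-aligned hole of len bytes below the mmap base.  On
 * failure, retry once from the base, then fall back to the bottom-up
 * search.
 */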
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
				(!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if the hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}

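/*
 * Entry point used by hugetlbfs: validate alignment, honor an
 * aligned hint if the range is free, then dispatch to the search
 * that matches the mm's layout (legacy bottom-up vs. top-down).
 */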
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/