/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

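/*
 * On i386, a huge page is mapped by a single PSE entry at the pmd level,
 * so the helpers below simply return the pmd entry cast to a pte_t *.
 */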
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        pmd = pmd_alloc(mm, pud, addr);
        return (pte_t *) pmd;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        return (pte_t *) pmd;
}

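/*
 * Note: vx_rsspages_add()/vx_rsspages_sub() are accounting hooks carried by
 * the Linux-VServer patches in this tree; they appear to charge/uncharge
 * HPAGE_SIZE worth of small pages against the owning context's RSS counter.
 */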
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        pte_t entry;

        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

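/*
 * Duplicate the huge-page mappings of a VMA from the parent mm into the
 * child at fork() time: each huge page is shared by taking an extra
 * reference and copying the pte.
 */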
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

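/*
 * get_user_pages() support for hugetlb VMAs: hand back the constituent
 * small pages one at a time.  The mapping is expected to have been
 * prefaulted, hence the WARN_ON()s below rather than fault handling.
 */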
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {

                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || pte_none(*pte));

                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[(address / PAGE_SIZE) % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

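/*
 * Tear down the huge ptes in [start, end): clear each entry, drop the
 * reference on the mapped page, update the RSS accounting and flush the TLB.
 */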
void unmap_hugepage_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = ptep_get_and_clear(huge_pte_offset(mm, address));
                if (pte_none(pte))
                        continue;
                page = pte_page(pte);
                put_page(page);
        }
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

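/*
 * Pre-fault an entire hugetlb VMA: for each huge-page slot, find (or
 * allocate, quota permitting) the backing page in the hugetlbfs page cache
 * and install the huge pte.  Runs under mm->page_table_lock.
 */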
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }

                if (!pte_none(*pte)) {
                        pmd_t *pmd = (pmd_t *) pte;

                        page = pmd_page(*pmd);
                        pmd_clear(pmd);
                        mm->nr_ptes--;
                        dec_page_state(nr_page_table_pages);
                        page_cache_release(page);
                }

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        start_addr = mm->free_area_cache;

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        int first_time = 1;

        /* don't allow allocations above the current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or it can't fit in the requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                                (!prev_vma || (addr >= prev_vma->vm_end)))
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                else
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end)
                                mm->free_area_cache = vma->vm_start;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if the hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/