arch/i386/mm/hugetlbpage.c

/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

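/*
 * On i386 a huge page is mapped by a single PMD entry (a PSE "large"
 * page), so the huge PTE is really the PMD itself: allocate the PMD
 * covering addr and hand it back cast to a pte_t pointer.
 */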
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pmd = pmd_alloc(mm, pgd, addr);
        return (pte_t *) pmd;
}

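/*
 * Look up the PMD entry mapping addr without allocating anything;
 * returned cast to a pte_t pointer, as above.
 */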
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pmd = pmd_offset(pgd, addr);
        return (pte_t *) pmd;
}

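/*
 * Build and install the huge PTE for one hugepage: account the
 * HPAGE_SIZE/PAGE_SIZE small pages in mm->rss, set the protection
 * bits from the VMA, mark the entry young and huge (PSE), and write
 * it into the page table.
 */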
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        pte_t entry;

        mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * Check that both the address and length of a range are aligned to
 * the huge page size.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

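/*
 * Copy a VMA's huge page mappings from the parent to the child at
 * fork() time.  Hugetlb pages are prefaulted and not COWed here, so
 * the child simply shares the parent's pages: take a reference on
 * each page and duplicate the PTE.
 */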
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

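/*
 * get_user_pages() back end for hugetlb VMAs: walk the range starting
 * at *position, filling in up to *length page pointers and VMA
 * pointers.  Because hugetlb mappings are prefaulted, every PTE in
 * the range is expected to be present; the WARN_ONs below flag any
 * violation of that assumption.
 */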
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {

                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || pte_none(*pte));

                        /* index of this 4K subpage within the huge page */
                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

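/*
 * Two implementations of the follow_huge_* helpers follow.  The
 * "#if 0" branch is a testing prototype that resolves the page by
 * walking the page table directly; the live "#else" branch is what
 * actually builds, letting the generic follow_page() code reach
 * follow_huge_pmd() via the pmd_huge() check.
 */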
#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* index of this 4K subpage within the huge page */
        page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

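/*
 * In the live configuration follow_huge_addr() always fails, so the
 * caller falls back to the page-table walk and resolves huge pages
 * through pmd_huge()/follow_huge_pmd() instead.
 */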
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

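/*
 * A PMD maps a huge page iff its PSE (page size extension) bit is
 * set.
 */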
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

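/*
 * Resolve the struct page for an address inside a huge PMD: start
 * from the head page mapped by the PMD and step to the 4K subpage
 * containing address.
 */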
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

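/*
 * Tear down the huge page mappings in [start, end): clear each PTE,
 * drop the page reference taken when the mapping was set up, fix up
 * the rss accounting and flush the TLB for the range.  Both ends of
 * the range must be hugepage aligned.
 */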
void unmap_hugepage_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = ptep_get_and_clear(huge_pte_offset(mm, address));
                if (pte_none(pte))
                        continue;
                page = pte_page(pte);
                put_page(page);
        }
        mm->rss -= (end - start) >> PAGE_SHIFT;
        flush_tlb_range(vma, start, end);
}

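/*
 * Populate an entire hugetlb VMA at mmap() time: for each huge page
 * in the VMA, release any normal page table occupying the PMD slot,
 * look the page up in the page cache (allocating and inserting a
 * fresh huge page, with quota charged to the filesystem, on a miss)
 * and install the huge PTE.  Runs under mm->page_table_lock, hence
 * the GFP_ATOMIC page cache insertion.
 */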
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }

                if (!pte_none(*pte)) {
                        /* a normal page table already sits in this PMD
                         * slot; release it before installing the huge
                         * mapping */
                        pmd_t *pmd = (pmd_t *) pte;

                        page = pmd_page(*pmd);
                        pmd_clear(pmd);
                        dec_page_state(nr_page_table_pages);
                        page_cache_release(page);
                }

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}