arch/i386/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

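/*
 * On i386 a huge page is mapped by a single pmd entry with the PSE
 * (page size extension) bit set, covering HPAGE_SIZE at once (4 MiB
 * without PAE, 2 MiB with PAE).  There is no pte level underneath, so
 * the helpers below simply hand back the pmd slot cast to pte_t *,
 * and pmd_huge() tests _PAGE_PSE.
 */
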
/* "Allocate" the huge pte for @addr: really just the pmd slot. */
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);
	return (pte_t *) pmd;
}

/* Look up the huge pte (i.e. the pmd entry) for @addr. */
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pmd = pmd_offset(pgd, addr);
	return (pte_t *) pmd;
}

/*
 * Install a huge pte for @page at @page_table, accounting the mapping
 * in mm->rss in base-page units and setting the PSE bit via
 * mk_pte_huge().
 */
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *page_table, int write_access)
{
	pte_t entry;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);
	set_pte(page_table, entry);
}

/*
 * Check that both the addr and len arguments are HPAGE_SIZE-aligned.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
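
/*
 * Example (assuming non-PAE i386, HPAGE_SIZE = 4 MiB, so
 * HPAGE_MASK = 0xffc00000):
 *
 *	is_aligned_hugepage_range(0x00800000, 0x00400000) == 0
 *	is_aligned_hugepage_range(0x00801000, 0x00400000) == -EINVAL,
 *	since 0x00801000 & ~HPAGE_MASK == 0x1000.
 */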

/*
 * Duplicate a hugetlb mapping at fork(): huge pages are shared between
 * parent and child, so copying a range just takes a reference on each
 * page and duplicates the huge pte.
 */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}

/*
 * Back end of get_user_pages() for hugetlb VMAs: gather up to *length
 * base-page pointers starting at *position, advancing both as we go,
 * and return the next free index in pages[]/vmas[].
 */
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {

		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || pte_none(*pte));

			/* pick the base page within the huge page */
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

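/*
 * Caller-side sketch (a paraphrase of how get_user_pages() drives this
 * function, not verbatim mm/memory.c):
 *
 *	i = 0;
 *	while (len) {
 *		vma = find_vma(mm, vaddr);
 *		if (is_vm_hugetlb_page(vma)) {
 *			i = follow_hugetlb_page(mm, vma, pages, vmas,
 *						&vaddr, &len, i);
 *			continue;
 *		}
 *		...
 *	}
 *
 * On return, vaddr and len have been advanced past whatever was
 * gathered, so the caller can carry on with the next VMA.
 */
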
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	if (!mm->used_hugetlb)
		return ERR_PTR(-EINVAL);

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

/* A huge pmd is one with the PSE bit set. */
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

/*
 * Translate @address through a huge pmd: start from the head of the
 * compound page and step to the base page at the offset within the
 * huge page.
 */
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
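
/*
 * Worked example (assuming non-PAE, HPAGE_SIZE = 4 MiB): for address
 * 0x08603000 inside a huge page mapped at 0x08400000,
 * (address & ~HPAGE_MASK) >> PAGE_SHIFT == 0x00203000 >> 12 == 0x203,
 * so the 0x203rd 4 KiB subpage of the compound page is returned.
 */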

/*
 * Tear down the huge-page mappings in [start, end): clear each huge
 * pte, drop the page reference taken when it was mapped, fix up rss
 * and flush the TLB for the range.
 */
void unmap_hugepage_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = ptep_get_and_clear(huge_pte_offset(mm, address));
		if (pte_none(pte))
			continue;
		page = pte_page(pte);
		put_page(page);
	}
	mm->rss -= (end - start) >> PAGE_SHIFT;
	flush_tlb_range(vma, start, end);
}

/*
 * Populate every huge page of @vma up front: find (or allocate and
 * insert) the backing page in @mapping's page cache and install a
 * huge pte for it.  Runs under mm->page_table_lock, hence the
 * GFP_ATOMIC page-cache insertion.
 */
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		/* page-cache index of this huge page, in HPAGE_SIZE units */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (ret) {
				/* page was never locked; just release it */
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
			/* add_to_page_cache() returned the page locked */
			unlock_page(page);
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
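
/*
 * Note: kernels of this vintage have no hugetlb fault handler;
 * hugetlbfs is expected to call hugetlb_prefault() from its mmap file
 * operation, so the whole mapping is populated (and quota charged) at
 * mmap time.
 */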