arch/i386/mm/hugetlbpage.c (linux-2.6, patch-2.6.6-vs1.9.0)
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * On i386 a huge page is mapped by a single PMD entry, so the "pte"
 * for a huge page is really a pmd_t cast to pte_t *.
 */
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pmd = pmd_alloc(mm, pgd, addr);
        return (pte_t *) pmd;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pmd = pmd_offset(pgd, addr);
        return (pte_t *) pmd;
}

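/*
 * Illustrative example (not in the original source): because a huge
 * page is a single PMD entry, every small-page address inside it
 * resolves to the same "pte":
 *
 *      pte_t *a = huge_pte_offset(mm, addr);
 *      pte_t *b = huge_pte_offset(mm, addr + PAGE_SIZE);
 *
 * a == b whenever both addresses fall within the same 4MB huge page.
 */
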
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        pte_t entry;

        /* was: mm->rss += (HPAGE_SIZE / PAGE_SIZE); */
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * Check that both the address and the length of the request are
 * aligned to the huge page size.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

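/*
 * Usage sketch (illustrative values, not from the original source):
 * with 4MB huge pages, HPAGE_MASK keeps only the bits above HPAGE_SHIFT,
 * so:
 *
 *      is_aligned_hugepage_range(0x40000000, 4UL << 20);    returns 0
 *      is_aligned_hugepage_range(0x40001000, 4UL << 20);    returns -EINVAL
 *      is_aligned_hugepage_range(0x40000000, 6UL << 20);    returns -EINVAL
 */
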
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                /* was: dst->rss += (HPAGE_SIZE / PAGE_SIZE); */
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

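/*
 * Illustrative note (not in the original source): after fork() the
 * parent and child PMDs reference the same huge page; the get_page()
 * above is what keeps the page alive until both address spaces have
 * unmapped it:
 *
 *      parent PMD --\
 *                    +--> struct page (compound, count >= 2)
 *      child  PMD --/
 */
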
/*
 * Back end for get_user_pages() on a hugetlb VMA: hand back the 4K
 * constituent pages of the (already prefaulted) huge pages.
 */
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {

                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || pte_none(*pte));

                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

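/*
 * Usage sketch (hypothetical caller; not part of the original file).
 * Assuming a prefaulted hugetlb mapping at uaddr, get_user_pages()
 * reaches follow_hugetlb_page() and fills pages[] with the 4K
 * constituents of the huge page, stepping PAGE_SIZE at a time even
 * when both pages live in the same 4MB huge page:
 *
 *      struct page *pages[2];
 *
 *      down_read(&current->mm->mmap_sem);
 *      get_user_pages(current, current->mm, uaddr, 2, 0, 0, pages, NULL);
 *      up_read(&current->mm->mmap_sem);
 */
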
#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        if (!mm->used_hugetlb)
                return ERR_PTR(-EINVAL);

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

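/*
 * Example walker fragment (illustrative only; the surrounding names
 * are assumptions, not kernel API): pmd_huge() is what lets a
 * page-table walk stop at the PMD level when the PSE bit marks a
 * 4MB mapping.
 *
 *      pmd_t *pmd = pmd_offset(pgd_offset(mm, addr), addr);
 *
 *      if (pmd_huge(*pmd))
 *              page = follow_huge_pmd(mm, addr, pmd, 0);
 *      else
 *              ... descend to the PTE level as usual ...
 */
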
void unmap_hugepage_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = ptep_get_and_clear(huge_pte_offset(mm, address));
                if (pte_none(pte))
                        continue;
                page = pte_page(pte);
                put_page(page);
        }
        /* was: mm->rss -= (end - start) >> PAGE_SHIFT; */
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                /* page cache index of this huge page within the file */
                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        unlock_page(page);
                        if (ret) {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
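
/*
 * Worked example for the idx computation above (illustrative numbers,
 * assuming 4MB huge pages: HPAGE_SHIFT = 22, PAGE_SHIFT = 12). For a
 * VMA mapped at file offset 8MB (vm_pgoff = 2048 small pages), its
 * second huge page gets
 *
 *      idx = ((addr - vm_start) >> 22) + (2048 >> 10)
 *          = 1 + 2
 *          = 3
 *
 * i.e. the fourth 4MB unit of the hugetlbfs file.
 */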