patch-2.6.6-vs1.9.0
arch/sparc64/mm/hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

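/*
 * sparc64 has no huge entries at the pmd level in this era; a huge
 * page is represented as HPAGE_SIZE / PAGE_SIZE contiguous base-page
 * PTEs, each tagged with _PAGE_SZHUGE.  huge_pte_alloc() therefore
 * just walks (allocating as needed) the normal pgd/pmd/pte chain down
 * to the first base PTE backing the huge page.
 */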
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_alloc(mm, pgd, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
}

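/*
 * Read-only counterpart of huge_pte_alloc(): look up the base PTE for
 * addr without allocating any missing page-table levels.
 */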
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_offset(pgd, addr);
                if (pmd)
                        pte = pte_offset_map(pmd, addr);
        }
        return pte;
}

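/*
 * mk_pte_huge() tags an entry with _PAGE_SZHUGE so the TLB miss
 * handler installs it with the huge page size.  set_huge_pte()
 * replicates that entry across all HPAGE_SIZE / PAGE_SIZE base PTEs
 * backing the huge page, bumping the physical address by PAGE_SIZE at
 * each step.  RSS accounting goes through the Linux-VServer wrapper
 * vx_rsspages_add(); the bare mm->rss update it replaces is kept as a
 * comment in the body.
 */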
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t * page_table, int write_access)
{
        unsigned long i;
        pte_t entry;

        // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                       vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(page_table, entry);
                page_table++;

                pte_val(entry) += PAGE_SIZE;
        }
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

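/*
 * fork() support: share each huge page between parent and child by
 * taking one page reference per huge page and replicating the parent's
 * base PTEs into the child's page tables.  hugetlb mappings are
 * prefaulted at mmap() time, so the source PTEs must already be
 * present (hence the BUG_ON).
 */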
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int i;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                BUG_ON(!src_pte || pte_none(*src_pte));
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        set_pte(dst_pte, entry);
                        pte_val(entry) += PAGE_SIZE;
                        dst_pte++;
                }
                // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

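/*
 * get_user_pages() support.  hugetlb mappings are prefaulted (and
 * effectively locked) at mmap() time, so every base PTE in the range
 * must already be present; the walk proceeds in PAGE_SIZE steps and
 * takes one page reference per base page handed back to the caller.
 */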
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        while (vaddr < vma->vm_end && remainder) {
                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        BUG_ON(!pte || pte_none(*pte));

                        page = pte_page(*pte);

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

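/*
 * Generic follow_page() hooks.  Since sparc64 keeps huge pages in
 * ordinary base PTEs rather than huge pmd entries, the normal page
 * table walk already finds them, and these hooks are stubs.
 */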
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

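/*
 * Tear down a huge-page-aligned range: drop the reference on each
 * mapped huge page, clear all of its base PTEs, subtract the range
 * from RSS through the Linux-VServer wrapper, and flush the TLB.
 */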
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;
        int i;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                BUG_ON(!pte);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        pte_clear(pte);
                        pte++;
                }
        }
        // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
}

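/*
 * Prefault the whole mapping at mmap() time, under page_table_lock.
 * For each huge-page slot: find or allocate the page-cache page at
 * the corresponding file offset, charging the hugetlbfs quota before
 * allocating, then wire it into the page tables with set_huge_pte().
 * add_to_page_cache() uses GFP_ATOMIC because the spinlock is held.
 */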
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        unlock_page(page);
                        if (ret) {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}