upgrade to fedora-2.6.12-1.1398.FC4 + vserver 2.0.rc7
/*
 *      mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 *
 *      Address space accounting code   <alan@redhat.com>
 *      (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vs_memory.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Walk the page tables and return the mapped pte for addr, using the
 * nested atomic kmap slot so that a second pte (the destination) can
 * be mapped at the same time.  Returns NULL if any level is missing
 * or the pte itself is none.
 */
static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                goto end;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                goto end;

        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                goto end;

        pte = pte_offset_map_nested(pmd, addr);
        if (pte_none(*pte)) {
                pte_unmap_nested(pte);
                pte = NULL;
        }
end:
        return pte;
}

/*
 * The same walk with the ordinary kmap slot, and without the
 * pte_none() check: the caller only cares whether the pte page is
 * already instantiated, so that alloc_one_pte_map() can be avoided.
 */
static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;

        return pte_offset_map(pmd, addr);
}

/*
 * Allocate any missing intermediate levels and map the pte.  This can
 * drop and re-acquire mm->page_table_lock while allocating, so the
 * caller must unmap its atomic kmaps first and revalidate afterwards.
 */
static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);

        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (pmd)
                pte = pte_alloc_map(mm, pmd, addr);
        return pte;
}

/*
 * Move the pte for old_addr in vma over to new_addr in new_vma.
 */
static int
move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
                struct vm_area_struct *new_vma, unsigned long new_addr)
{
        struct address_space *mapping = NULL;
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pte_t *src, *dst;

        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
                 * moving file-based ptes, we must lock vmtruncate out,
                 * since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                if (new_vma->vm_truncate_count &&
                    new_vma->vm_truncate_count != vma->vm_truncate_count)
                        new_vma->vm_truncate_count = 0;
        }
        spin_lock(&mm->page_table_lock);

        src = get_one_pte_map_nested(mm, old_addr);
        if (src) {
                /*
                 * Look to see whether alloc_one_pte_map needs to
                 * perform a memory allocation.  If it does then we
                 * need to drop the atomic kmap.
                 */
                dst = get_one_pte_map(mm, new_addr);
                if (unlikely(!dst)) {
                        pte_unmap_nested(src);
                        if (mapping)
                                spin_unlock(&mapping->i_mmap_lock);
                        dst = alloc_one_pte_map(mm, new_addr);
                        if (mapping && !spin_trylock(&mapping->i_mmap_lock)) {
                                spin_unlock(&mm->page_table_lock);
                                spin_lock(&mapping->i_mmap_lock);
                                spin_lock(&mm->page_table_lock);
                        }
                        src = get_one_pte_map_nested(mm, old_addr);
                }
                /*
                 * Since alloc_one_pte_map can drop and re-acquire
                 * page_table_lock, we should re-check the src entry...
                 */
                if (src) {
                        if (dst) {
                                pte_t pte;
                                pte = ptep_clear_flush(vma, old_addr, src);
                                set_pte_at(mm, new_addr, dst, pte);
                        } else
                                error = -ENOMEM;
                        pte_unmap_nested(src);
                }
                if (dst)
                        pte_unmap(dst);
        }
        spin_unlock(&mm->page_table_lock);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
        return error;
}

static unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len)
{
        unsigned long offset;

        flush_cache_range(vma, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        for (offset = 0; offset < len; offset += PAGE_SIZE) {
                if (move_one_page(vma, old_addr + offset,
                                new_vma, new_addr + offset) < 0)
                        break;
                cond_resched();
        }
        return offset;
}
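
/*
 * E.g. a three-page move whose third pte copy fails returns 0x2000
 * here: two pages were moved, and move_vma() below replays those two
 * ptes back into the old vma before unmapping the new one.
 */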

static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        int split = 0;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * Even if we failed to move the page tables we still do the
         * total_vm increment, since do_munmap() will then decrement it
         * by old_len == new_len.
         */
        vx_vmpages_add(mm, new_len >> PAGE_SHIFT);

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        __vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
                if (new_len > old_len)
                        make_pages_present(new_addr + old_len,
                                           new_addr + new_len);
        }

        return new_addr;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise.
 * This option implies MREMAP_MAYMOVE.
 */
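
/*
 * Illustrative userspace view of the interface below (a sketch, not
 * part of this file): growing an anonymous mapping and letting the
 * kernel relocate it if the neighbouring virtual space is occupied.
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *      if (q == MAP_FAILED)
 *              perror("mremap");
 *
 * With MREMAP_FIXED | MREMAP_MAYMOVE and a fifth argument, the caller
 * names the destination instead, and any existing mapping there is
 * torn down first, matching the do_munmap() of new_addr below.
 */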
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                goto out;
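
        /*
         * A sketch of that zero old-len case from userspace (assuming
         * a SysV shm segment of size len, the DOS-emu use; duplicating
         * private mappings this way is dubious):
         *
         *      char *base  = shmat(shmid, NULL, 0);
         *      char *alias = mremap(base, 0, len, MREMAP_MAYMOVE);
         *
         * alias maps the same pages at a second address, and nothing
         * is unmapped at the source since old_len is zero.
         */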

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;
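
                /*
                 * Worked example of the two tests above: with
                 * addr = 0x100000 and old_len = 0x2000, a new_addr of
                 * 0x101000 fails the second test (addr <= new_addr and
                 * addr + old_len = 0x102000 > new_addr), while
                 * new_addr = 0x102000 passes both.
                 */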

                ret = do_munmap(current->mm, new_addr, new_len);
                if (ret)
                        goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
                old_len = new_len;
        }
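
        /*
         * E.g. mremap(p, 8192, 4096, 0) from userspace is fully
         * handled above: the tail page is unmapped in place and p
         * itself is returned, no flags required.
         */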

        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        if (is_vm_hugetlb_page(vma)) {
                ret = -EINVAL;
                goto out;
        }
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = current->mm->locked_vm << PAGE_SHIFT;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        goto out;
                if (!vx_vmlocked_avail(current->mm,
                        (new_len - old_len) >> PAGE_SHIFT))
                        goto out;
        }
        if (!may_expand_vm(current->mm, (new_len - old_len) >> PAGE_SHIFT)) {
                ret = -ENOMEM;
                goto out;
        }

        /* check vserver context space; maybe this should only cover
         * private writable mappings? */
        if (!vx_vmpages_avail(current->mm, (new_len - old_len) >> PAGE_SHIFT))
                goto out;

        if (vma->vm_flags & VM_ACCOUNT) {
                charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory(charged))
                        goto out_nc;
        }

        /* old_len extends exactly to the end of the area,
         * and we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        vma_adjust(vma, vma->vm_start,
                                addr + new_len, vma->vm_pgoff, NULL);

                        vx_vmpages_add(current->mm, pages);
                        __vm_stat_account(vma->vm_mm, vma->vm_flags,
                                                        vma->vm_file, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                vx_vmlocked_add(vma->vm_mm, pages);
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }
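
        /*
         * E.g. mremap(p, 4096, 8192, 0) on a vma ending at p + 4096
         * with nothing mapped in the following page takes the branch
         * above: the vma is grown in place by vma_adjust() and p is
         * returned unmoved.
         */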

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_MAYSHARE)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area_prot(vma->vm_file, 0,
                                new_len, vma->vm_pgoff, map_flags,
                                vma->vm_flags & VM_EXEC);
                        ret = new_addr;
                        if (new_addr & ~PAGE_MASK)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
out_nc:
        return ret;
}

/*
 * mremap(2) entry point: all the work is done under a write lock on
 * mmap_sem, serializing against other address-space changes.
 */
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
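
/*
 * Minimal standalone exercise of this syscall from userspace (a
 * sketch for illustration only, not part of the kernel build; any
 * libc wrapper of mremap(2), e.g. glibc's, will do):
 *
 *      #define _GNU_SOURCE
 *      #include <sys/mman.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              long pg = sysconf(_SC_PAGESIZE);
 *              char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *              if (p == MAP_FAILED)
 *                      return 1;
 *              strcpy(p, "hello");
 *              p = mremap(p, pg, 4 * pg, MREMAP_MAYMOVE);
 *              if (p == MAP_FAILED)
 *                      return 1;
 *              printf("\"%s\" now at %p\n", p, (void *)p);
 *              return 0;
 *      }
 */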