bbdcb91d440529accc01300f7df1302f2e9a0f59
[linux-2.6.git] / mm / mmap.c
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code        <alan@redhat.com>
7  */
8
9 #include <linux/slab.h>
10 #include <linux/shm.h>
11 #include <linux/mman.h>
12 #include <linux/pagemap.h>
13 #include <linux/swap.h>
14 #include <linux/syscalls.h>
15 #include <linux/init.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/personality.h>
19 #include <linux/security.h>
20 #include <linux/hugetlb.h>
21 #include <linux/profile.h>
22 #include <linux/module.h>
23 #include <linux/mount.h>
24
25 #include <asm/uaccess.h>
26 #include <asm/pgalloc.h>
27 #include <asm/tlb.h>
28
29 /*
30  * WARNING: the debugging will use recursive algorithms so never enable this
31  * unless you know what you are doing.
32  */
33 #undef DEBUG_MM_RB
34
35 /* description of effects of mapping type and prot in current implementation.
36  * this is due to the limited x86 page protection hardware.  The expected
37  * behavior is in parens:
38  *
39  * map_type     prot
40  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
41  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
42  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
43  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
44  *              
45  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
46  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
47  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
48  *
49  */
50 pgprot_t protection_map[16] = {
51         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
52         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
53 };
54
55 int sysctl_overcommit_memory = 0;       /* default is heuristic overcommit */
56 int sysctl_overcommit_ratio = 50;       /* default is 50% */
57 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
58 atomic_t vm_committed_space = ATOMIC_INIT(0);
59
60 EXPORT_SYMBOL(sysctl_overcommit_memory);
61 EXPORT_SYMBOL(sysctl_overcommit_ratio);
62 EXPORT_SYMBOL(sysctl_max_map_count);
63 EXPORT_SYMBOL(vm_committed_space);
64
65 /*
66  * Requires inode->i_mapping->i_shared_sem
67  */
68 static inline void
69 __remove_shared_vm_struct(struct vm_area_struct *vma, struct inode *inode)
70 {
71         if (inode) {
72                 if (vma->vm_flags & VM_DENYWRITE)
73                         atomic_inc(&inode->i_writecount);
74                 list_del_init(&vma->shared);
75         }
76 }
77
78 /*
79  * Remove one vm structure from the inode's i_mapping address space.
80  */
81 static void remove_shared_vm_struct(struct vm_area_struct *vma)
82 {
83         struct file *file = vma->vm_file;
84
85         if (file) {
86                 struct address_space *mapping = file->f_mapping;
87                 down(&mapping->i_shared_sem);
88                 __remove_shared_vm_struct(vma, file->f_dentry->d_inode);
89                 up(&mapping->i_shared_sem);
90         }
91 }
92
93 /*
94  *  sys_brk() for the most part doesn't need the global kernel
95  *  lock, except when an application is doing something nasty
96  *  like trying to un-brk an area that has already been mapped
97  *  to a regular file.  in this case, the unmapping will need
98  *  to invoke file system routines that need the global lock.
99  */
100 asmlinkage unsigned long sys_brk(unsigned long brk)
101 {
102         unsigned long rlim, retval;
103         unsigned long newbrk, oldbrk;
104         struct mm_struct *mm = current->mm;
105
106         down_write(&mm->mmap_sem);
107
108         if (brk < mm->end_code)
109                 goto out;
110         newbrk = PAGE_ALIGN(brk);
111         oldbrk = PAGE_ALIGN(mm->brk);
112         if (oldbrk == newbrk)
113                 goto set_brk;
114
115         /* Always allow shrinking brk. */
116         if (brk <= mm->brk) {
117                 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
118                         goto set_brk;
119                 goto out;
120         }
121
122         /* Check against rlimit.. */
123         rlim = current->rlim[RLIMIT_DATA].rlim_cur;
124         if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
125                 goto out;
126
127         /* Check against existing mmap mappings. */
128         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
129                 goto out;
130
131         /* Ok, looks good - let it rip. */
132         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
133                 goto out;
134 set_brk:
135         mm->brk = brk;
136 out:
137         retval = mm->brk;
138         up_write(&mm->mmap_sem);
139         return retval;
140 }
141
142 #ifdef DEBUG_MM_RB
143 static int browse_rb(struct rb_root *root) {
144         int i, j;
145         struct rb_node *nd, *pn = NULL;
146         i = 0;
147         unsigned long prev = 0, pend = 0;
148
149         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
150                 struct vm_area_struct *vma;
151                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
152                 if (vma->vm_start < prev)
153                         printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
154                 if (vma->vm_start < pend)
155                         printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
156                 if (vma->vm_start > vma->vm_end)
157                         printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
158                 i++;
159                 pn = nd;
160         }
161         j = 0;
162         for (nd = pn; nd; nd = rb_prev(nd)) {
163                 j++;
164         }
165         if (i != j)
166                 printk("backwards %d, forwards %d\n", j, i), i = 0;
167         return i;
168 }
169
170 void validate_mm(struct mm_struct * mm) {
171         int bug = 0;
172         int i = 0;
173         struct vm_area_struct * tmp = mm->mmap;
174         while (tmp) {
175                 tmp = tmp->vm_next;
176                 i++;
177         }
178         if (i != mm->map_count)
179                 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
180         i = browse_rb(&mm->mm_rb);
181         if (i != mm->map_count)
182                 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
183         if (bug)
184                 BUG();
185 }
186 #else
187 #define validate_mm(mm) do { } while (0)
188 #endif
189
190 static struct vm_area_struct *
191 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
192                 struct vm_area_struct **pprev, struct rb_node ***rb_link,
193                 struct rb_node ** rb_parent)
194 {
195         struct vm_area_struct * vma;
196         struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
197
198         __rb_link = &mm->mm_rb.rb_node;
199         rb_prev = __rb_parent = NULL;
200         vma = NULL;
201
202         while (*__rb_link) {
203                 struct vm_area_struct *vma_tmp;
204
205                 __rb_parent = *__rb_link;
206                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
207
208                 if (vma_tmp->vm_end > addr) {
209                         vma = vma_tmp;
210                         if (vma_tmp->vm_start <= addr)
211                                 return vma;
212                         __rb_link = &__rb_parent->rb_left;
213                 } else {
214                         rb_prev = __rb_parent;
215                         __rb_link = &__rb_parent->rb_right;
216                 }
217         }
218
219         *pprev = NULL;
220         if (rb_prev)
221                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
222         *rb_link = __rb_link;
223         *rb_parent = __rb_parent;
224         return vma;
225 }
226
227 static inline void
228 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
229                 struct vm_area_struct *prev, struct rb_node *rb_parent)
230 {
231         if (prev) {
232                 vma->vm_next = prev->vm_next;
233                 prev->vm_next = vma;
234         } else {
235                 mm->mmap = vma;
236                 if (rb_parent)
237                         vma->vm_next = rb_entry(rb_parent,
238                                         struct vm_area_struct, vm_rb);
239                 else
240                         vma->vm_next = NULL;
241         }
242 }
243
244 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
245                 struct rb_node **rb_link, struct rb_node *rb_parent)
246 {
247         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
248         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
249 }
250
251 static inline void __vma_link_file(struct vm_area_struct *vma)
252 {
253         struct file * file;
254
255         file = vma->vm_file;
256         if (file) {
257                 struct address_space *mapping = file->f_mapping;
258
259                 if (vma->vm_flags & VM_DENYWRITE)
260                         atomic_dec(&file->f_dentry->d_inode->i_writecount);
261
262                 if (vma->vm_flags & VM_SHARED)
263                         list_add_tail(&vma->shared, &mapping->i_mmap_shared);
264                 else
265                         list_add_tail(&vma->shared, &mapping->i_mmap);
266         }
267 }
268
269 static void
270 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
271         struct vm_area_struct *prev, struct rb_node **rb_link,
272         struct rb_node *rb_parent)
273 {
274         __vma_link_list(mm, vma, prev, rb_parent);
275         __vma_link_rb(mm, vma, rb_link, rb_parent);
276         __vma_link_file(vma);
277 }
278
279 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
280                         struct vm_area_struct *prev, struct rb_node **rb_link,
281                         struct rb_node *rb_parent)
282 {
283         struct address_space *mapping = NULL;
284
285         if (vma->vm_file)
286                 mapping = vma->vm_file->f_mapping;
287
288         if (mapping)
289                 down(&mapping->i_shared_sem);
290         spin_lock(&mm->page_table_lock);
291         __vma_link(mm, vma, prev, rb_link, rb_parent);
292         spin_unlock(&mm->page_table_lock);
293         if (mapping)
294                 up(&mapping->i_shared_sem);
295
296         mark_mm_hugetlb(mm, vma);
297         mm->map_count++;
298         validate_mm(mm);
299 }
300
301 /*
302  * Insert vm structure into process list sorted by address and into the inode's
303  * i_mmap ring. The caller should hold mm->page_table_lock and
304  * ->f_mappping->i_shared_sem if vm_file is non-NULL.
305  */
306 static void
307 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
308 {
309         struct vm_area_struct * __vma, * prev;
310         struct rb_node ** rb_link, * rb_parent;
311
312         __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
313         if (__vma && __vma->vm_start < vma->vm_end)
314                 BUG();
315         __vma_link(mm, vma, prev, rb_link, rb_parent);
316         mark_mm_hugetlb(mm, vma);
317         mm->map_count++;
318         validate_mm(mm);
319 }
320
321 /*
322  * If the vma has a ->close operation then the driver probably needs to release
323  * per-vma resources, so we don't attempt to merge those.
324  */
325 #define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
326
327 static inline int is_mergeable_vma(struct vm_area_struct *vma,
328                         struct file *file, unsigned long vm_flags)
329 {
330         if (vma->vm_ops && vma->vm_ops->close)
331                 return 0;
332         if (vma->vm_file != file)
333                 return 0;
334         if (vma->vm_flags != vm_flags)
335                 return 0;
336         if (vma->vm_private_data)
337                 return 0;
338         return 1;
339 }
340
341 /*
342  * Return true if we can merge this (vm_flags,file,vm_pgoff,size)
343  * in front of (at a lower virtual address and file offset than) the vma.
344  *
345  * We don't check here for the merged mmap wrapping around the end of pagecache
346  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
347  * wrap, nor mmaps which cover the final page at index -1UL.
348  */
349 static int
350 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
351         struct file *file, unsigned long vm_pgoff, unsigned long size)
352 {
353         if (is_mergeable_vma(vma, file, vm_flags)) {
354                 if (!file)
355                         return 1;       /* anon mapping */
356                 if (vma->vm_pgoff == vm_pgoff + size)
357                         return 1;
358         }
359         return 0;
360 }
361
362 /*
363  * Return true if we can merge this (vm_flags,file,vm_pgoff)
364  * beyond (at a higher virtual address and file offset than) the vma.
365  */
366 static int
367 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
368         struct file *file, unsigned long vm_pgoff)
369 {
370         if (is_mergeable_vma(vma, file, vm_flags)) {
371                 unsigned long vma_size;
372
373                 if (!file)
374                         return 1;       /* anon mapping */
375
376                 vma_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
377                 if (vma->vm_pgoff + vma_size == vm_pgoff)
378                         return 1;
379         }
380         return 0;
381 }
382
383 /*
384  * Given a new mapping request (addr,end,vm_flags,file,pgoff), figure out
385  * whether that can be merged with its predecessor or its successor.  Or
386  * both (it neatly fills a hole).
387  */
388 static struct vm_area_struct *vma_merge(struct mm_struct *mm,
389                         struct vm_area_struct *prev,
390                         struct rb_node *rb_parent, unsigned long addr, 
391                         unsigned long end, unsigned long vm_flags,
392                         struct file *file, unsigned long pgoff)
393 {
394         spinlock_t *lock = &mm->page_table_lock;
395         struct inode *inode = file ? file->f_dentry->d_inode : NULL;
396         struct semaphore *i_shared_sem;
397
398         /*
399          * We later require that vma->vm_flags == vm_flags, so this tests
400          * vma->vm_flags & VM_SPECIAL, too.
401          */
402         if (vm_flags & VM_SPECIAL)
403                 return NULL;
404
405         i_shared_sem = file ? &file->f_mapping->i_shared_sem : NULL;
406
407         if (!prev) {
408                 prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
409                 goto merge_next;
410         }
411
412         /*
413          * Can it merge with the predecessor?
414          */
415         if (prev->vm_end == addr &&
416                         can_vma_merge_after(prev, vm_flags, file, pgoff)) {
417                 struct vm_area_struct *next;
418                 int need_up = 0;
419
420                 if (unlikely(file && prev->vm_next &&
421                                 prev->vm_next->vm_file == file)) {
422                         down(i_shared_sem);
423                         need_up = 1;
424                 }
425                 spin_lock(lock);
426                 prev->vm_end = end;
427
428                 /*
429                  * OK, it did.  Can we now merge in the successor as well?
430                  */
431                 next = prev->vm_next;
432                 if (next && prev->vm_end == next->vm_start &&
433                                 can_vma_merge_before(next, vm_flags, file,
434                                         pgoff, (end - addr) >> PAGE_SHIFT)) {
435                         prev->vm_end = next->vm_end;
436                         __vma_unlink(mm, next, prev);
437                         __remove_shared_vm_struct(next, inode);
438                         spin_unlock(lock);
439                         if (need_up)
440                                 up(i_shared_sem);
441                         if (file)
442                                 fput(file);
443
444                         mm->map_count--;
445                         kmem_cache_free(vm_area_cachep, next);
446                         return prev;
447                 }
448                 spin_unlock(lock);
449                 if (need_up)
450                         up(i_shared_sem);
451                 return prev;
452         }
453
454         /*
455          * Can this new request be merged in front of prev->vm_next?
456          */
457         prev = prev->vm_next;
458         if (prev) {
459  merge_next:
460                 if (!can_vma_merge_before(prev, vm_flags, file,
461                                 pgoff, (end - addr) >> PAGE_SHIFT))
462                         return NULL;
463                 if (end == prev->vm_start) {
464                         if (file)
465                                 down(i_shared_sem);
466                         spin_lock(lock);
467                         prev->vm_start = addr;
468                         prev->vm_pgoff -= (end - addr) >> PAGE_SHIFT;
469                         spin_unlock(lock);
470                         if (file)
471                                 up(i_shared_sem);
472                         return prev;
473                 }
474         }
475
476         return NULL;
477 }
478
479 /*
480  * The caller must hold down_write(current->mm->mmap_sem).
481  */
482
483 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
484                         unsigned long len, unsigned long prot,
485                         unsigned long flags, unsigned long pgoff)
486 {
487         struct mm_struct * mm = current->mm;
488         struct vm_area_struct * vma, * prev;
489         struct inode *inode;
490         unsigned int vm_flags;
491         int correct_wcount = 0;
492         int error;
493         struct rb_node ** rb_link, * rb_parent;
494         int accountable = 1;
495         unsigned long charged = 0;
496
497         if (file) {
498                 if (is_file_hugepages(file))
499                         accountable = 0;
500
501                 if (!file->f_op || !file->f_op->mmap)
502                         return -ENODEV;
503
504                 if ((prot & PROT_EXEC) && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
505                         return -EPERM;
506         }
507
508         if (!len)
509                 return addr;
510
511         /* Careful about overflows.. */
512         len = PAGE_ALIGN(len);
513         if (!len || len > TASK_SIZE)
514                 return -EINVAL;
515
516         /* offset overflow? */
517         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
518                 return -EINVAL;
519
520         /* Too many mappings? */
521         if (mm->map_count > sysctl_max_map_count)
522                 return -ENOMEM;
523
524         /* Obtain the address to map to. we verify (or select) it and ensure
525          * that it represents a valid section of the address space.
526          */
527         addr = get_unmapped_area(file, addr, len, pgoff, flags);
528         if (addr & ~PAGE_MASK)
529                 return addr;
530
531         /* Do simple checking here so the lower-level routines won't have
532          * to. we assume access permissions have been handled by the open
533          * of the memory object, so we don't do any here.
534          */
535         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
536                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
537
538         if (flags & MAP_LOCKED) {
539                 if (!capable(CAP_IPC_LOCK))
540                         return -EPERM;
541                 vm_flags |= VM_LOCKED;
542         }
543         /* mlock MCL_FUTURE? */
544         if (vm_flags & VM_LOCKED) {
545                 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
546                 locked += len;
547                 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
548                         return -EAGAIN;
549         }
550
551         inode = file ? file->f_dentry->d_inode : NULL;
552
553         if (file) {
554                 switch (flags & MAP_TYPE) {
555                 case MAP_SHARED:
556                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
557                                 return -EACCES;
558
559                         /*
560                          * Make sure we don't allow writing to an append-only
561                          * file..
562                          */
563                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
564                                 return -EACCES;
565
566                         /*
567                          * Make sure there are no mandatory locks on the file.
568                          */
569                         if (locks_verify_locked(inode))
570                                 return -EAGAIN;
571
572                         vm_flags |= VM_SHARED | VM_MAYSHARE;
573                         if (!(file->f_mode & FMODE_WRITE))
574                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
575
576                         /* fall through */
577                 case MAP_PRIVATE:
578                         if (!(file->f_mode & FMODE_READ))
579                                 return -EACCES;
580                         break;
581
582                 default:
583                         return -EINVAL;
584                 }
585         } else {
586                 vm_flags |= VM_SHARED | VM_MAYSHARE;
587                 switch (flags & MAP_TYPE) {
588                 default:
589                         return -EINVAL;
590                 case MAP_PRIVATE:
591                         vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
592                         /* fall through */
593                 case MAP_SHARED:
594                         break;
595                 }
596         }
597
598         error = security_file_mmap(file, prot, flags);
599         if (error)
600                 return error;
601                 
602         /* Clear old maps */
603         error = -ENOMEM;
604 munmap_back:
605         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
606         if (vma && vma->vm_start < addr + len) {
607                 if (do_munmap(mm, addr, len))
608                         return -ENOMEM;
609                 goto munmap_back;
610         }
611
612         /* Check against address space limit. */
613         if ((mm->total_vm << PAGE_SHIFT) + len
614             > current->rlim[RLIMIT_AS].rlim_cur)
615                 return -ENOMEM;
616
617         if (accountable && (!(flags & MAP_NORESERVE) ||
618                         sysctl_overcommit_memory > 1)) {
619                 if (vm_flags & VM_SHARED) {
620                         /* Check memory availability in shmem_file_setup? */
621                         vm_flags |= VM_ACCOUNT;
622                 } else if (vm_flags & VM_WRITE) {
623                         /*
624                          * Private writable mapping: check memory availability
625                          */
626                         charged = len >> PAGE_SHIFT;
627                         if (security_vm_enough_memory(charged))
628                                 return -ENOMEM;
629                         vm_flags |= VM_ACCOUNT;
630                 }
631         }
632
633         /* Can we just expand an old anonymous mapping? */
634         if (!file && !(vm_flags & VM_SHARED) && rb_parent)
635                 if (vma_merge(mm, prev, rb_parent, addr, addr + len,
636                                         vm_flags, NULL, 0))
637                         goto out;
638
639         /*
640          * Determine the object being mapped and call the appropriate
641          * specific mapper. the address has already been validated, but
642          * not unmapped, but the maps are removed from the list.
643          */
644         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
645         error = -ENOMEM;
646         if (!vma)
647                 goto unacct_error;
648
649         vma->vm_mm = mm;
650         vma->vm_start = addr;
651         vma->vm_end = addr + len;
652         vma->vm_flags = vm_flags;
653         vma->vm_page_prot = protection_map[vm_flags & 0x0f];
654         vma->vm_ops = NULL;
655         vma->vm_pgoff = pgoff;
656         vma->vm_file = NULL;
657         vma->vm_private_data = NULL;
658         vma->vm_next = NULL;
659         INIT_LIST_HEAD(&vma->shared);
660
661         if (file) {
662                 error = -EINVAL;
663                 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
664                         goto free_vma;
665                 if (vm_flags & VM_DENYWRITE) {
666                         error = deny_write_access(file);
667                         if (error)
668                                 goto free_vma;
669                         correct_wcount = 1;
670                 }
671                 vma->vm_file = file;
672                 get_file(file);
673                 error = file->f_op->mmap(file, vma);
674                 if (error)
675                         goto unmap_and_free_vma;
676         } else if (vm_flags & VM_SHARED) {
677                 error = shmem_zero_setup(vma);
678                 if (error)
679                         goto free_vma;
680         }
681
682         /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
683          * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
684          * that memory reservation must be checked; but that reservation
685          * belongs to shared memory object, not to vma: so now clear it.
686          */
687         if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
688                 vma->vm_flags &= ~VM_ACCOUNT;
689
690         /* Can addr have changed??
691          *
692          * Answer: Yes, several device drivers can do it in their
693          *         f_op->mmap method. -DaveM
694          */
695         addr = vma->vm_start;
696
697         if (!file || !rb_parent || !vma_merge(mm, prev, rb_parent, addr,
698                                 addr + len, vma->vm_flags, file, pgoff)) {
699                 vma_link(mm, vma, prev, rb_link, rb_parent);
700                 if (correct_wcount)
701                         atomic_inc(&inode->i_writecount);
702         } else {
703                 if (file) {
704                         if (correct_wcount)
705                                 atomic_inc(&inode->i_writecount);
706                         fput(file);
707                 }
708                 kmem_cache_free(vm_area_cachep, vma);
709         }
710 out:    
711         mm->total_vm += len >> PAGE_SHIFT;
712         if (vm_flags & VM_LOCKED) {
713                 mm->locked_vm += len >> PAGE_SHIFT;
714                 make_pages_present(addr, addr + len);
715         }
716         if (flags & MAP_POPULATE) {
717                 up_write(&mm->mmap_sem);
718                 sys_remap_file_pages(addr, len, prot,
719                                         pgoff, flags & MAP_NONBLOCK);
720                 down_write(&mm->mmap_sem);
721         }
722         return addr;
723
724 unmap_and_free_vma:
725         if (correct_wcount)
726                 atomic_inc(&inode->i_writecount);
727         vma->vm_file = NULL;
728         fput(file);
729
730         /* Undo any partial mapping done by a device driver. */
731         zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
732 free_vma:
733         kmem_cache_free(vm_area_cachep, vma);
734 unacct_error:
735         if (charged)
736                 vm_unacct_memory(charged);
737         return error;
738 }
739
740 EXPORT_SYMBOL(do_mmap_pgoff);
741
742 /* Get an address range which is currently unmapped.
743  * For shmat() with addr=0.
744  *
745  * Ugly calling convention alert:
746  * Return value with the low bits set means error value,
747  * ie
748  *      if (ret & ~PAGE_MASK)
749  *              error = ret;
750  *
751  * This function "knows" that -ENOMEM has the bits set.
752  */
753 #ifndef HAVE_ARCH_UNMAPPED_AREA
754 static inline unsigned long
755 arch_get_unmapped_area(struct file *filp, unsigned long addr,
756                 unsigned long len, unsigned long pgoff, unsigned long flags)
757 {
758         struct mm_struct *mm = current->mm;
759         struct vm_area_struct *vma;
760         unsigned long start_addr;
761
762         if (len > TASK_SIZE)
763                 return -ENOMEM;
764
765         if (addr) {
766                 addr = PAGE_ALIGN(addr);
767                 vma = find_vma(mm, addr);
768                 if (TASK_SIZE - len >= addr &&
769                     (!vma || addr + len <= vma->vm_start))
770                         return addr;
771         }
772         start_addr = addr = mm->free_area_cache;
773
774 full_search:
775         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
776                 /* At this point:  (!vma || addr < vma->vm_end). */
777                 if (TASK_SIZE - len < addr) {
778                         /*
779                          * Start a new search - just in case we missed
780                          * some holes.
781                          */
782                         if (start_addr != TASK_UNMAPPED_BASE) {
783                                 start_addr = addr = TASK_UNMAPPED_BASE;
784                                 goto full_search;
785                         }
786                         return -ENOMEM;
787                 }
788                 if (!vma || addr + len <= vma->vm_start) {
789                         /*
790                          * Remember the place where we stopped the search:
791                          */
792                         mm->free_area_cache = addr + len;
793                         return addr;
794                 }
795                 addr = vma->vm_end;
796         }
797 }
798 #else
799 extern unsigned long
800 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
801                         unsigned long, unsigned long);
802 #endif  
803
804 unsigned long
805 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
806                 unsigned long pgoff, unsigned long flags)
807 {
808         if (flags & MAP_FIXED) {
809                 unsigned long ret;
810
811                 if (addr > TASK_SIZE - len)
812                         return -ENOMEM;
813                 if (addr & ~PAGE_MASK)
814                         return -EINVAL;
815                 if (file && is_file_hugepages(file))  {
816                         /*
817                          * Check if the given range is hugepage aligned, and
818                          * can be made suitable for hugepages.
819                          */
820                         ret = prepare_hugepage_range(addr, len);
821                 } else {
822                         /*
823                          * Ensure that a normal request is not falling in a
824                          * reserved hugepage range.  For some archs like IA-64,
825                          * there is a separate region for hugepages.
826                          */
827                         ret = is_hugepage_only_range(addr, len);
828                 }
829                 if (ret)
830                         return -EINVAL;
831                 return addr;
832         }
833
834         if (file && file->f_op && file->f_op->get_unmapped_area)
835                 return file->f_op->get_unmapped_area(file, addr, len,
836                                                 pgoff, flags);
837
838         return arch_get_unmapped_area(file, addr, len, pgoff, flags);
839 }
840
841 EXPORT_SYMBOL(get_unmapped_area);
842
843 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
844 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
845 {
846         struct vm_area_struct *vma = NULL;
847
848         if (mm) {
849                 /* Check the cache first. */
850                 /* (Cache hit rate is typically around 35%.) */
851                 vma = mm->mmap_cache;
852                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
853                         struct rb_node * rb_node;
854
855                         rb_node = mm->mm_rb.rb_node;
856                         vma = NULL;
857
858                         while (rb_node) {
859                                 struct vm_area_struct * vma_tmp;
860
861                                 vma_tmp = rb_entry(rb_node,
862                                                 struct vm_area_struct, vm_rb);
863
864                                 if (vma_tmp->vm_end > addr) {
865                                         vma = vma_tmp;
866                                         if (vma_tmp->vm_start <= addr)
867                                                 break;
868                                         rb_node = rb_node->rb_left;
869                                 } else
870                                         rb_node = rb_node->rb_right;
871                         }
872                         if (vma)
873                                 mm->mmap_cache = vma;
874                 }
875         }
876         return vma;
877 }
878
879 EXPORT_SYMBOL(find_vma);
880
881 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
882 struct vm_area_struct *
883 find_vma_prev(struct mm_struct *mm, unsigned long addr,
884                         struct vm_area_struct **pprev)
885 {
886         struct vm_area_struct *vma = NULL, *prev = NULL;
887         struct rb_node * rb_node;
888         if (!mm)
889                 goto out;
890
891         /* Guard against addr being lower than the first VMA */
892         vma = mm->mmap;
893
894         /* Go through the RB tree quickly. */
895         rb_node = mm->mm_rb.rb_node;
896
897         while (rb_node) {
898                 struct vm_area_struct *vma_tmp;
899                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
900
901                 if (addr < vma_tmp->vm_end) {
902                         rb_node = rb_node->rb_left;
903                 } else {
904                         prev = vma_tmp;
905                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
906                                 break;
907                         rb_node = rb_node->rb_right;
908                 }
909         }
910
911 out:
912         *pprev = prev;
913         return prev ? prev->vm_next : vma;
914 }
915
916 #ifdef CONFIG_STACK_GROWSUP
917 /*
918  * vma is the first one with address > vma->vm_end.  Have to extend vma.
919  */
920 int expand_stack(struct vm_area_struct * vma, unsigned long address)
921 {
922         unsigned long grow;
923
924         if (!(vma->vm_flags & VM_GROWSUP))
925                 return -EFAULT;
926
927         /*
928          * vma->vm_start/vm_end cannot change under us because the caller
929          * is required to hold the mmap_sem in read mode. We need to get
930          * the spinlock only before relocating the vma range ourself.
931          */
932         address += 4 + PAGE_SIZE - 1;
933         address &= PAGE_MASK;
934         spin_lock(&vma->vm_mm->page_table_lock);
935         grow = (address - vma->vm_end) >> PAGE_SHIFT;
936
937         /* Overcommit.. */
938         if (security_vm_enough_memory(grow)) {
939                 spin_unlock(&vma->vm_mm->page_table_lock);
940                 return -ENOMEM;
941         }
942         
943         if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
944                         ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
945                         current->rlim[RLIMIT_AS].rlim_cur) {
946                 spin_unlock(&vma->vm_mm->page_table_lock);
947                 vm_unacct_memory(grow);
948                 return -ENOMEM;
949         }
950         vma->vm_end = address;
951         vma->vm_mm->total_vm += grow;
952         if (vma->vm_flags & VM_LOCKED)
953                 vma->vm_mm->locked_vm += grow;
954         spin_unlock(&vma->vm_mm->page_table_lock);
955         return 0;
956 }
957
958 struct vm_area_struct *
959 find_extend_vma(struct mm_struct *mm, unsigned long addr)
960 {
961         struct vm_area_struct *vma, *prev;
962
963         addr &= PAGE_MASK;
964         vma = find_vma_prev(mm, addr, &prev);
965         if (vma && (vma->vm_start <= addr))
966                 return vma;
967         if (!prev || expand_stack(prev, addr))
968                 return NULL;
969         if (prev->vm_flags & VM_LOCKED) {
970                 make_pages_present(addr, prev->vm_end);
971         }
972         return prev;
973 }
974 #else
975 /*
976  * vma is the first one with address < vma->vm_start.  Have to extend vma.
977  */
978 int expand_stack(struct vm_area_struct *vma, unsigned long address)
979 {
980         unsigned long grow;
981
982         /*
983          * vma->vm_start/vm_end cannot change under us because the caller
984          * is required to hold the mmap_sem in read mode. We need to get
985          * the spinlock only before relocating the vma range ourself.
986          */
987         address &= PAGE_MASK;
988         spin_lock(&vma->vm_mm->page_table_lock);
989         grow = (vma->vm_start - address) >> PAGE_SHIFT;
990
991         /* Overcommit.. */
992         if (security_vm_enough_memory(grow)) {
993                 spin_unlock(&vma->vm_mm->page_table_lock);
994                 return -ENOMEM;
995         }
996         
997         if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
998                         ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
999                         current->rlim[RLIMIT_AS].rlim_cur) {
1000                 spin_unlock(&vma->vm_mm->page_table_lock);
1001                 vm_unacct_memory(grow);
1002                 return -ENOMEM;
1003         }
1004         vma->vm_start = address;
1005         vma->vm_pgoff -= grow;
1006         vma->vm_mm->total_vm += grow;
1007         if (vma->vm_flags & VM_LOCKED)
1008                 vma->vm_mm->locked_vm += grow;
1009         spin_unlock(&vma->vm_mm->page_table_lock);
1010         return 0;
1011 }
1012
1013 struct vm_area_struct *
1014 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1015 {
1016         struct vm_area_struct * vma;
1017         unsigned long start;
1018
1019         addr &= PAGE_MASK;
1020         vma = find_vma(mm,addr);
1021         if (!vma)
1022                 return NULL;
1023         if (vma->vm_start <= addr)
1024                 return vma;
1025         if (!(vma->vm_flags & VM_GROWSDOWN))
1026                 return NULL;
1027         start = vma->vm_start;
1028         if (expand_stack(vma, addr))
1029                 return NULL;
1030         if (vma->vm_flags & VM_LOCKED) {
1031                 make_pages_present(addr, start);
1032         }
1033         return vma;
1034 }
1035 #endif
1036
1037 /*
1038  * Try to free as many page directory entries as we can,
1039  * without having to work very hard at actually scanning
1040  * the page tables themselves.
1041  *
1042  * Right now we try to free page tables if we have a nice
1043  * PGDIR-aligned area that got free'd up. We could be more
1044  * granular if we want to, but this is fast and simple,
1045  * and covers the bad cases.
1046  *
1047  * "prev", if it exists, points to a vma before the one
1048  * we just free'd - but there's no telling how much before.
1049  */
1050 static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
1051         unsigned long start, unsigned long end)
1052 {
1053         unsigned long first = start & PGDIR_MASK;
1054         unsigned long last = end + PGDIR_SIZE - 1;
1055         unsigned long start_index, end_index;
1056         struct mm_struct *mm = tlb->mm;
1057
1058         if (!prev) {
1059                 prev = mm->mmap;
1060                 if (!prev)
1061                         goto no_mmaps;
1062                 if (prev->vm_end > start) {
1063                         if (last > prev->vm_start)
1064                                 last = prev->vm_start;
1065                         goto no_mmaps;
1066                 }
1067         }
1068         for (;;) {
1069                 struct vm_area_struct *next = prev->vm_next;
1070
1071                 if (next) {
1072                         if (next->vm_start < start) {
1073                                 prev = next;
1074                                 continue;
1075                         }
1076                         if (last > next->vm_start)
1077                                 last = next->vm_start;
1078                 }
1079                 if (prev->vm_end > first)
1080                         first = prev->vm_end + PGDIR_SIZE - 1;
1081                 break;
1082         }
1083 no_mmaps:
1084         if (last < first)       /* for arches with discontiguous pgd indices */
1085                 return;
1086         /*
1087          * If the PGD bits are not consecutive in the virtual address, the
1088          * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
1089          */
1090         start_index = pgd_index(first);
1091         if (start_index < FIRST_USER_PGD_NR)
1092                 start_index = FIRST_USER_PGD_NR;
1093         end_index = pgd_index(last);
1094         if (end_index > start_index) {
1095                 clear_page_tables(tlb, start_index, end_index - start_index);
1096                 flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
1097         }
1098 }
1099
1100 /* Normal function to fix up a mapping
1101  * This function is the default for when an area has no specific
1102  * function.  This may be used as part of a more specific routine.
1103  *
1104  * By the time this function is called, the area struct has been
1105  * removed from the process mapping list.
1106  */
1107 static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
1108 {
1109         size_t len = area->vm_end - area->vm_start;
1110
1111         area->vm_mm->total_vm -= len >> PAGE_SHIFT;
1112         if (area->vm_flags & VM_LOCKED)
1113                 area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
1114         /*
1115          * Is this a new hole at the lowest possible address?
1116          */
1117         if (area->vm_start >= TASK_UNMAPPED_BASE &&
1118                                 area->vm_start < area->vm_mm->free_area_cache)
1119               area->vm_mm->free_area_cache = area->vm_start;
1120
1121         remove_shared_vm_struct(area);
1122
1123         if (area->vm_ops && area->vm_ops->close)
1124                 area->vm_ops->close(area);
1125         if (area->vm_file)
1126                 fput(area->vm_file);
1127         kmem_cache_free(vm_area_cachep, area);
1128 }
1129
1130 /*
1131  * Update the VMA and inode share lists.
1132  *
1133  * Ok - we have the memory areas we should free on the 'free' list,
1134  * so release them, and do the vma updates.
1135  */
1136 static void unmap_vma_list(struct mm_struct *mm,
1137         struct vm_area_struct *mpnt)
1138 {
1139         do {
1140                 struct vm_area_struct *next = mpnt->vm_next;
1141                 unmap_vma(mm, mpnt);
1142                 mpnt = next;
1143         } while (mpnt != NULL);
1144         validate_mm(mm);
1145 }
1146
1147 /*
1148  * Get rid of page table information in the indicated region.
1149  *
1150  * Called with the page table lock held.
1151  */
1152 static void unmap_region(struct mm_struct *mm,
1153         struct vm_area_struct *vma,
1154         struct vm_area_struct *prev,
1155         unsigned long start,
1156         unsigned long end)
1157 {
1158         struct mmu_gather *tlb;
1159         unsigned long nr_accounted = 0;
1160
1161         lru_add_drain();
1162         tlb = tlb_gather_mmu(mm, 0);
1163         unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
1164         vm_unacct_memory(nr_accounted);
1165
1166         if (is_hugepage_only_range(start, end - start))
1167                 hugetlb_free_pgtables(tlb, prev, start, end);
1168         else
1169                 free_pgtables(tlb, prev, start, end);
1170         tlb_finish_mmu(tlb, start, end);
1171 }
1172
1173 /*
1174  * Create a list of vma's touched by the unmap, removing them from the mm's
1175  * vma list as we go..
1176  *
1177  * Called with the page_table_lock held.
1178  */
1179 static void
1180 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1181         struct vm_area_struct *prev, unsigned long end)
1182 {
1183         struct vm_area_struct **insertion_point;
1184         struct vm_area_struct *tail_vma = NULL;
1185
1186         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1187         do {
1188                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1189                 mm->map_count--;
1190                 tail_vma = vma;
1191                 vma = vma->vm_next;
1192         } while (vma && vma->vm_start < end);
1193         *insertion_point = vma;
1194         tail_vma->vm_next = NULL;
1195         mm->mmap_cache = NULL;          /* Kill the cache. */
1196 }
1197
1198 /*
1199  * Split a vma into two pieces at address 'addr', a new vma is allocated
1200  * either for the first part or the the tail.
1201  */
1202 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1203               unsigned long addr, int new_below)
1204 {
1205         struct vm_area_struct *new;
1206         struct address_space *mapping = NULL;
1207
1208         if (mm->map_count >= sysctl_max_map_count)
1209                 return -ENOMEM;
1210
1211         new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1212         if (!new)
1213                 return -ENOMEM;
1214
1215         /* most fields are the same, copy all, and then fixup */
1216         *new = *vma;
1217
1218         INIT_LIST_HEAD(&new->shared);
1219
1220         if (new_below)
1221                 new->vm_end = addr;
1222         else {
1223                 new->vm_start = addr;
1224                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1225         }
1226
1227         if (new->vm_file)
1228                 get_file(new->vm_file);
1229
1230         if (new->vm_ops && new->vm_ops->open)
1231                 new->vm_ops->open(new);
1232
1233         if (vma->vm_file)
1234                  mapping = vma->vm_file->f_mapping;
1235
1236         if (mapping)
1237                 down(&mapping->i_shared_sem);
1238         spin_lock(&mm->page_table_lock);
1239
1240         if (new_below) {
1241                 vma->vm_start = addr;
1242                 vma->vm_pgoff += ((addr - new->vm_start) >> PAGE_SHIFT);
1243         } else
1244                 vma->vm_end = addr;
1245
1246         __insert_vm_struct(mm, new);
1247
1248         spin_unlock(&mm->page_table_lock);
1249         if (mapping)
1250                 up(&mapping->i_shared_sem);
1251
1252         return 0;
1253 }
1254
1255 /* Munmap is split into 2 main parts -- this part which finds
1256  * what needs doing, and the areas themselves, which do the
1257  * work.  This now handles partial unmappings.
1258  * Jeremy Fitzhardinge <jeremy@goop.org>
1259  */
1260 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1261 {
1262         unsigned long end;
1263         struct vm_area_struct *mpnt, *prev, *last;
1264
1265         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1266                 return -EINVAL;
1267
1268         if ((len = PAGE_ALIGN(len)) == 0)
1269                 return -EINVAL;
1270
1271         /* Find the first overlapping VMA */
1272         mpnt = find_vma_prev(mm, start, &prev);
1273         if (!mpnt)
1274                 return 0;
1275         /* we have  start < mpnt->vm_end  */
1276
1277         if (is_vm_hugetlb_page(mpnt)) {
1278                 int ret = is_aligned_hugepage_range(start, len);
1279
1280                 if (ret)
1281                         return ret;
1282         }
1283
1284         /* if it doesn't overlap, we have nothing.. */
1285         end = start + len;
1286         if (mpnt->vm_start >= end)
1287                 return 0;
1288
1289         /* Something will probably happen, so notify. */
1290         if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
1291                 profile_exec_unmap(mm);
1292  
1293         /*
1294          * If we need to split any vma, do it now to save pain later.
1295          *
1296          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1297          * unmapped vm_area_struct will remain in use: so lower split_vma
1298          * places tmp vma above, and higher split_vma places tmp vma below.
1299          */
1300         if (start > mpnt->vm_start) {
1301                 if (split_vma(mm, mpnt, start, 0))
1302                         return -ENOMEM;
1303                 prev = mpnt;
1304         }
1305
1306         /* Does it split the last one? */
1307         last = find_vma(mm, end);
1308         if (last && end > last->vm_start) {
1309                 if (split_vma(mm, last, end, 1))
1310                         return -ENOMEM;
1311         }
1312         mpnt = prev? prev->vm_next: mm->mmap;
1313
1314         /*
1315          * Remove the vma's, and unmap the actual pages
1316          */
1317         spin_lock(&mm->page_table_lock);
1318         detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
1319         unmap_region(mm, mpnt, prev, start, end);
1320         spin_unlock(&mm->page_table_lock);
1321
1322         /* Fix up all other VM information */
1323         unmap_vma_list(mm, mpnt);
1324
1325         return 0;
1326 }
1327
1328 EXPORT_SYMBOL(do_munmap);
1329
1330 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1331 {
1332         int ret;
1333         struct mm_struct *mm = current->mm;
1334
1335         down_write(&mm->mmap_sem);
1336         ret = do_munmap(mm, addr, len);
1337         up_write(&mm->mmap_sem);
1338         return ret;
1339 }
1340
1341 /*
1342  *  this is really a simplified "do_mmap".  it only handles
1343  *  anonymous maps.  eventually we may be able to do some
1344  *  brk-specific accounting here.
1345  */
1346 unsigned long do_brk(unsigned long addr, unsigned long len)
1347 {
1348         struct mm_struct * mm = current->mm;
1349         struct vm_area_struct * vma, * prev;
1350         unsigned long flags;
1351         struct rb_node ** rb_link, * rb_parent;
1352
1353         len = PAGE_ALIGN(len);
1354         if (!len)
1355                 return addr;
1356
1357         if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1358                 return -EINVAL;
1359
1360         /*
1361          * mlock MCL_FUTURE?
1362          */
1363         if (mm->def_flags & VM_LOCKED) {
1364                 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
1365                 locked += len;
1366                 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
1367                         return -EAGAIN;
1368         }
1369
1370         /*
1371          * Clear old maps.  this also does some error checking for us
1372          */
1373  munmap_back:
1374         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1375         if (vma && vma->vm_start < addr + len) {
1376                 if (do_munmap(mm, addr, len))
1377                         return -ENOMEM;
1378                 goto munmap_back;
1379         }
1380
1381         /* Check against address space limits *after* clearing old maps... */
1382         if ((mm->total_vm << PAGE_SHIFT) + len
1383             > current->rlim[RLIMIT_AS].rlim_cur)
1384                 return -ENOMEM;
1385
1386         if (mm->map_count > sysctl_max_map_count)
1387                 return -ENOMEM;
1388
1389         if (security_vm_enough_memory(len >> PAGE_SHIFT))
1390                 return -ENOMEM;
1391
1392         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1393
1394         /* Can we just expand an old anonymous mapping? */
1395         if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
1396                                         flags, NULL, 0))
1397                 goto out;
1398
1399         /*
1400          * create a vma struct for an anonymous mapping
1401          */
1402         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1403         if (!vma) {
1404                 vm_unacct_memory(len >> PAGE_SHIFT);
1405                 return -ENOMEM;
1406         }
1407
1408         vma->vm_mm = mm;
1409         vma->vm_start = addr;
1410         vma->vm_end = addr + len;
1411         vma->vm_flags = flags;
1412         vma->vm_page_prot = protection_map[flags & 0x0f];
1413         vma->vm_ops = NULL;
1414         vma->vm_pgoff = 0;
1415         vma->vm_file = NULL;
1416         vma->vm_private_data = NULL;
1417         INIT_LIST_HEAD(&vma->shared);
1418
1419         vma_link(mm, vma, prev, rb_link, rb_parent);
1420
1421 out:
1422         mm->total_vm += len >> PAGE_SHIFT;
1423         if (flags & VM_LOCKED) {
1424                 mm->locked_vm += len >> PAGE_SHIFT;
1425                 make_pages_present(addr, addr + len);
1426         }
1427         return addr;
1428 }
1429
1430 EXPORT_SYMBOL(do_brk);
1431
1432 /* Release all mmaps. */
1433 void exit_mmap(struct mm_struct *mm)
1434 {
1435         struct mmu_gather *tlb;
1436         struct vm_area_struct *vma;
1437         unsigned long nr_accounted = 0;
1438
1439         profile_exit_mmap(mm);
1440  
1441         lru_add_drain();
1442
1443         spin_lock(&mm->page_table_lock);
1444
1445         tlb = tlb_gather_mmu(mm, 1);
1446         flush_cache_mm(mm);
1447         /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
1448         mm->map_count -= unmap_vmas(&tlb, mm, mm->mmap, 0,
1449                                         ~0UL, &nr_accounted, NULL);
1450         vm_unacct_memory(nr_accounted);
1451         BUG_ON(mm->map_count);  /* This is just debugging */
1452         clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
1453         tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
1454
1455         vma = mm->mmap;
1456         mm->mmap = mm->mmap_cache = NULL;
1457         mm->mm_rb = RB_ROOT;
1458         mm->rss = 0;
1459         mm->total_vm = 0;
1460         mm->locked_vm = 0;
1461
1462         spin_unlock(&mm->page_table_lock);
1463
1464         /*
1465          * Walk the list again, actually closing and freeing it
1466          * without holding any MM locks.
1467          */
1468         while (vma) {
1469                 struct vm_area_struct *next = vma->vm_next;
1470                 remove_shared_vm_struct(vma);
1471                 if (vma->vm_ops) {
1472                         if (vma->vm_ops->close)
1473                                 vma->vm_ops->close(vma);
1474                 }
1475                 if (vma->vm_file)
1476                         fput(vma->vm_file);
1477                 kmem_cache_free(vm_area_cachep, vma);
1478                 vma = next;
1479         }
1480 }
1481
1482 /* Insert vm structure into process list sorted by address
1483  * and into the inode's i_mmap ring.  If vm_file is non-NULL
1484  * then i_shared_sem is taken here.
1485  */
1486 void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1487 {
1488         struct vm_area_struct * __vma, * prev;
1489         struct rb_node ** rb_link, * rb_parent;
1490
1491         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
1492         if (__vma && __vma->vm_start < vma->vm_end)
1493                 BUG();
1494         vma_link(mm, vma, prev, rb_link, rb_parent);
1495 }
1496
1497 /*
1498  * Copy the vma structure to a new location in the same mm,
1499  * prior to moving page table entries, to effect an mremap move.
1500  */
1501 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1502         unsigned long addr, unsigned long len, unsigned long pgoff)
1503 {
1504         struct vm_area_struct *vma = *vmap;
1505         unsigned long vma_start = vma->vm_start;
1506         struct mm_struct *mm = vma->vm_mm;
1507         struct vm_area_struct *new_vma, *prev;
1508         struct rb_node **rb_link, *rb_parent;
1509
1510         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1511         new_vma = vma_merge(mm, prev, rb_parent, addr, addr + len,
1512                         vma->vm_flags, vma->vm_file, pgoff);
1513         if (new_vma) {
1514                 /*
1515                  * Source vma may have been merged into new_vma
1516                  */
1517                 if (vma_start >= new_vma->vm_start &&
1518                     vma_start < new_vma->vm_end)
1519                         *vmap = new_vma;
1520         } else {
1521                 new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1522                 if (new_vma) {
1523                         *new_vma = *vma;
1524                         INIT_LIST_HEAD(&new_vma->shared);
1525                         new_vma->vm_start = addr;
1526                         new_vma->vm_end = addr + len;
1527                         new_vma->vm_pgoff = pgoff;
1528                         if (new_vma->vm_file)
1529                                 get_file(new_vma->vm_file);
1530                         if (new_vma->vm_ops && new_vma->vm_ops->open)
1531                                 new_vma->vm_ops->open(new_vma);
1532                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
1533                 }
1534         }
1535         return new_vma;
1536 }