1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code        <alan@redhat.com>
7  */
8
9 #include <linux/slab.h>
10 #include <linux/mm.h>
11 #include <linux/shm.h>
12 #include <linux/mman.h>
13 #include <linux/pagemap.h>
14 #include <linux/swap.h>
15 #include <linux/syscalls.h>
16 #include <linux/capability.h>
17 #include <linux/init.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/personality.h>
21 #include <linux/security.h>
22 #include <linux/hugetlb.h>
23 #include <linux/profile.h>
24 #include <linux/module.h>
25 #include <linux/mount.h>
26 #include <linux/mempolicy.h>
27 #include <linux/rmap.h>
28 #include <linux/random.h>
29
30 #include <asm/uaccess.h>
31 #include <asm/cacheflush.h>
32 #include <asm/tlb.h>
33
34 static void unmap_region(struct mm_struct *mm,
35                 struct vm_area_struct *vma, struct vm_area_struct *prev,
36                 unsigned long start, unsigned long end);
37
38 /*
39  * WARNING: the debugging will use recursive algorithms so never enable this
40  * unless you know what you are doing.
41  */
42 #undef DEBUG_MM_RB
43
44 /* Description of the effects of mapping type and prot in the current implementation.
45  * This is due to the limited x86 page protection hardware.  The expected
46  * behavior is in parens:
47  *
48  * map_type     prot
49  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
50  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
51  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
52  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
53  *              
54  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
55  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
56  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
57  *
58  */
59 pgprot_t protection_map[16] = {
60         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62 };
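/*
 * Illustrative note (editorial, not part of the upstream code): protection_map
 * is indexed by the low four bits of vm_flags, i.e. VM_READ (0x1),
 * VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8), which is why
 * do_mmap_pgoff() below sets
 *
 *      vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 *
 * For example, a shared read/write mapping (VM_SHARED|VM_WRITE|VM_READ = 0xb)
 * selects entry __S011 from the second row of the array.
 */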
63
64 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
65 int sysctl_overcommit_ratio = 50;       /* default is 50% */
66 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
67 atomic_t vm_committed_space = ATOMIC_INIT(0);
68
69 /*
70  * Check that a process has enough memory to allocate a new virtual
71  * mapping. 0 means there is enough memory for the allocation to
72  * succeed and -ENOMEM implies there is not.
73  *
74  * We currently support three overcommit policies, which are set via the
75  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
76  *
77  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
78  * Additional code 2002 Jul 20 by Robert Love.
79  *
80  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
81  *
82  * Note this is a helper function intended to be used by LSMs which
83  * wish to use this logic.
84  */
85 int __vm_enough_memory(long pages, int cap_sys_admin)
86 {
87         unsigned long free, allowed;
88
89         vm_acct_memory(pages);
90
91         /*
92          * Sometimes we want to use more memory than we have
93          */
94         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
95                 return 0;
96
97         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
98                 unsigned long n;
99
100                 free = get_page_cache_size();
101                 free += nr_swap_pages;
102
103                 /*
104                  * Any slabs which are created with the
105                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
106                  * which are reclaimable, under pressure.  The dentry
107                  * cache and most inode caches should fall into this
108                  */
109                 free += atomic_read(&slab_reclaim_pages);
110
111                 /*
112                  * Leave the last 3% for root
113                  */
114                 if (!cap_sys_admin)
115                         free -= free / 32;
116
117                 if (free > pages)
118                         return 0;
119
120                 /*
121                  * nr_free_pages() is very expensive on large systems,
122                  * so only call it if we're about to fail.
123                  */
124                 n = nr_free_pages();
125
126                 /*
127                  * Leave reserved pages; they are not available for anonymous pages.
128                  */
129                 if (n <= totalreserve_pages)
130                         goto error;
131                 else
132                         n -= totalreserve_pages;
133
134                 /*
135                  * Leave the last 3% for root
136                  */
137                 if (!cap_sys_admin)
138                         n -= n / 32;
139                 free += n;
140
141                 if (free > pages)
142                         return 0;
143
144                 goto error;
145         }
146
147         allowed = (totalram_pages - hugetlb_total_pages())
148                 * sysctl_overcommit_ratio / 100;
149         /*
150          * Leave the last 3% for root
151          */
152         if (!cap_sys_admin)
153                 allowed -= allowed / 32;
154         allowed += total_swap_pages;
155
156         /* Don't let a single process grow too big:
157            leave 3% of the size of this process for other processes */
158         allowed -= current->mm->total_vm / 32;
159
160         /*
161          * cast `allowed' as a signed long because vm_committed_space
162          * sometimes has a negative value
163          */
164         if (atomic_read(&vm_committed_space) < (long)allowed)
165                 return 0;
166 error:
167         vm_unacct_memory(pages);
168
169         return -ENOMEM;
170 }
171
172 EXPORT_SYMBOL(__vm_enough_memory);
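/*
 * Illustrative sketch (editorial, not part of the upstream code): a successful
 * __vm_enough_memory() leaves the pages accounted, so the caller must undo the
 * charge itself if the operation later fails.  This mirrors how do_mmap_pgoff()
 * below handles private writable mappings via security_vm_enough_memory():
 *
 *      charged = len >> PAGE_SHIFT;
 *      if (security_vm_enough_memory(charged))
 *              return -ENOMEM;
 *      vm_flags |= VM_ACCOUNT;
 *      ...
 *      if (error)
 *              vm_unacct_memory(charged);
 */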
173
174 /*
175  * Requires inode->i_mapping->i_mmap_lock
176  */
177 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
178                 struct file *file, struct address_space *mapping)
179 {
180         if (vma->vm_flags & VM_DENYWRITE)
181                 atomic_inc(&file->f_dentry->d_inode->i_writecount);
182         if (vma->vm_flags & VM_SHARED)
183                 mapping->i_mmap_writable--;
184
185         flush_dcache_mmap_lock(mapping);
186         if (unlikely(vma->vm_flags & VM_NONLINEAR))
187                 list_del_init(&vma->shared.vm_set.list);
188         else
189                 vma_prio_tree_remove(vma, &mapping->i_mmap);
190         flush_dcache_mmap_unlock(mapping);
191 }
192
193 /*
194  * Unlink a file-based vm structure from its prio_tree, to hide
195  * vma from rmap and vmtruncate before freeing its page tables.
196  */
197 void unlink_file_vma(struct vm_area_struct *vma)
198 {
199         struct file *file = vma->vm_file;
200
201         if (file) {
202                 struct address_space *mapping = file->f_mapping;
203                 spin_lock(&mapping->i_mmap_lock);
204                 __remove_shared_vm_struct(vma, file, mapping);
205                 spin_unlock(&mapping->i_mmap_lock);
206         }
207 }
208
209 /*
210  * Close a vm structure and free it, returning the next.
211  */
212 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
213 {
214         struct vm_area_struct *next = vma->vm_next;
215
216         might_sleep();
217         if (vma->vm_ops && vma->vm_ops->close)
218                 vma->vm_ops->close(vma);
219         if (vma->vm_file)
220                 fput(vma->vm_file);
221         mpol_free(vma_policy(vma));
222         kmem_cache_free(vm_area_cachep, vma);
223         return next;
224 }
225
226 asmlinkage unsigned long sys_brk(unsigned long brk)
227 {
228         unsigned long rlim, retval;
229         unsigned long newbrk, oldbrk;
230         struct mm_struct *mm = current->mm;
231
232         down_write(&mm->mmap_sem);
233
234         if (brk < mm->end_code)
235                 goto out;
236
237         /*
238          * Check against rlimit here. If this check is done later after the test
239          * of oldbrk with newbrk then it can escape the test and let the data
240          * segment grow beyond its set limit in the case where the limit is
241          * not page aligned -Ram Gupta
242          */
243         rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
244         if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
245                 goto out;
246
247         newbrk = PAGE_ALIGN(brk);
248         oldbrk = PAGE_ALIGN(mm->brk);
249         if (oldbrk == newbrk)
250                 goto set_brk;
251
252         /* Always allow shrinking brk. */
253         if (brk <= mm->brk) {
254                 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
255                         goto set_brk;
256                 goto out;
257         }
258
259         /* Check against existing mmap mappings. */
260         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
261                 goto out;
262
263         /* Ok, looks good - let it rip. */
264         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
265                 goto out;
266 set_brk:
267         mm->brk = brk;
268 out:
269         retval = mm->brk;
270         up_write(&mm->mmap_sem);
271         return retval;
272 }
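/*
 * Worked example (editorial, assuming 4K pages): growing brk from 0x0804a100
 * to 0x0804a200 leaves PAGE_ALIGN(brk) == PAGE_ALIGN(mm->brk) (both
 * 0x0804b000), so only mm->brk is updated.  Growing it to 0x0804b100 instead
 * makes newbrk 0x0804c000 while oldbrk stays 0x0804b000, so do_brk() is asked
 * to map the single page [0x0804b000, 0x0804c000).
 */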
273
274 #ifdef DEBUG_MM_RB
275 static int browse_rb(struct rb_root *root)
276 {
277         int i = 0, j;
278         struct rb_node *nd, *pn = NULL;
279         unsigned long prev = 0, pend = 0;
280
281         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
282                 struct vm_area_struct *vma;
283                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
284                 if (vma->vm_start < prev)
285                         printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
286                 if (vma->vm_start < pend)
287                         printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
288                 if (vma->vm_start > vma->vm_end)
289                         printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
290                 i++;
291                 pn = nd;
292         }
293         j = 0;
294         for (nd = pn; nd; nd = rb_prev(nd)) {
295                 j++;
296         }
297         if (i != j)
298                 printk("backwards %d, forwards %d\n", j, i), i = 0;
299         return i;
300 }
301
302 void validate_mm(struct mm_struct *mm)
303 {
304         int bug = 0;
305         int i = 0;
306         struct vm_area_struct *tmp = mm->mmap;
307         while (tmp) {
308                 tmp = tmp->vm_next;
309                 i++;
310         }
311         if (i != mm->map_count)
312                 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
313         i = browse_rb(&mm->mm_rb);
314         if (i != mm->map_count)
315                 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
316         BUG_ON(bug);
317 }
318 #else
319 #define validate_mm(mm) do { } while (0)
320 #endif
321
322 static struct vm_area_struct *
323 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
324                 struct vm_area_struct **pprev, struct rb_node ***rb_link,
325                 struct rb_node ** rb_parent)
326 {
327         struct vm_area_struct * vma;
328         struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
329
330         __rb_link = &mm->mm_rb.rb_node;
331         rb_prev = __rb_parent = NULL;
332         vma = NULL;
333
334         while (*__rb_link) {
335                 struct vm_area_struct *vma_tmp;
336
337                 __rb_parent = *__rb_link;
338                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
339
340                 if (vma_tmp->vm_end > addr) {
341                         vma = vma_tmp;
342                         if (vma_tmp->vm_start <= addr)
343                                 return vma;
344                         __rb_link = &__rb_parent->rb_left;
345                 } else {
346                         rb_prev = __rb_parent;
347                         __rb_link = &__rb_parent->rb_right;
348                 }
349         }
350
351         *pprev = NULL;
352         if (rb_prev)
353                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
354         *rb_link = __rb_link;
355         *rb_parent = __rb_parent;
356         return vma;
357 }
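/*
 * Editorial note: find_vma_prepare() returns the first vma ending above addr
 * (or NULL).  When no existing vma covers addr it also fills in *pprev,
 * *rb_link and *rb_parent with the list and rbtree position a new vma would
 * be linked at; do_mmap_pgoff() below first checks the returned vma against
 * [addr, addr+len) and unmaps any overlap before using that position.
 */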
358
359 static inline void
360 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
361                 struct vm_area_struct *prev, struct rb_node *rb_parent)
362 {
363         if (vma->vm_flags & VM_EXEC)
364                 arch_add_exec_range(mm, vma->vm_end);
365         if (prev) {
366                 vma->vm_next = prev->vm_next;
367                 prev->vm_next = vma;
368         } else {
369                 mm->mmap = vma;
370                 if (rb_parent)
371                         vma->vm_next = rb_entry(rb_parent,
372                                         struct vm_area_struct, vm_rb);
373                 else
374                         vma->vm_next = NULL;
375         }
376 }
377
378 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
379                 struct rb_node **rb_link, struct rb_node *rb_parent)
380 {
381         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
382         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
383 }
384
385 static inline void __vma_link_file(struct vm_area_struct *vma)
386 {
387         struct file * file;
388
389         file = vma->vm_file;
390         if (file) {
391                 struct address_space *mapping = file->f_mapping;
392
393                 if (vma->vm_flags & VM_DENYWRITE)
394                         atomic_dec(&file->f_dentry->d_inode->i_writecount);
395                 if (vma->vm_flags & VM_SHARED)
396                         mapping->i_mmap_writable++;
397
398                 flush_dcache_mmap_lock(mapping);
399                 if (unlikely(vma->vm_flags & VM_NONLINEAR))
400                         vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
401                 else
402                         vma_prio_tree_insert(vma, &mapping->i_mmap);
403                 flush_dcache_mmap_unlock(mapping);
404         }
405 }
406
407 static void
408 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
409         struct vm_area_struct *prev, struct rb_node **rb_link,
410         struct rb_node *rb_parent)
411 {
412         __vma_link_list(mm, vma, prev, rb_parent);
413         __vma_link_rb(mm, vma, rb_link, rb_parent);
414         __anon_vma_link(vma);
415 }
416
417 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
418                         struct vm_area_struct *prev, struct rb_node **rb_link,
419                         struct rb_node *rb_parent)
420 {
421         struct address_space *mapping = NULL;
422
423         if (vma->vm_file)
424                 mapping = vma->vm_file->f_mapping;
425
426         if (mapping) {
427                 spin_lock(&mapping->i_mmap_lock);
428                 vma->vm_truncate_count = mapping->truncate_count;
429         }
430         anon_vma_lock(vma);
431
432         __vma_link(mm, vma, prev, rb_link, rb_parent);
433         __vma_link_file(vma);
434
435         anon_vma_unlock(vma);
436         if (mapping)
437                 spin_unlock(&mapping->i_mmap_lock);
438
439         mm->map_count++;
440         validate_mm(mm);
441 }
442
443 /*
444  * Helper for vma_adjust in the split_vma insert case:
445  * insert vm structure into list and rbtree and anon_vma,
446  * but it has already been inserted into prio_tree earlier.
447  */
448 static void
449 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
450 {
451         struct vm_area_struct * __vma, * prev;
452         struct rb_node ** rb_link, * rb_parent;
453
454         __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
455         BUG_ON(__vma && __vma->vm_start < vma->vm_end);
456         __vma_link(mm, vma, prev, rb_link, rb_parent);
457         mm->map_count++;
458 }
459
460 static inline void
461 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
462                 struct vm_area_struct *prev)
463 {
464         prev->vm_next = vma->vm_next;
465         rb_erase(&vma->vm_rb, &mm->mm_rb);
466         if (mm->mmap_cache == vma)
467                 mm->mmap_cache = prev;
468         if (vma->vm_flags & VM_EXEC)
469                 arch_remove_exec_range(mm, vma->vm_end);
470 }
471
472 /*
473  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
474  * is already present in an i_mmap tree without adjusting the tree.
475  * The following helper function should be used when such adjustments
476  * are necessary.  The "insert" vma (if any) is to be inserted
477  * before we drop the necessary locks.
478  */
479 void vma_adjust(struct vm_area_struct *vma, unsigned long start,
480         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
481 {
482         struct mm_struct *mm = vma->vm_mm;
483         struct vm_area_struct *next = vma->vm_next;
484         struct vm_area_struct *importer = NULL;
485         struct address_space *mapping = NULL;
486         struct prio_tree_root *root = NULL;
487         struct file *file = vma->vm_file;
488         struct anon_vma *anon_vma = NULL;
489         long adjust_next = 0;
490         int remove_next = 0;
491
492         if (next && !insert) {
493                 if (end >= next->vm_end) {
494                         /*
495                          * vma expands, overlapping all the next, and
496                          * perhaps the one after too (mprotect case 6).
497                          */
498 again:                  remove_next = 1 + (end > next->vm_end);
499                         end = next->vm_end;
500                         anon_vma = next->anon_vma;
501                         importer = vma;
502                 } else if (end > next->vm_start) {
503                         /*
504                          * vma expands, overlapping part of the next:
505                          * mprotect case 5 shifting the boundary up.
506                          */
507                         adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
508                         anon_vma = next->anon_vma;
509                         importer = vma;
510                 } else if (end < vma->vm_end) {
511                         /*
512                          * vma shrinks, and !insert tells it's not
513                          * split_vma inserting another: so it must be
514                          * mprotect case 4 shifting the boundary down.
515                          */
516                         adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
517                         anon_vma = next->anon_vma;
518                         importer = next;
519                 }
520         }
521
522         if (file) {
523                 mapping = file->f_mapping;
524                 if (!(vma->vm_flags & VM_NONLINEAR))
525                         root = &mapping->i_mmap;
526                 spin_lock(&mapping->i_mmap_lock);
527                 if (importer &&
528                     vma->vm_truncate_count != next->vm_truncate_count) {
529                         /*
530                          * unmap_mapping_range might be in progress:
531                          * ensure that the expanding vma is rescanned.
532                          */
533                         importer->vm_truncate_count = 0;
534                 }
535                 if (insert) {
536                         insert->vm_truncate_count = vma->vm_truncate_count;
537                         /*
538                          * Put into prio_tree now, so instantiated pages
539                          * are visible to arm/parisc __flush_dcache_page
540                          * throughout; but we cannot insert into address
541                          * space until vma start or end is updated.
542                          */
543                         __vma_link_file(insert);
544                 }
545         }
546
547         /*
548          * When changing only vma->vm_end, we don't really need
549          * anon_vma lock: but is that case worth optimizing out?
550          */
551         if (vma->anon_vma)
552                 anon_vma = vma->anon_vma;
553         if (anon_vma) {
554                 spin_lock(&anon_vma->lock);
555                 /*
556                  * Easily overlooked: when mprotect shifts the boundary,
557                  * make sure the expanding vma has anon_vma set if the
558                  * shrinking vma had, to cover any anon pages imported.
559                  */
560                 if (importer && !importer->anon_vma) {
561                         importer->anon_vma = anon_vma;
562                         __anon_vma_link(importer);
563                 }
564         }
565
566         if (root) {
567                 flush_dcache_mmap_lock(mapping);
568                 vma_prio_tree_remove(vma, root);
569                 if (adjust_next)
570                         vma_prio_tree_remove(next, root);
571         }
572
573         vma->vm_start = start;
574         vma->vm_end = end;
575         vma->vm_pgoff = pgoff;
576         if (adjust_next) {
577                 next->vm_start += adjust_next << PAGE_SHIFT;
578                 next->vm_pgoff += adjust_next;
579         }
580
581         if (root) {
582                 if (adjust_next)
583                         vma_prio_tree_insert(next, root);
584                 vma_prio_tree_insert(vma, root);
585                 flush_dcache_mmap_unlock(mapping);
586         }
587
588         if (remove_next) {
589                 /*
590                  * vma_merge has merged next into vma, and needs
591                  * us to remove next before dropping the locks.
592                  */
593                 __vma_unlink(mm, next, vma);
594                 if (file)
595                         __remove_shared_vm_struct(next, file, mapping);
596                 if (next->anon_vma)
597                         __anon_vma_merge(vma, next);
598         } else if (insert) {
599                 /*
600                  * split_vma has split insert from vma, and needs
601                  * us to insert it before dropping the locks
602                  * (it may either follow vma or precede it).
603                  */
604                 __insert_vm_struct(mm, insert);
605         }
606
607         if (anon_vma)
608                 spin_unlock(&anon_vma->lock);
609         if (mapping)
610                 spin_unlock(&mapping->i_mmap_lock);
611
612         if (remove_next) {
613                 if (file)
614                         fput(file);
615                 mm->map_count--;
616                 mpol_free(vma_policy(next));
617                 kmem_cache_free(vm_area_cachep, next);
618                 /*
619                  * In mprotect's case 6 (see comments on vma_merge),
620                  * we must remove another next too. It would clutter
621                  * up the code too much to do both in one go.
622                  */
623                 if (remove_next == 2) {
624                         next = vma->vm_next;
625                         goto again;
626                 }
627         }
628
629         validate_mm(mm);
630 }
631
632 /*
633  * If the vma has a ->close operation then the driver probably needs to release
634  * per-vma resources, so we don't attempt to merge those.
635  */
636 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
637
638 static inline int is_mergeable_vma(struct vm_area_struct *vma,
639                         struct file *file, unsigned long vm_flags)
640 {
641         if (vma->vm_flags != vm_flags)
642                 return 0;
643         if (vma->vm_file != file)
644                 return 0;
645         if (vma->vm_ops && vma->vm_ops->close)
646                 return 0;
647         return 1;
648 }
649
650 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
651                                         struct anon_vma *anon_vma2)
652 {
653         return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
654 }
655
656 /*
657  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
658  * in front of (at a lower virtual address and file offset than) the vma.
659  *
660  * We cannot merge two vmas if they have differently assigned (non-NULL)
661  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
662  *
663  * We don't check here for the merged mmap wrapping around the end of pagecache
664  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
665  * wrap, nor mmaps which cover the final page at index -1UL.
666  */
667 static int
668 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
669         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
670 {
671         if (is_mergeable_vma(vma, file, vm_flags) &&
672             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
673                 if (vma->vm_pgoff == vm_pgoff)
674                         return 1;
675         }
676         return 0;
677 }
678
679 /*
680  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
681  * beyond (at a higher virtual address and file offset than) the vma.
682  *
683  * We cannot merge two vmas if they have differently assigned (non-NULL)
684  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
685  */
686 static int
687 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
688         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
689 {
690         if (is_mergeable_vma(vma, file, vm_flags) &&
691             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
692                 pgoff_t vm_pglen;
693                 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
694                 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
695                         return 1;
696         }
697         return 0;
698 }
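/*
 * Worked example (editorial): a file-backed vma spanning two pages with
 * vm_pgoff 10 has vm_pgoff + vm_pglen == 12, so a new mapping of the same
 * file with vm_pgoff 12 that starts exactly at vma->vm_end can be merged
 * after it, provided the flags, file and anon_vma are compatible.
 */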
699
700 /*
701  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
702  * whether that can be merged with its predecessor or its successor.
703  * Or both (it neatly fills a hole).
704  *
705  * In most cases - when called for mmap, brk or mremap - [addr,end) is
706  * certain not to be mapped by the time vma_merge is called; but when
707  * called for mprotect, it is certain to be already mapped (either at
708  * an offset within prev, or at the start of next), and the flags of
709  * this area are about to be changed to vm_flags - and the no-change
710  * case has already been eliminated.
711  *
712  * The following mprotect cases have to be considered, where AAAA is
713  * the area passed down from mprotect_fixup, never extending beyond one
714  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
715  *
716  *     AAAA             AAAA                AAAA          AAAA
717  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
718  *    cannot merge    might become    might become    might become
719  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
720  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
721  *    mremap move:                                    PPPPNNNNNNNN 8
722  *        AAAA
723  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
724  *    might become    case 1 below    case 2 below    case 3 below
725  *
726  * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
727  * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
728  */
729 struct vm_area_struct *vma_merge(struct mm_struct *mm,
730                         struct vm_area_struct *prev, unsigned long addr,
731                         unsigned long end, unsigned long vm_flags,
732                         struct anon_vma *anon_vma, struct file *file,
733                         pgoff_t pgoff, struct mempolicy *policy)
734 {
735         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
736         struct vm_area_struct *area, *next;
737
738         /*
739          * We later require that vma->vm_flags == vm_flags,
740          * so this tests vma->vm_flags & VM_SPECIAL, too.
741          */
742         if (vm_flags & VM_SPECIAL)
743                 return NULL;
744
745         if (prev)
746                 next = prev->vm_next;
747         else
748                 next = mm->mmap;
749         area = next;
750         if (next && next->vm_end == end)                /* cases 6, 7, 8 */
751                 next = next->vm_next;
752
753         /*
754          * Can it merge with the predecessor?
755          */
756         if (prev && prev->vm_end == addr &&
757                         mpol_equal(vma_policy(prev), policy) &&
758                         can_vma_merge_after(prev, vm_flags,
759                                                 anon_vma, file, pgoff)) {
760                 /*
761                  * OK, it can.  Can we now merge in the successor as well?
762                  */
763                 if (next && end == next->vm_start &&
764                                 mpol_equal(policy, vma_policy(next)) &&
765                                 can_vma_merge_before(next, vm_flags,
766                                         anon_vma, file, pgoff+pglen) &&
767                                 is_mergeable_anon_vma(prev->anon_vma,
768                                                       next->anon_vma)) {
769                                                         /* cases 1, 6 */
770                         vma_adjust(prev, prev->vm_start,
771                                 next->vm_end, prev->vm_pgoff, NULL);
772                 } else                                  /* cases 2, 5, 7 */
773                         vma_adjust(prev, prev->vm_start,
774                                 end, prev->vm_pgoff, NULL);
775                 if (prev->vm_flags & VM_EXEC)
776                         arch_add_exec_range(mm, prev->vm_end);
777                 return prev;
778         }
779
780         /*
781          * Can this new request be merged in front of next?
782          */
783         if (next && end == next->vm_start &&
784                         mpol_equal(policy, vma_policy(next)) &&
785                         can_vma_merge_before(next, vm_flags,
786                                         anon_vma, file, pgoff+pglen)) {
787                 if (prev && addr < prev->vm_end)        /* case 4 */
788                         vma_adjust(prev, prev->vm_start,
789                                 addr, prev->vm_pgoff, NULL);
790                 else                                    /* cases 3, 8 */
791                         vma_adjust(area, addr, next->vm_end,
792                                 next->vm_pgoff - pglen, NULL);
793                 return area;
794         }
795
796         return NULL;
797 }
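/*
 * Illustrative sketch (editorial): the anonymous-mapping fast path in
 * do_mmap_pgoff() below simply tries to extend a neighbouring vma before
 * allocating a new one:
 *
 *      if (!file && !(vm_flags & VM_SHARED) &&
 *          vma_merge(mm, prev, addr, addr + len, vm_flags,
 *                    NULL, NULL, pgoff, NULL))
 *              goto out;
 */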
798
799 /*
800  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
801  * neighbouring vmas for a suitable anon_vma, before it goes off
802  * to allocate a new anon_vma.  It checks because a repetitive
803  * sequence of mprotects and faults may otherwise lead to distinct
804  * anon_vmas being allocated, preventing vma merge in subsequent
805  * mprotect.
806  */
807 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
808 {
809         struct vm_area_struct *near;
810         unsigned long vm_flags;
811
812         near = vma->vm_next;
813         if (!near)
814                 goto try_prev;
815
816         /*
817          * Since only mprotect tries to remerge vmas, match flags
818          * which might be mprotected into each other later on.
819          * Neither mlock nor madvise tries to remerge at present,
820          * so leave their flags as obstructing a merge.
821          */
822         vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
823         vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
824
825         if (near->anon_vma && vma->vm_end == near->vm_start &&
826                         mpol_equal(vma_policy(vma), vma_policy(near)) &&
827                         can_vma_merge_before(near, vm_flags,
828                                 NULL, vma->vm_file, vma->vm_pgoff +
829                                 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
830                 return near->anon_vma;
831 try_prev:
832         /*
833          * It is potentially slow to have to call find_vma_prev here.
834          * But it's only on the first write fault on the vma, not
835          * every time, and we could devise a way to avoid it later
836          * (e.g. stash info in next's anon_vma_node when assigning
837          * an anon_vma, or when trying vma_merge).  Another time.
838          */
839         BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
840         if (!near)
841                 goto none;
842
843         vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
844         vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
845
846         if (near->anon_vma && near->vm_end == vma->vm_start &&
847                         mpol_equal(vma_policy(near), vma_policy(vma)) &&
848                         can_vma_merge_after(near, vm_flags,
849                                 NULL, vma->vm_file, vma->vm_pgoff))
850                 return near->anon_vma;
851 none:
852         /*
853          * There's no absolute need to look only at touching neighbours:
854          * we could search further afield for "compatible" anon_vmas.
855          * But it would probably just be a waste of time searching,
856          * or lead to too many vmas hanging off the same anon_vma.
857          * We're trying to allow mprotect remerging later on,
858          * not trying to minimize memory used for anon_vmas.
859          */
860         return NULL;
861 }
862
863 #ifdef CONFIG_PROC_FS
864 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
865                                                 struct file *file, long pages)
866 {
867         const unsigned long stack_flags
868                 = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
869
870         if (file) {
871                 mm->shared_vm += pages;
872                 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
873                         mm->exec_vm += pages;
874         } else if (flags & stack_flags)
875                 mm->stack_vm += pages;
876         if (flags & (VM_RESERVED|VM_IO))
877                 mm->reserved_vm += pages;
878 }
879 #endif /* CONFIG_PROC_FS */
880
881 /*
882  * The caller must hold down_write(current->mm->mmap_sem).
883  */
884
885 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
886                         unsigned long len, unsigned long prot,
887                         unsigned long flags, unsigned long pgoff)
888 {
889         struct mm_struct * mm = current->mm;
890         struct vm_area_struct * vma, * prev;
891         struct inode *inode;
892         unsigned int vm_flags;
893         int correct_wcount = 0;
894         int error;
895         struct rb_node ** rb_link, * rb_parent;
896         int accountable = 1;
897         unsigned long charged = 0, reqprot = prot;
898
899         if (file) {
900                 if (is_file_hugepages(file))
901                         accountable = 0;
902
903                 if (!file->f_op || !file->f_op->mmap)
904                         return -ENODEV;
905
906                 if ((prot & PROT_EXEC) &&
907                     (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
908                         return -EPERM;
909         }
910         /*
911          * Does the application expect PROT_READ to imply PROT_EXEC?
912          *
913          * (the exception is when the underlying filesystem is noexec
914          *  mounted, in which case we don't add PROT_EXEC.)
915          */
916         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
917                 if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
918                         prot |= PROT_EXEC;
919
920         if (!len)
921                 return -EINVAL;
922
923         /* Careful about overflows.. */
924         len = PAGE_ALIGN(len);
925         if (!len || len > TASK_SIZE)
926                 return -ENOMEM;
927
928         /* offset overflow? */
929         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
930                return -EOVERFLOW;
931
932         /* Too many mappings? */
933         if (mm->map_count > sysctl_max_map_count)
934                 return -ENOMEM;
935
936         /* Obtain the address to map to. We verify (or select) it and ensure
937          * that it represents a valid section of the address space.
938          */
939         addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
940         if (addr & ~PAGE_MASK)
941                 return addr;
942
943         /* Do simple checking here so the lower-level routines won't have
944          * to. We assume access permissions have been handled by the open
945          * of the memory object, so we don't do any here.
946          */
947         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
948                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
949
950         if (flags & MAP_LOCKED) {
951                 if (!can_do_mlock())
952                         return -EPERM;
953                 vm_flags |= VM_LOCKED;
954         }
955         /* mlock MCL_FUTURE? */
956         if (vm_flags & VM_LOCKED) {
957                 unsigned long locked, lock_limit;
958                 locked = len >> PAGE_SHIFT;
959                 locked += mm->locked_vm;
960                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
961                 lock_limit >>= PAGE_SHIFT;
962                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
963                         return -EAGAIN;
964         }
965
966         inode = file ? file->f_dentry->d_inode : NULL;
967
968         if (file) {
969                 switch (flags & MAP_TYPE) {
970                 case MAP_SHARED:
971                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
972                                 return -EACCES;
973
974                         /*
975                          * Make sure we don't allow writing to an append-only
976                          * file..
977                          */
978                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
979                                 return -EACCES;
980
981                         /*
982                          * Make sure there are no mandatory locks on the file.
983                          */
984                         if (locks_verify_locked(inode))
985                                 return -EAGAIN;
986
987                         vm_flags |= VM_SHARED | VM_MAYSHARE;
988                         if (!(file->f_mode & FMODE_WRITE))
989                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
990
991                         /* fall through */
992                 case MAP_PRIVATE:
993                         if (!(file->f_mode & FMODE_READ))
994                                 return -EACCES;
995                         break;
996
997                 default:
998                         return -EINVAL;
999                 }
1000         } else {
1001                 switch (flags & MAP_TYPE) {
1002                 case MAP_SHARED:
1003                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1004                         break;
1005                 case MAP_PRIVATE:
1006                         /*
1007                          * Set pgoff according to addr for anon_vma.
1008                          */
1009                         pgoff = addr >> PAGE_SHIFT;
1010                         break;
1011                 default:
1012                         return -EINVAL;
1013                 }
1014         }
1015
1016         error = security_file_mmap(file, reqprot, prot, flags);
1017         if (error)
1018                 return error;
1019                 
1020         /* Clear old maps */
1021         error = -ENOMEM;
1022 munmap_back:
1023         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1024         if (vma && vma->vm_start < addr + len) {
1025                 if (do_munmap(mm, addr, len))
1026                         return -ENOMEM;
1027                 goto munmap_back;
1028         }
1029
1030         /* Check against address space limit. */
1031         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1032                 return -ENOMEM;
1033
1034         if (accountable && (!(flags & MAP_NORESERVE) ||
1035                             sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
1036                 if (vm_flags & VM_SHARED) {
1037                         /* Check memory availability in shmem_file_setup? */
1038                         vm_flags |= VM_ACCOUNT;
1039                 } else if (vm_flags & VM_WRITE) {
1040                         /*
1041                          * Private writable mapping: check memory availability
1042                          */
1043                         charged = len >> PAGE_SHIFT;
1044                         if (security_vm_enough_memory(charged))
1045                                 return -ENOMEM;
1046                         vm_flags |= VM_ACCOUNT;
1047                 }
1048         }
1049
1050         /*
1051          * Can we just expand an old private anonymous mapping?
1052          * The VM_SHARED test is necessary because shmem_zero_setup
1053          * will create the file object for a shared anonymous map below.
1054          */
1055         if (!file && !(vm_flags & VM_SHARED) &&
1056             vma_merge(mm, prev, addr, addr + len, vm_flags,
1057                                         NULL, NULL, pgoff, NULL))
1058                 goto out;
1059
1060         /*
1061          * Determine the object being mapped and call the appropriate
1062          * specific mapper. The address has already been validated, but
1063          * not unmapped; any overlapping maps have been removed from the list.
1064          */
1065         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1066         if (!vma) {
1067                 error = -ENOMEM;
1068                 goto unacct_error;
1069         }
1070
1071         vma->vm_mm = mm;
1072         vma->vm_start = addr;
1073         vma->vm_end = addr + len;
1074         vma->vm_flags = vm_flags;
1075         vma->vm_page_prot = protection_map[vm_flags & 0x0f];
1076         vma->vm_pgoff = pgoff;
1077
1078         if (file) {
1079                 error = -EINVAL;
1080                 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1081                         goto free_vma;
1082                 if (vm_flags & VM_DENYWRITE) {
1083                         error = deny_write_access(file);
1084                         if (error)
1085                                 goto free_vma;
1086                         correct_wcount = 1;
1087                 }
1088                 vma->vm_file = file;
1089                 get_file(file);
1090                 error = file->f_op->mmap(file, vma);
1091                 if (error)
1092                         goto unmap_and_free_vma;
1093         } else if (vm_flags & VM_SHARED) {
1094                 error = shmem_zero_setup(vma);
1095                 if (error)
1096                         goto free_vma;
1097         }
1098
1099         /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
1100          * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
1101          * that memory reservation must be checked; but that reservation
1102          * belongs to shared memory object, not to vma: so now clear it.
1103          */
1104         if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
1105                 vma->vm_flags &= ~VM_ACCOUNT;
1106
1107         /* Can addr have changed??
1108          *
1109          * Answer: Yes, several device drivers can do it in their
1110          *         f_op->mmap method. -DaveM
1111          */
1112         addr = vma->vm_start;
1113         pgoff = vma->vm_pgoff;
1114         vm_flags = vma->vm_flags;
1115
1116         if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
1117                         vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
1118                 file = vma->vm_file;
1119                 vma_link(mm, vma, prev, rb_link, rb_parent);
1120                 if (correct_wcount)
1121                         atomic_inc(&inode->i_writecount);
1122         } else {
1123                 if (file) {
1124                         if (correct_wcount)
1125                                 atomic_inc(&inode->i_writecount);
1126                         fput(file);
1127                 }
1128                 mpol_free(vma_policy(vma));
1129                 kmem_cache_free(vm_area_cachep, vma);
1130         }
1131 out:    
1132         vx_vmpages_add(mm, len >> PAGE_SHIFT);
1133         vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1134         if (vm_flags & VM_LOCKED) {
1135                 vx_vmlocked_add(mm, len >> PAGE_SHIFT);
1136                 make_pages_present(addr, addr + len);
1137         }
1138         if (flags & MAP_POPULATE) {
1139                 up_write(&mm->mmap_sem);
1140                 sys_remap_file_pages(addr, len, 0,
1141                                         pgoff, flags & MAP_NONBLOCK);
1142                 down_write(&mm->mmap_sem);
1143         }
1144         return addr;
1145
1146 unmap_and_free_vma:
1147         if (correct_wcount)
1148                 atomic_inc(&inode->i_writecount);
1149         vma->vm_file = NULL;
1150         fput(file);
1151
1152         /* Undo any partial mapping done by a device driver. */
1153         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1154         charged = 0;
1155 free_vma:
1156         kmem_cache_free(vm_area_cachep, vma);
1157 unacct_error:
1158         if (charged)
1159                 vm_unacct_memory(charged);
1160         return error;
1161 }
1162
1163 EXPORT_SYMBOL(do_mmap_pgoff);
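/*
 * Illustrative sketch (editorial, not part of the upstream code): a minimal
 * caller, following the rule above that mmap_sem must be held for writing.
 * Errors come back encoded in the return value with the low bits set (see the
 * get_unmapped_area comment below):
 *
 *      down_write(&current->mm->mmap_sem);
 *      addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 *      up_write(&current->mm->mmap_sem);
 *      if (addr & ~PAGE_MASK)
 *              error = (long)addr;
 */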
1164
1165 /* Get an address range which is currently unmapped.
1166  * For shmat() with addr=0.
1167  *
1168  * Ugly calling convention alert:
1169  * Return value with the low bits set means error value,
1170  * i.e.
1171  *      if (ret & ~PAGE_MASK)
1172  *              error = ret;
1173  *
1174  * This function "knows" that -ENOMEM has the bits set.
1175  */
1176 #ifndef HAVE_ARCH_UNMAPPED_AREA
1177 unsigned long
1178 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1179                 unsigned long len, unsigned long pgoff, unsigned long flags)
1180 {
1181         struct mm_struct *mm = current->mm;
1182         struct vm_area_struct *vma;
1183         unsigned long start_addr;
1184
1185         if (len > TASK_SIZE)
1186                 return -ENOMEM;
1187
1188         if (addr) {
1189                 addr = PAGE_ALIGN(addr);
1190                 vma = find_vma(mm, addr);
1191                 if (TASK_SIZE - len >= addr &&
1192                     (!vma || addr + len <= vma->vm_start))
1193                         return addr;
1194         }
1195         if (len > mm->cached_hole_size) {
1196                 start_addr = addr = mm->free_area_cache;
1197         } else {
1198                 start_addr = addr = TASK_UNMAPPED_BASE;
1199                 mm->cached_hole_size = 0;
1200         }
1201
1202 full_search:
1203         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1204                 /* At this point:  (!vma || addr < vma->vm_end). */
1205                 if (TASK_SIZE - len < addr) {
1206                         /*
1207                          * Start a new search - just in case we missed
1208                          * some holes.
1209                          */
1210                         if (start_addr != TASK_UNMAPPED_BASE) {
1211                                 addr = TASK_UNMAPPED_BASE;
1212                                 start_addr = addr;
1213                                 mm->cached_hole_size = 0;
1214                                 goto full_search;
1215                         }
1216                         return -ENOMEM;
1217                 }
1218                 if (!vma || addr + len <= vma->vm_start) {
1219                         /*
1220                          * Remember the place where we stopped the search:
1221                          */
1222                         mm->free_area_cache = addr + len;
1223                         return addr;
1224                 }
1225                 if (addr + mm->cached_hole_size < vma->vm_start)
1226                         mm->cached_hole_size = vma->vm_start - addr;
1227                 addr = vma->vm_end;
1228         }
1229 }
1230 #endif  
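/*
 * Editorial note: free_area_cache remembers where the last search left off
 * and cached_hole_size the largest hole seen below it, so a later request no
 * bigger than that hole restarts from TASK_UNMAPPED_BASE to reuse it, while
 * larger requests continue from free_area_cache without rescanning.
 */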
1231
1232 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1233 {
1234         /*
1235          * Is this a new hole at the lowest possible address?
1236          */
1237         if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
1238                 mm->free_area_cache = addr;
1239                 mm->cached_hole_size = ~0UL;
1240         }
1241 }
1242
1243 /*
1244  * This mmap-allocator allocates new areas top-down from below the
1245  * stack's low limit (the base):
1246  */
1247 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1248 unsigned long
1249 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1250                           const unsigned long len, const unsigned long pgoff,
1251                           const unsigned long flags)
1252 {
1253         struct vm_area_struct *vma;
1254         struct mm_struct *mm = current->mm;
1255         unsigned long addr = addr0;
1256
1257         /* requested length too big for entire address space */
1258         if (len > TASK_SIZE)
1259                 return -ENOMEM;
1260
1261         /* requesting a specific address */
1262         if (addr) {
1263                 addr = PAGE_ALIGN(addr);
1264                 vma = find_vma(mm, addr);
1265                 if (TASK_SIZE - len >= addr &&
1266                                 (!vma || addr + len <= vma->vm_start))
1267                         return addr;
1268         }
1269
1270         /* check if free_area_cache is useful for us */
1271         if (len <= mm->cached_hole_size) {
1272                 mm->cached_hole_size = 0;
1273                 mm->free_area_cache = mm->mmap_base;
1274         }
1275
1276         /* either no address requested or can't fit in requested address hole */
1277         addr = mm->free_area_cache;
1278
1279         /* make sure it can fit in the remaining address space */
1280         if (addr > len) {
1281                 vma = find_vma(mm, addr-len);
1282                 if (!vma || addr <= vma->vm_start)
1283                         /* remember the address as a hint for next time */
1284                         return (mm->free_area_cache = addr-len);
1285         }
1286
1287         if (mm->mmap_base < len)
1288                 goto bottomup;
1289
1290         addr = mm->mmap_base-len;
1291
1292         do {
1293                 /*
1294                  * Lookup failure means no vma is above this address,
1295                  * else if new region fits below vma->vm_start,
1296                  * return with success:
1297                  */
1298                 vma = find_vma(mm, addr);
1299                 if (!vma || addr+len <= vma->vm_start)
1300                         /* remember the address as a hint for next time */
1301                         return (mm->free_area_cache = addr);
1302
1303                 /* remember the largest hole we saw so far */
1304                 if (addr + mm->cached_hole_size < vma->vm_start)
1305                         mm->cached_hole_size = vma->vm_start - addr;
1306
1307                 /* try just below the current vma->vm_start */
1308                 addr = vma->vm_start-len;
1309         } while (len < vma->vm_start);
1310
1311 bottomup:
1312         /*
1313          * A failed mmap() very likely causes application failure,
1314          * so fall back to the bottom-up function here. This scenario
1315          * can happen with large stack limits and large mmap()
1316          * allocations.
1317          */
1318         mm->cached_hole_size = ~0UL;
1319         mm->free_area_cache = TASK_UNMAPPED_BASE;
1320         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1321         /*
1322          * Restore the topdown base:
1323          */
1324         mm->free_area_cache = mm->mmap_base;
1325         mm->cached_hole_size = ~0UL;
1326
1327         return addr;
1328 }
1329 #endif
1330
1331 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1332 {
1333         /*
1334          * Is this a new hole at the highest possible address?
1335          */
1336         if (addr > mm->free_area_cache)
1337                 mm->free_area_cache = addr;
1338
1339         /* don't allow allocations above the current base */
1340         if (mm->free_area_cache > mm->mmap_base)
1341                 mm->free_area_cache = mm->mmap_base;
1342 }
1343
1344
1345 unsigned long
1346 get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
1347                 unsigned long pgoff, unsigned long flags, int exec)
1348 {
1349         unsigned long ret;
1350
1351         if (!(flags & MAP_FIXED)) {
1352                 unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1353
1354                 if (exec && current->mm->get_unmapped_exec_area)
1355                         get_area = current->mm->get_unmapped_exec_area;
1356                 else
1357                         get_area = current->mm->get_unmapped_area;
1358
1359                 if (file && file->f_op && file->f_op->get_unmapped_area)
1360                         get_area = file->f_op->get_unmapped_area;
1361                 addr = get_area(file, addr, len, pgoff, flags);
1362                 if (IS_ERR_VALUE(addr))
1363                         return addr;
1364         }
1365
1366         if (addr > TASK_SIZE - len)
1367                 return -ENOMEM;
1368         if (addr & ~PAGE_MASK)
1369                 return -EINVAL;
1370         if (file && is_file_hugepages(file))  {
1371                 /*
1372                  * Check if the given range is hugepage aligned, and
1373                  * can be made suitable for hugepages.
1374                  */
1375                 ret = prepare_hugepage_range(addr, len);
1376         } else {
1377                 /*
1378                  * Ensure that a normal request is not falling in a
1379                  * reserved hugepage range.  For some archs like IA-64,
1380                  * there is a separate region for hugepages.
1381                  */
1382                 ret = is_hugepage_only_range(current->mm, addr, len);
1383         }
1384         if (ret)
1385                 return -EINVAL;
1386         return addr;
1387 }
1388
1389 EXPORT_SYMBOL(get_unmapped_area_prot);
1390
1391 #define SHLIB_BASE             0x00111000
1392
1393 unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
1394                 unsigned long len0, unsigned long pgoff, unsigned long flags)
1395 {
1396         unsigned long addr = addr0, len = len0;
1397         struct mm_struct *mm = current->mm;
1398         struct vm_area_struct *vma;
1399         unsigned long tmp;
1400
1401         if (len > TASK_SIZE)
1402                 return -ENOMEM;
1403
1404         if (!addr && !(flags & MAP_FIXED))
1405                 addr = randomize_range(SHLIB_BASE, 0x01000000, len);
1406
1407         if (addr) {
1408                 addr = PAGE_ALIGN(addr);
1409                 vma = find_vma(mm, addr);
1410                 if (TASK_SIZE - len >= addr &&
1411                     (!vma || addr + len <= vma->vm_start)) {
1412                         return addr;
1413                 }
1414         }
1415
1416         addr = SHLIB_BASE;
1417         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1418                 /* At this point:  (!vma || addr < vma->vm_end). */
1419                 if (TASK_SIZE - len < addr)
1420                         return -ENOMEM;
1421
1422                 if (!vma || addr + len <= vma->vm_start) {
1423                         /*
1424                          * Must not let a PROT_EXEC mapping get into the
1425                          * brk area:
1426                          */
1427                         if (addr + len > mm->brk)
1428                                 goto failed;
1429
1430                         /*
1431                          * Up until the brk area we randomize addresses
1432                          * as much as possible:
1433                          */
1434                         if (addr >= 0x01000000) {
1435                                 tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, (unsigned long)0x08000000)), len);
1436                                 vma = find_vma(mm, tmp);
1437                                 if (TASK_SIZE - len >= tmp &&
1438                                     (!vma || tmp + len <= vma->vm_start))
1439                                         return tmp;
1440                         }
1441                         /*
1442                          * Ok, randomization didn't work out - return
1443                          * the result of the linear search:
1444                          */
1445                         return addr;
1446                 }
1447                 addr = vma->vm_end;
1448         }
1449
1450 failed:
1451         return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
1452 }
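/*
 * This allocator only takes effect if architecture setup code installs it
 * in the mm at exec time.  A minimal sketch, assuming an arch hook along
 * the lines of arch_pick_mmap_layout() (the exact hook and conditions vary
 * between architectures and patch sets):
 *
 *	void arch_pick_mmap_layout(struct mm_struct *mm)
 *	{
 *		mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
 *		...
 *	}
 */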
1453
1454
1455 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1456 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
1457 {
1458         struct vm_area_struct *vma = NULL;
1459
1460         if (mm) {
1461                 /* Check the cache first. */
1462                 /* (Cache hit rate is typically around 35%.) */
1463                 vma = mm->mmap_cache;
1464                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1465                         struct rb_node * rb_node;
1466
1467                         rb_node = mm->mm_rb.rb_node;
1468                         vma = NULL;
1469
1470                         while (rb_node) {
1471                                 struct vm_area_struct * vma_tmp;
1472
1473                                 vma_tmp = rb_entry(rb_node,
1474                                                 struct vm_area_struct, vm_rb);
1475
1476                                 if (vma_tmp->vm_end > addr) {
1477                                         vma = vma_tmp;
1478                                         if (vma_tmp->vm_start <= addr)
1479                                                 break;
1480                                         rb_node = rb_node->rb_left;
1481                                 } else
1482                                         rb_node = rb_node->rb_right;
1483                         }
1484                         if (vma)
1485                                 mm->mmap_cache = vma;
1486                 }
1487         }
1488         return vma;
1489 }
1490
1491 EXPORT_SYMBOL(find_vma);
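/*
 * Typical usage (illustrative sketch only): callers must hold mmap_sem at
 * least for reading, and must check that the returned vma really covers the
 * address, since find_vma() returns the first vma ending above addr:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr) {
 *		... addr lies inside vma ...
 *	}
 *	up_read(&mm->mmap_sem);
 */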
1492
1493 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
1494 struct vm_area_struct *
1495 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1496                         struct vm_area_struct **pprev)
1497 {
1498         struct vm_area_struct *vma = NULL, *prev = NULL;
1499         struct rb_node * rb_node;
1500         if (!mm)
1501                 goto out;
1502
1503         /* Guard against addr being lower than the first VMA */
1504         vma = mm->mmap;
1505
1506         /* Go through the RB tree quickly. */
1507         rb_node = mm->mm_rb.rb_node;
1508
1509         while (rb_node) {
1510                 struct vm_area_struct *vma_tmp;
1511                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1512
1513                 if (addr < vma_tmp->vm_end) {
1514                         rb_node = rb_node->rb_left;
1515                 } else {
1516                         prev = vma_tmp;
1517                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1518                                 break;
1519                         rb_node = rb_node->rb_right;
1520                 }
1521         }
1522
1523 out:
1524         *pprev = prev;
1525         return prev ? prev->vm_next : vma;
1526 }
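/*
 * Illustrative return convention (not part of this file): for an addr that
 * falls in the gap between two vmas, the vma after the gap is returned and
 * *pprev is set to the vma before it; for an addr below the first vma,
 * *pprev is NULL and the first vma is returned:
 *
 *	vma = find_vma_prev(mm, addr, &prev);
 *	if (!vma || addr < vma->vm_start)
 *		... addr lies in the hole following prev (if any) ...
 */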
1527
1528 static int over_stack_limit(unsigned long sz)
1529 {
1530         if (sz < EXEC_STACK_BIAS)
1531                 return 0;
1532         return (sz - EXEC_STACK_BIAS) >
1533                         current->signal->rlim[RLIMIT_STACK].rlim_cur;
1534 }
1535
1536 /*
1537  * Verify that the stack growth is acceptable and
1538  * update accounting. This is shared with both the
1539  * grow-up and grow-down cases.
1540  */
1541 static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
1542 {
1543         struct mm_struct *mm = vma->vm_mm;
1544         struct rlimit *rlim = current->signal->rlim;
1545
1546         /* address space limit tests */
1547         if (!may_expand_vm(mm, grow))
1548                 return -ENOMEM;
1549
1550         /* Stack limit test */
1551         if (over_stack_limit(size))
1552                 return -ENOMEM;
1553
1554         /* mlock limit tests */
1555         if (vma->vm_flags & VM_LOCKED) {
1556                 unsigned long locked;
1557                 unsigned long limit;
1558                 locked = mm->locked_vm + grow;
1559                 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
1560                 if (locked > limit && !capable(CAP_IPC_LOCK))
1561                         return -ENOMEM;
1562         }
1563
1564         /*
1565          * Overcommit..  This must be the final test, as it will
1566          * update security statistics.
1567          */
1568         if (security_vm_enough_memory(grow))
1569                 return -ENOMEM;
1570
1571         /* Ok, everything looks good - let it rip */
1572         vx_vmpages_add(mm, grow);
1573         if (vma->vm_flags & VM_LOCKED)
1574                 vx_vmlocked_add(mm, grow);
1575         vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1576         return 0;
1577 }
1578
1579 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1580 /*
1581  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1582  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1583  */
1584 #ifndef CONFIG_IA64
1585 static inline
1586 #endif
1587 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1588 {
1589         int error;
1590
1591         if (!(vma->vm_flags & VM_GROWSUP))
1592                 return -EFAULT;
1593
1594         /*
1595          * We must make sure the anon_vma is allocated
1596          * so that the anon_vma locking is not a noop.
1597          */
1598         if (unlikely(anon_vma_prepare(vma)))
1599                 return -ENOMEM;
1600         anon_vma_lock(vma);
1601
1602         /*
1603          * vma->vm_start/vm_end cannot change under us because the caller
1604          * is required to hold the mmap_sem in read mode.  We need the
1605          * anon_vma lock to serialize against concurrent expand_stacks.
1606          */
1607         address += 4 + PAGE_SIZE - 1;
1608         address &= PAGE_MASK;
1609         error = 0;
1610
1611         /* Somebody else might have raced and expanded it already */
1612         if (address > vma->vm_end) {
1613                 unsigned long size, grow;
1614
1615                 size = address - vma->vm_start;
1616                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1617
1618                 error = acct_stack_growth(vma, size, grow);
1619                 if (!error)
1620                         vma->vm_end = address;
1621         }
1622         anon_vma_unlock(vma);
1623         return error;
1624 }
1625 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1626
1627 #ifdef CONFIG_STACK_GROWSUP
1628 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1629 {
1630         return expand_upwards(vma, address);
1631 }
1632
1633 struct vm_area_struct *
1634 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1635 {
1636         struct vm_area_struct *vma, *prev;
1637
1638         addr &= PAGE_MASK;
1639         vma = find_vma_prev(mm, addr, &prev);
1640         if (vma && (vma->vm_start <= addr))
1641                 return vma;
1642         if (!prev || expand_stack(prev, addr))
1643                 return NULL;
1644         if (prev->vm_flags & VM_LOCKED) {
1645                 make_pages_present(addr, prev->vm_end);
1646         }
1647         return prev;
1648 }
1649 #else
1650 /*
1651  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1652  */
1653 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1654 {
1655         int error;
1656
1657         /*
1658          * We must make sure the anon_vma is allocated
1659          * so that the anon_vma locking is not a noop.
1660          */
1661         if (unlikely(anon_vma_prepare(vma)))
1662                 return -ENOMEM;
1663         anon_vma_lock(vma);
1664
1665         /*
1666          * vma->vm_start/vm_end cannot change under us because the caller
1667          * is required to hold the mmap_sem in read mode.  We need the
1668          * anon_vma lock to serialize against concurrent expand_stacks.
1669          */
1670         address &= PAGE_MASK;
1671         error = 0;
1672
1673         /* Somebody else might have raced and expanded it already */
1674         if (address < vma->vm_start) {
1675                 unsigned long size, grow;
1676
1677                 size = vma->vm_end - address;
1678                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1679
1680                 error = acct_stack_growth(vma, size, grow);
1681                 if (!error) {
1682                         vma->vm_start = address;
1683                         vma->vm_pgoff -= grow;
1684                 }
1685         }
1686         anon_vma_unlock(vma);
1687         return error;
1688 }
1689
1690 struct vm_area_struct *
1691 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1692 {
1693         struct vm_area_struct * vma;
1694         unsigned long start;
1695
1696         addr &= PAGE_MASK;
1697         vma = find_vma(mm,addr);
1698         if (!vma)
1699                 return NULL;
1700         if (vma->vm_start <= addr)
1701                 return vma;
1702         if (!(vma->vm_flags & VM_GROWSDOWN))
1703                 return NULL;
1704         start = vma->vm_start;
1705         if (expand_stack(vma, addr))
1706                 return NULL;
1707         if (vma->vm_flags & VM_LOCKED) {
1708                 make_pages_present(addr, start);
1709         }
1710         return vma;
1711 }
1712 #endif
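/*
 * How the stack actually gets grown: the arch fault handler calls
 * expand_stack() when a fault lands just below a VM_GROWSDOWN vma.  A
 * minimal sketch (the real do_page_fault() handlers carry more checks):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start > address) {
 *		if (!(vma->vm_flags & VM_GROWSDOWN) ||
 *		    expand_stack(vma, address))
 *			goto bad_area;
 *	}
 *	... handle_mm_fault(mm, vma, address, write) ...
 */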
1713
1714 /*
1715  * Ok - we have the memory areas we should free on the vma list,
1716  * so release them, and do the vma updates.
1717  *
1718  * Called with the mm semaphore held.
1719  */
1720 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1721 {
1722         /* Update high watermark before we lower total_vm */
1723         update_hiwater_vm(mm);
1724         do {
1725                 long nrpages = vma_pages(vma);
1726
1727                 vx_vmpages_sub(mm, nrpages);
1728                 if (vma->vm_flags & VM_LOCKED)
1729                         vx_vmlocked_sub(mm, nrpages);
1730                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1731                 vma = remove_vma(vma);
1732         } while (vma);
1733         validate_mm(mm);
1734 }
1735
1736 /*
1737  * Get rid of page table information in the indicated region.
1738  *
1739  * Called with the mm semaphore held.
1740  */
1741 static void unmap_region(struct mm_struct *mm,
1742                 struct vm_area_struct *vma, struct vm_area_struct *prev,
1743                 unsigned long start, unsigned long end)
1744 {
1745         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1746         struct mmu_gather *tlb;
1747         unsigned long nr_accounted = 0;
1748
1749         lru_add_drain();
1750         tlb = tlb_gather_mmu(mm, 0);
1751         update_hiwater_rss(mm);
1752         unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1753         vm_unacct_memory(nr_accounted);
1754         free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1755                                  next? next->vm_start: 0);
1756         tlb_finish_mmu(tlb, start, end);
1757 }
1758
1759 /*
1760  * Create a list of vma's touched by the unmap, removing them from the mm's
1761  * vma list as we go..
1762  */
1763 static void
1764 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1765         struct vm_area_struct *prev, unsigned long end)
1766 {
1767         struct vm_area_struct **insertion_point;
1768         struct vm_area_struct *tail_vma = NULL;
1769         unsigned long addr;
1770
1771         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1772         do {
1773                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1774                 mm->map_count--;
1775                 tail_vma = vma;
1776                 vma = vma->vm_next;
1777         } while (vma && vma->vm_start < end);
1778         *insertion_point = vma;
1779         tail_vma->vm_next = NULL;
1780         if (mm->unmap_area == arch_unmap_area)
1781                 addr = prev ? prev->vm_end : mm->mmap_base;
1782         else
1783                 addr = vma ?  vma->vm_start : mm->mmap_base;
1784         mm->unmap_area(mm, addr);
1785         mm->mmap_cache = NULL;          /* Kill the cache. */
1786 }
1787
1788 /*
1789  * Split a vma into two pieces at address 'addr'; a new vma is allocated
1790  * either for the first part or the tail.
1791  */
1792 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1793               unsigned long addr, int new_below)
1794 {
1795         struct mempolicy *pol;
1796         struct vm_area_struct *new;
1797
1798         if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
1799                 return -EINVAL;
1800
1801         if (mm->map_count >= sysctl_max_map_count)
1802                 return -ENOMEM;
1803
1804         new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1805         if (!new)
1806                 return -ENOMEM;
1807
1808         /* most fields are the same, copy all, and then fixup */
1809         *new = *vma;
1810
1811         if (new_below)
1812                 new->vm_end = addr;
1813         else {
1814                 new->vm_start = addr;
1815                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1816         }
1817
1818         pol = mpol_copy(vma_policy(vma));
1819         if (IS_ERR(pol)) {
1820                 kmem_cache_free(vm_area_cachep, new);
1821                 return PTR_ERR(pol);
1822         }
1823         vma_set_policy(new, pol);
1824
1825         if (new->vm_file)
1826                 get_file(new->vm_file);
1827
1828         if (new->vm_ops && new->vm_ops->open)
1829                 new->vm_ops->open(new);
1830
1831         if (new_below) {
1832                 unsigned long old_end = vma->vm_end;
1833
1834                 vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1835                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
1836                 if (vma->vm_flags & VM_EXEC)
1837                         arch_remove_exec_range(mm, old_end);
1838         } else
1839                 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1840
1841         return 0;
1842 }
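/*
 * Example of the new_below convention (illustrative only): for a vma
 * covering [0x1000, 0x4000),
 *
 *	split_vma(mm, vma, 0x2000, 0);
 *
 * leaves the original vma as [0x1000, 0x2000) and creates a new vma for
 * [0x2000, 0x4000); new_below == 1 would instead put the new vma on the
 * lower side.  The caller must hold mmap_sem for writing.
 */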
1843
1844 /* Munmap is split into 2 main parts -- this part which finds
1845  * what needs doing, and the areas themselves, which do the
1846  * work.  This now handles partial unmappings.
1847  * Jeremy Fitzhardinge <jeremy@goop.org>
1848  */
1849 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1850 {
1851         unsigned long end;
1852         struct vm_area_struct *vma, *prev, *last;
1853
1854         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1855                 return -EINVAL;
1856
1857         if ((len = PAGE_ALIGN(len)) == 0)
1858                 return -EINVAL;
1859
1860         /* Find the first overlapping VMA */
1861         vma = find_vma_prev(mm, start, &prev);
1862         if (!vma)
1863                 return 0;
1864         /* we have  start < vma->vm_end  */
1865
1866         /* if it doesn't overlap, we have nothing.. */
1867         end = start + len;
1868         if (vma->vm_start >= end)
1869                 return 0;
1870
1871         /*
1872          * If we need to split any vma, do it now to save pain later.
1873          *
1874          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1875          * unmapped vm_area_struct will remain in use: so lower split_vma
1876          * places tmp vma above, and higher split_vma places tmp vma below.
1877          */
1878         if (start > vma->vm_start) {
1879                 int error = split_vma(mm, vma, start, 0);
1880                 if (error)
1881                         return error;
1882                 prev = vma;
1883         }
1884
1885         /* Does it split the last one? */
1886         last = find_vma(mm, end);
1887         if (last && end > last->vm_start) {
1888                 int error = split_vma(mm, last, end, 1);
1889                 if (error)
1890                         return error;
1891         }
1892         vma = prev? prev->vm_next: mm->mmap;
1893
1894         /*
1895          * Remove the vma's, and unmap the actual pages
1896          */
1897         detach_vmas_to_be_unmapped(mm, vma, prev, end);
1898         unmap_region(mm, vma, prev, start, end);
1899
1900         /* Fix up all other VM information */
1901         remove_vma_list(mm, vma);
1902
1903         return 0;
1904 }
1905
1906 EXPORT_SYMBOL(do_munmap);
1907
1908 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1909 {
1910         int ret;
1911         struct mm_struct *mm = current->mm;
1912
1913         profile_munmap(addr);
1914
1915         down_write(&mm->mmap_sem);
1916         ret = do_munmap(mm, addr, len);
1917         up_write(&mm->mmap_sem);
1918         return ret;
1919 }
1920
1921 static inline void verify_mm_writelocked(struct mm_struct *mm)
1922 {
1923 #ifdef CONFIG_DEBUG_VM
1924         if (unlikely(down_read_trylock(&mm->mmap_sem))) {
1925                 WARN_ON(1);
1926                 up_read(&mm->mmap_sem);
1927         }
1928 #endif
1929 }
1930
1931 /*
1932  *  This is really a simplified "do_mmap".  It only handles
1933  *  anonymous maps.  Eventually we may be able to do some
1934  *  brk-specific accounting here.
1935  */
1936 unsigned long do_brk(unsigned long addr, unsigned long len)
1937 {
1938         struct mm_struct * mm = current->mm;
1939         struct vm_area_struct * vma, * prev;
1940         unsigned long flags;
1941         struct rb_node ** rb_link, * rb_parent;
1942         pgoff_t pgoff = addr >> PAGE_SHIFT;
1943
1944         len = PAGE_ALIGN(len);
1945         if (!len)
1946                 return addr;
1947
1948         if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1949                 return -EINVAL;
1950
1951         /*
1952          * mlock MCL_FUTURE?
1953          */
1954         if (mm->def_flags & VM_LOCKED) {
1955                 unsigned long locked, lock_limit;
1956                 locked = len >> PAGE_SHIFT;
1957                 locked += mm->locked_vm;
1958                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1959                 lock_limit >>= PAGE_SHIFT;
1960                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1961                         return -EAGAIN;
1962                 if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
1963                         return -ENOMEM;
1964         }
1965
1966         /*
1967          * mm->mmap_sem is required to protect against another thread
1968          * changing the mappings in case we sleep.
1969          */
1970         verify_mm_writelocked(mm);
1971
1972         /*
1973          * Clear old maps.  This also does some error checking for us.
1974          */
1975  munmap_back:
1976         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1977         if (vma && vma->vm_start < addr + len) {
1978                 if (do_munmap(mm, addr, len))
1979                         return -ENOMEM;
1980                 goto munmap_back;
1981         }
1982
1983         /* Check against address space limits *after* clearing old maps... */
1984         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1985                 return -ENOMEM;
1986
1987         if (mm->map_count > sysctl_max_map_count)
1988                 return -ENOMEM;
1989
1990         if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
1991                 !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
1992                 return -ENOMEM;
1993
1994         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1995
1996         /* Can we just expand an old private anonymous mapping? */
1997         if (vma_merge(mm, prev, addr, addr + len, flags,
1998                                         NULL, NULL, pgoff, NULL))
1999                 goto out;
2000
2001         /*
2002          * create a vma struct for an anonymous mapping
2003          */
2004         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2005         if (!vma) {
2006                 vm_unacct_memory(len >> PAGE_SHIFT);
2007                 return -ENOMEM;
2008         }
2009
2010         vma->vm_mm = mm;
2011         vma->vm_start = addr;
2012         vma->vm_end = addr + len;
2013         vma->vm_pgoff = pgoff;
2014         vma->vm_flags = flags;
2015         vma->vm_page_prot = protection_map[flags & 0x0f];
2016         vma_link(mm, vma, prev, rb_link, rb_parent);
2017 out:
2018         vx_vmpages_add(mm, len >> PAGE_SHIFT);
2019         if (flags & VM_LOCKED) {
2020                 vx_vmlocked_add(mm, len >> PAGE_SHIFT);
2021                 make_pages_present(addr, addr + len);
2022         }
2023         return addr;
2024 }
2025
2026 EXPORT_SYMBOL(do_brk);
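/*
 * do_brk() also serves in-kernel callers that need an anonymous, brk-style
 * mapping.  A minimal sketch, loosely modelled on the ELF loader's
 * set_brk() (details there may differ):
 *
 *	down_write(&current->mm->mmap_sem);
 *	addr = do_brk(start, end - start);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */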
2027
2028 /* Release all mmaps. */
2029 void exit_mmap(struct mm_struct *mm)
2030 {
2031         struct mmu_gather *tlb;
2032         struct vm_area_struct *vma = mm->mmap;
2033         unsigned long nr_accounted = 0;
2034         unsigned long end;
2035
2036         lru_add_drain();
2037         flush_cache_mm(mm);
2038         tlb = tlb_gather_mmu(mm, 1);
2039         /* Don't update_hiwater_rss(mm) here, do_exit already did */
2040         /* Use -1 here to ensure all VMAs in the mm are unmapped */
2041         end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2042         vm_unacct_memory(nr_accounted);
2043         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
2044         tlb_finish_mmu(tlb, 0, end);
2045         arch_flush_exec_range(mm);
2046
2047         set_mm_counter(mm, file_rss, 0);
2048         set_mm_counter(mm, anon_rss, 0);
2049         vx_vmpages_sub(mm, mm->total_vm);
2050         vx_vmlocked_sub(mm, mm->locked_vm);
2051
2052         /*
2053          * Walk the list again, actually closing and freeing it,
2054          * with preemption enabled, without holding any MM locks.
2055          */
2056         while (vma)
2057                 vma = remove_vma(vma);
2058
2059         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2060 }
2061
2062 /* Insert vm structure into process list sorted by address
2063  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2064  * then i_mmap_lock is taken here.
2065  */
2066 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2067 {
2068         struct vm_area_struct * __vma, * prev;
2069         struct rb_node ** rb_link, * rb_parent;
2070
2071         /*
2072          * The vm_pgoff of a purely anonymous vma should be irrelevant
2073          * until its first write fault, when the page's anon_vma and index
2074          * are set.  But now set the vm_pgoff it will almost certainly
2075          * end up with (unless mremap moves it elsewhere before that
2076          * first write fault), so /proc/pid/maps tells a consistent story.
2077          *
2078          * By setting it to reflect the virtual start address of the
2079          * vma, merges and splits can happen in a seamless way, just
2080          * using the existing file pgoff checks and manipulations.
2081          * Similarly in do_mmap_pgoff and in do_brk.
2082          */
2083         if (!vma->vm_file) {
2084                 BUG_ON(vma->anon_vma);
2085                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2086         }
2087         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
2088         if (__vma && __vma->vm_start < vma->vm_end)
2089                 return -ENOMEM;
2090         if ((vma->vm_flags & VM_ACCOUNT) &&
2091                 (security_vm_enough_memory(vma_pages(vma)) ||
2092                 !vx_vmpages_avail(mm, vma_pages(vma))))
2093                 return -ENOMEM;
2094         vma_link(mm, vma, prev, rb_link, rb_parent);
2095         return 0;
2096 }
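/*
 * For a complete in-file user see install_special_mapping() below, which
 * builds a vma by hand and hands it to this helper.  The minimal pattern,
 * with mmap_sem held for writing, is (illustrative sketch):
 *
 *	vma->vm_mm = mm;
 *	vma->vm_start = addr;
 *	vma->vm_end = addr + len;
 *	if (insert_vm_struct(mm, vma))
 *		... back out and free vma ...
 */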
2097
2098 /*
2099  * Copy the vma structure to a new location in the same mm,
2100  * prior to moving page table entries, to effect an mremap move.
2101  */
2102 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2103         unsigned long addr, unsigned long len, pgoff_t pgoff)
2104 {
2105         struct vm_area_struct *vma = *vmap;
2106         unsigned long vma_start = vma->vm_start;
2107         struct mm_struct *mm = vma->vm_mm;
2108         struct vm_area_struct *new_vma, *prev;
2109         struct rb_node **rb_link, *rb_parent;
2110         struct mempolicy *pol;
2111
2112         /*
2113          * If anonymous vma has not yet been faulted, update new pgoff
2114          * to match new location, to increase its chance of merging.
2115          */
2116         if (!vma->vm_file && !vma->anon_vma)
2117                 pgoff = addr >> PAGE_SHIFT;
2118
2119         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2120         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2121                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2122         if (new_vma) {
2123                 /*
2124                  * Source vma may have been merged into new_vma
2125                  */
2126                 if (vma_start >= new_vma->vm_start &&
2127                     vma_start < new_vma->vm_end)
2128                         *vmap = new_vma;
2129         } else {
2130                 new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2131                 if (new_vma) {
2132                         *new_vma = *vma;
2133                         pol = mpol_copy(vma_policy(vma));
2134                         if (IS_ERR(pol)) {
2135                                 kmem_cache_free(vm_area_cachep, new_vma);
2136                                 return NULL;
2137                         }
2138                         vma_set_policy(new_vma, pol);
2139                         new_vma->vm_start = addr;
2140                         new_vma->vm_end = addr + len;
2141                         new_vma->vm_pgoff = pgoff;
2142                         if (new_vma->vm_file)
2143                                 get_file(new_vma->vm_file);
2144                         if (new_vma->vm_ops && new_vma->vm_ops->open)
2145                                 new_vma->vm_ops->open(new_vma);
2146                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
2147                 }
2148         }
2149         return new_vma;
2150 }
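/*
 * Sketch of the mremap path this serves, loosely based on move_vma() in
 * mm/mremap.c (details there may differ):
 *
 *	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
 *	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
 *	if (!new_vma)
 *		return -ENOMEM;
 *	... move_page_tables(vma, old_addr, new_vma, new_addr, old_len) ...
 */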
2151
2152 /*
2153  * Return true if the calling process may expand its vm space by the passed
2154  * number of pages
2155  */
2156 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2157 {
2158         unsigned long cur = mm->total_vm;       /* pages */
2159         unsigned long lim;
2160
2161         lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
2162
2163         if (cur + npages > lim)
2164                 return 0;
2165         if (!vx_vmpages_avail(mm, npages))
2166                 return 0;
2167         return 1;
2168 }
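/*
 * Worked example (illustrative): with RLIMIT_AS at 512 MB and 4 kB pages,
 * lim is 131072 pages; a process whose total_vm is already 131000 pages may
 * still grow by 72 pages but not by 73.  Callers convert byte lengths with
 * len >> PAGE_SHIFT before asking, e.g.:
 *
 *	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 *		return -ENOMEM;
 */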
2169
2170
2171 static struct page *
2172 special_mapping_nopage(struct vm_area_struct *vma,
2173                        unsigned long address, int *type)
2174 {
2175         struct page **pages;
2176
2177         BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2178
2179         address -= vma->vm_start;
2180         for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
2181                 address -= PAGE_SIZE;
2182
2183         if (*pages) {
2184                 get_page(*pages);
2185                 return *pages;
2186         }
2187
2188         return NOPAGE_SIGBUS;
2189 }
2190
2191 static struct vm_operations_struct special_mapping_vmops = {
2192         .nopage = special_mapping_nopage,
2193 };
2194
2195 unsigned int vdso_populate = 1;
2196
2197 /*
2198  * Insert a new vma covering the given region, with the given flags and
2199  * protections.  Its pages are supplied by the given null-terminated array.
2200  * The region past the last page supplied will always produce SIGBUS.
2201  * The array pointer and the pages it points to are assumed to stay alive
2202  * for as long as this mapping might exist.
2203  */
2204 int install_special_mapping(struct mm_struct *mm,
2205                             unsigned long addr, unsigned long len,
2206                             unsigned long vm_flags, pgprot_t pgprot,
2207                             struct page **pages)
2208 {
2209         struct vm_area_struct *vma;
2210         int err;
2211
2212         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2213         if (unlikely(vma == NULL))
2214                 return -ENOMEM;
2215         memset(vma, 0, sizeof(*vma));
2216
2217         vma->vm_mm = mm;
2218         vma->vm_start = addr;
2219         vma->vm_end = addr + len;
2220
2221         vma->vm_flags = vm_flags;
2222         vma->vm_page_prot = pgprot;
2223
2224         vma->vm_ops = &special_mapping_vmops;
2225         vma->vm_private_data = pages;
2226
2227         insert_vm_struct(mm, vma);
2228         mm->total_vm += len >> PAGE_SHIFT;
2229
2230         if (!vdso_populate)
2231                 return 0;
2232
2233         err = 0;
2234         while (*pages) {
2235                 struct page *page = *pages++;
2236                 get_page(page);
2237                 err = install_page(mm, vma, addr, page, vma->vm_page_prot);
2238                 if (err) {
2239                         put_page(page);
2240                         break;
2241                 }
2242                 addr += PAGE_SIZE;
2243         }
2244
2245         return err;
2246 }
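/*
 * A minimal sketch of the intended user: vDSO setup in architecture code.
 * The names used here (syscall_pages, addr) are placeholders and the real
 * per-arch setup differs; the second array entry must stay NULL so the
 * page array is terminated:
 *
 *	struct page *syscall_pages[2];
 *
 *	down_write(&mm->mmap_sem);
 *	err = install_special_mapping(mm, addr, PAGE_SIZE,
 *			VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *			PAGE_READONLY_EXEC, syscall_pages);
 *	up_write(&mm->mmap_sem);
 */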