patch-2.6.6-vs1.9.0
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code        <alan@redhat.com>
7  */
8
9 #include <linux/slab.h>
10 #include <linux/shm.h>
11 #include <linux/mman.h>
12 #include <linux/pagemap.h>
13 #include <linux/swap.h>
14 #include <linux/syscalls.h>
15 #include <linux/init.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/personality.h>
19 #include <linux/security.h>
20 #include <linux/hugetlb.h>
21 #include <linux/profile.h>
22 #include <linux/module.h>
23 #include <linux/mount.h>
24
25 #include <asm/uaccess.h>
26 #include <asm/pgalloc.h>
27 #include <asm/tlb.h>
28
29 /*
30  * WARNING: the debugging will use recursive algorithms so never enable this
31  * unless you know what you are doing.
32  */
33 #undef DEBUG_MM_RB
34
35 /* description of effects of mapping type and prot in current implementation.
36  * this is due to the limited x86 page protection hardware.  The expected
37  * behavior is in parens:
38  *
39  * map_type     prot
40  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
41  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
42  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
43  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
44  *              
45  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
46  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
47  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
48  *
49  */
50 pgprot_t protection_map[16] = {
51         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
52         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
53 };
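/*
 * Example: protection_map is indexed with (vm_flags & 0x0f), i.e. the
 * VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits.  A MAP_PRIVATE mapping with
 * PROT_READ|PROT_WRITE yields index 0x3 and __P011 (the copy-on-write
 * entry in the table above); the same protections with MAP_SHARED yield
 * index 0xb and __S011 (shared writable).
 */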
54
55 int sysctl_overcommit_memory = 0;       /* default is heuristic overcommit */
56 int sysctl_overcommit_ratio = 50;       /* default is 50% */
57 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
58 atomic_t vm_committed_space = ATOMIC_INIT(0);
59
60 EXPORT_SYMBOL(sysctl_overcommit_memory);
61 EXPORT_SYMBOL(sysctl_overcommit_ratio);
62 EXPORT_SYMBOL(sysctl_max_map_count);
63 EXPORT_SYMBOL(vm_committed_space);
64
65 /*
66  * Requires inode->i_mapping->i_shared_sem
67  */
68 static inline void
69 __remove_shared_vm_struct(struct vm_area_struct *vma, struct inode *inode)
70 {
71         if (inode) {
72                 if (vma->vm_flags & VM_DENYWRITE)
73                         atomic_inc(&inode->i_writecount);
74                 list_del_init(&vma->shared);
75         }
76 }
77
78 /*
79  * Remove one vm structure from the inode's i_mapping address space.
80  */
81 static void remove_shared_vm_struct(struct vm_area_struct *vma)
82 {
83         struct file *file = vma->vm_file;
84
85         if (file) {
86                 struct address_space *mapping = file->f_mapping;
87                 down(&mapping->i_shared_sem);
88                 __remove_shared_vm_struct(vma, file->f_dentry->d_inode);
89                 up(&mapping->i_shared_sem);
90         }
91 }
92
93 /*
94  *  sys_brk() for the most part doesn't need the global kernel
95  *  lock, except when an application is doing something nasty
96  *  like trying to un-brk an area that has already been mapped
97  *  to a regular file.  in this case, the unmapping will need
98  *  to invoke file system routines that need the global lock.
99  */
100 asmlinkage unsigned long sys_brk(unsigned long brk)
101 {
102         unsigned long rlim, retval;
103         unsigned long newbrk, oldbrk;
104         struct mm_struct *mm = current->mm;
105
106         down_write(&mm->mmap_sem);
107
108         if (brk < mm->end_code)
109                 goto out;
110         newbrk = PAGE_ALIGN(brk);
111         oldbrk = PAGE_ALIGN(mm->brk);
112         if (oldbrk == newbrk)
113                 goto set_brk;
114
115         /* Always allow shrinking brk. */
116         if (brk <= mm->brk) {
117                 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
118                         goto set_brk;
119                 goto out;
120         }
121
122         /* Check against rlimit.. */
123         rlim = current->rlim[RLIMIT_DATA].rlim_cur;
124         if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
125                 goto out;
126
127         /* Check against existing mmap mappings. */
128         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
129                 goto out;
130
131         /* Ok, looks good - let it rip. */
132         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
133                 goto out;
134 set_brk:
135         mm->brk = brk;
136 out:
137         retval = mm->brk;
138         up_write(&mm->mmap_sem);
139         return retval;
140 }
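/*
 * For illustration: userspace reaches sys_brk() through the brk()
 * syscall; glibc's sbrk() and the main malloc() heap are typically
 * built on top of it.  Only whole pages are ever mapped or unmapped
 * here, since newbrk and oldbrk are PAGE_ALIGN()ed, while mm->brk
 * itself records the exact byte value that was requested.
 */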
141
142 #ifdef DEBUG_MM_RB
143 static int browse_rb(struct rb_root *root) {
144         int i, j;
145         struct rb_node *nd, *pn = NULL;
146         unsigned long prev = 0, pend = 0;
147         i = 0;
148
149         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
150                 struct vm_area_struct *vma;
151                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
152                 if (vma->vm_start < prev)
153                         printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
154                 if (vma->vm_start < pend)
155                         printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
156                 if (vma->vm_start > vma->vm_end)
157                         printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
158                 i++;
159                 pn = nd;
160         }
161         j = 0;
162         for (nd = pn; nd; nd = rb_prev(nd)) {
163                 j++;
164         }
165         if (i != j)
166                 printk("backwards %d, forwards %d\n", j, i), i = 0;
167         return i;
168 }
169
170 void validate_mm(struct mm_struct * mm) {
171         int bug = 0;
172         int i = 0;
173         struct vm_area_struct * tmp = mm->mmap;
174         while (tmp) {
175                 tmp = tmp->vm_next;
176                 i++;
177         }
178         if (i != mm->map_count)
179                 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
180         i = browse_rb(&mm->mm_rb);
181         if (i != mm->map_count)
182                 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
183         if (bug)
184                 BUG();
185 }
186 #else
187 #define validate_mm(mm) do { } while (0)
188 #endif
189
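/*
 * Find the first vma with vm_end > addr.  If addr is not already inside
 * an existing vma, also fill in the previous vma (*pprev) and the rb-tree
 * link and parent (*rb_link, *rb_parent) at which a new vma covering addr
 * would be inserted; callers feed these to vma_link() or vma_merge().
 */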
190 static struct vm_area_struct *
191 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
192                 struct vm_area_struct **pprev, struct rb_node ***rb_link,
193                 struct rb_node ** rb_parent)
194 {
195         struct vm_area_struct * vma;
196         struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
197
198         __rb_link = &mm->mm_rb.rb_node;
199         rb_prev = __rb_parent = NULL;
200         vma = NULL;
201
202         while (*__rb_link) {
203                 struct vm_area_struct *vma_tmp;
204
205                 __rb_parent = *__rb_link;
206                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
207
208                 if (vma_tmp->vm_end > addr) {
209                         vma = vma_tmp;
210                         if (vma_tmp->vm_start <= addr)
211                                 return vma;
212                         __rb_link = &__rb_parent->rb_left;
213                 } else {
214                         rb_prev = __rb_parent;
215                         __rb_link = &__rb_parent->rb_right;
216                 }
217         }
218
219         *pprev = NULL;
220         if (rb_prev)
221                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
222         *rb_link = __rb_link;
223         *rb_parent = __rb_parent;
224         return vma;
225 }
226
227 static inline void
228 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
229                 struct vm_area_struct *prev, struct rb_node *rb_parent)
230 {
231         if (prev) {
232                 vma->vm_next = prev->vm_next;
233                 prev->vm_next = vma;
234         } else {
235                 mm->mmap = vma;
236                 if (rb_parent)
237                         vma->vm_next = rb_entry(rb_parent,
238                                         struct vm_area_struct, vm_rb);
239                 else
240                         vma->vm_next = NULL;
241         }
242 }
243
244 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
245                 struct rb_node **rb_link, struct rb_node *rb_parent)
246 {
247         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
248         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
249 }
250
251 static inline void __vma_link_file(struct vm_area_struct *vma)
252 {
253         struct file * file;
254
255         file = vma->vm_file;
256         if (file) {
257                 struct address_space *mapping = file->f_mapping;
258
259                 if (vma->vm_flags & VM_DENYWRITE)
260                         atomic_dec(&file->f_dentry->d_inode->i_writecount);
261
262                 if (vma->vm_flags & VM_SHARED)
263                         list_add_tail(&vma->shared, &mapping->i_mmap_shared);
264                 else
265                         list_add_tail(&vma->shared, &mapping->i_mmap);
266         }
267 }
268
269 static void
270 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
271         struct vm_area_struct *prev, struct rb_node **rb_link,
272         struct rb_node *rb_parent)
273 {
274         __vma_link_list(mm, vma, prev, rb_parent);
275         __vma_link_rb(mm, vma, rb_link, rb_parent);
276         __vma_link_file(vma);
277 }
278
279 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
280                         struct vm_area_struct *prev, struct rb_node **rb_link,
281                         struct rb_node *rb_parent)
282 {
283         struct address_space *mapping = NULL;
284
285         if (vma->vm_file)
286                 mapping = vma->vm_file->f_mapping;
287
288         if (mapping)
289                 down(&mapping->i_shared_sem);
290         spin_lock(&mm->page_table_lock);
291         __vma_link(mm, vma, prev, rb_link, rb_parent);
292         spin_unlock(&mm->page_table_lock);
293         if (mapping)
294                 up(&mapping->i_shared_sem);
295
296         mark_mm_hugetlb(mm, vma);
297         mm->map_count++;
298         validate_mm(mm);
299 }
300
301 /*
302  * Insert vm structure into process list sorted by address and into the inode's
303  * i_mmap ring. The caller should hold mm->page_table_lock and
304  * ->f_mapping->i_shared_sem if vm_file is non-NULL.
305  */
306 static void
307 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
308 {
309         struct vm_area_struct * __vma, * prev;
310         struct rb_node ** rb_link, * rb_parent;
311
312         __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
313         if (__vma && __vma->vm_start < vma->vm_end)
314                 BUG();
315         __vma_link(mm, vma, prev, rb_link, rb_parent);
316         mark_mm_hugetlb(mm, vma);
317         mm->map_count++;
318         validate_mm(mm);
319 }
320
321 /*
322  * If the vma has a ->close operation then the driver probably needs to release
323  * per-vma resources, so we don't attempt to merge those.
324  */
325 #define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
326
327 static inline int is_mergeable_vma(struct vm_area_struct *vma,
328                         struct file *file, unsigned long vm_flags)
329 {
330         if (vma->vm_ops && vma->vm_ops->close)
331                 return 0;
332         if (vma->vm_file != file)
333                 return 0;
334         if (vma->vm_flags != vm_flags)
335                 return 0;
336         if (vma->vm_private_data)
337                 return 0;
338         return 1;
339 }
340
341 /*
342  * Return true if we can merge this (vm_flags,file,vm_pgoff,size)
343  * in front of (at a lower virtual address and file offset than) the vma.
344  *
345  * We don't check here for the merged mmap wrapping around the end of pagecache
346  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
347  * wrap, nor mmaps which cover the final page at index -1UL.
348  */
349 static int
350 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
351         struct file *file, unsigned long vm_pgoff, unsigned long size)
352 {
353         if (is_mergeable_vma(vma, file, vm_flags)) {
354                 if (!file)
355                         return 1;       /* anon mapping */
356                 if (vma->vm_pgoff == vm_pgoff + size)
357                         return 1;
358         }
359         return 0;
360 }
361
362 /*
363  * Return true if we can merge this (vm_flags,file,vm_pgoff)
364  * beyond (at a higher virtual address and file offset than) the vma.
365  */
366 static int
367 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
368         struct file *file, unsigned long vm_pgoff)
369 {
370         if (is_mergeable_vma(vma, file, vm_flags)) {
371                 unsigned long vma_size;
372
373                 if (!file)
374                         return 1;       /* anon mapping */
375
376                 vma_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
377                 if (vma->vm_pgoff + vma_size == vm_pgoff)
378                         return 1;
379         }
380         return 0;
381 }
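/*
 * Example: a vma mapping file pages [4,8) (vm_pgoff 4, four pages long)
 * can_vma_merge_after() a request with vm_pgoff 8, and can_vma_merge_before()
 * a four-page request with vm_pgoff 0.  Only file-offset contiguity is
 * checked here; the virtual-address adjacency is checked by the caller,
 * vma_merge() below.
 */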
382
383 /*
384  * Given a new mapping request (addr,end,vm_flags,file,pgoff), figure out
385  * whether that can be merged with its predecessor or its successor.  Or
386  * both (it neatly fills a hole).
387  */
388 static struct vm_area_struct *vma_merge(struct mm_struct *mm,
389                         struct vm_area_struct *prev,
390                         struct rb_node *rb_parent, unsigned long addr, 
391                         unsigned long end, unsigned long vm_flags,
392                         struct file *file, unsigned long pgoff)
393 {
394         spinlock_t *lock = &mm->page_table_lock;
395         struct inode *inode = file ? file->f_dentry->d_inode : NULL;
396         struct semaphore *i_shared_sem;
397
398         /*
399          * We later require that vma->vm_flags == vm_flags, so this tests
400          * vma->vm_flags & VM_SPECIAL, too.
401          */
402         if (vm_flags & VM_SPECIAL)
403                 return NULL;
404
405         i_shared_sem = file ? &file->f_mapping->i_shared_sem : NULL;
406
407         if (!prev) {
408                 prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
409                 goto merge_next;
410         }
411
412         /*
413          * Can it merge with the predecessor?
414          */
415         if (prev->vm_end == addr &&
416                         can_vma_merge_after(prev, vm_flags, file, pgoff)) {
417                 struct vm_area_struct *next;
418                 int need_up = 0;
419
420                 if (unlikely(file && prev->vm_next &&
421                                 prev->vm_next->vm_file == file)) {
422                         down(i_shared_sem);
423                         need_up = 1;
424                 }
425                 spin_lock(lock);
426                 prev->vm_end = end;
427
428                 /*
429                  * OK, it did.  Can we now merge in the successor as well?
430                  */
431                 next = prev->vm_next;
432                 if (next && prev->vm_end == next->vm_start &&
433                                 can_vma_merge_before(next, vm_flags, file,
434                                         pgoff, (end - addr) >> PAGE_SHIFT)) {
435                         prev->vm_end = next->vm_end;
436                         __vma_unlink(mm, next, prev);
437                         __remove_shared_vm_struct(next, inode);
438                         spin_unlock(lock);
439                         if (need_up)
440                                 up(i_shared_sem);
441                         if (file)
442                                 fput(file);
443
444                         mm->map_count--;
445                         kmem_cache_free(vm_area_cachep, next);
446                         return prev;
447                 }
448                 spin_unlock(lock);
449                 if (need_up)
450                         up(i_shared_sem);
451                 return prev;
452         }
453
454         /*
455          * Can this new request be merged in front of prev->vm_next?
456          */
457         prev = prev->vm_next;
458         if (prev) {
459  merge_next:
460                 if (!can_vma_merge_before(prev, vm_flags, file,
461                                 pgoff, (end - addr) >> PAGE_SHIFT))
462                         return NULL;
463                 if (end == prev->vm_start) {
464                         if (file)
465                                 down(i_shared_sem);
466                         spin_lock(lock);
467                         prev->vm_start = addr;
468                         prev->vm_pgoff -= (end - addr) >> PAGE_SHIFT;
469                         spin_unlock(lock);
470                         if (file)
471                                 up(i_shared_sem);
472                         return prev;
473                 }
474         }
475
476         return NULL;
477 }
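/*
 * Note on locking in vma_merge(): when a file is involved, i_shared_sem
 * is taken before mm->page_table_lock and released after it, the same
 * ordering vma_link() uses.
 */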
478
479 /*
480  * The caller must hold down_write(current->mm->mmap_sem).
481  */
482
483 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
484                         unsigned long len, unsigned long prot,
485                         unsigned long flags, unsigned long pgoff)
486 {
487         struct mm_struct * mm = current->mm;
488         struct vm_area_struct * vma, * prev;
489         struct inode *inode;
490         unsigned int vm_flags;
491         int correct_wcount = 0;
492         int error;
493         struct rb_node ** rb_link, * rb_parent;
494         int accountable = 1;
495         unsigned long charged = 0;
496
497         if (file) {
498                 if (is_file_hugepages(file))
499                         accountable = 0;
500
501                 if (!file->f_op || !file->f_op->mmap)
502                         return -ENODEV;
503
504                 if ((prot & PROT_EXEC) && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
505                         return -EPERM;
506         }
507
508         if (!len)
509                 return addr;
510
511         /* Careful about overflows.. */
512         len = PAGE_ALIGN(len);
513         if (!len || len > TASK_SIZE)
514                 return -EINVAL;
515
516         /* offset overflow? */
517         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
518                 return -EINVAL;
519
520         /* Too many mappings? */
521         if (mm->map_count > sysctl_max_map_count)
522                 return -ENOMEM;
523
524         /* Obtain the address to map to. we verify (or select) it and ensure
525          * that it represents a valid section of the address space.
526          */
527         addr = get_unmapped_area(file, addr, len, pgoff, flags);
528         if (addr & ~PAGE_MASK)
529                 return addr;
530
531         /* Do simple checking here so the lower-level routines won't have
532          * to. we assume access permissions have been handled by the open
533          * of the memory object, so we don't do any here.
534          */
535         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
536                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
537
538         if (flags & MAP_LOCKED) {
539                 if (!capable(CAP_IPC_LOCK))
540                         return -EPERM;
541                 vm_flags |= VM_LOCKED;
542         }
543         /* mlock MCL_FUTURE? */
544         if (vm_flags & VM_LOCKED) {
545                 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
546                 locked += len;
547                 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
548                         return -EAGAIN;
549         }
550
551         inode = file ? file->f_dentry->d_inode : NULL;
552
553         if (file) {
554                 switch (flags & MAP_TYPE) {
555                 case MAP_SHARED:
556                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
557                                 return -EACCES;
558
559                         /*
560                          * Make sure we don't allow writing to an append-only
561                          * file..
562                          */
563                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
564                                 return -EACCES;
565
566                         /*
567                          * Make sure there are no mandatory locks on the file.
568                          */
569                         if (locks_verify_locked(inode))
570                                 return -EAGAIN;
571
572                         vm_flags |= VM_SHARED | VM_MAYSHARE;
573                         if (!(file->f_mode & FMODE_WRITE))
574                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
575
576                         /* fall through */
577                 case MAP_PRIVATE:
578                         if (!(file->f_mode & FMODE_READ))
579                                 return -EACCES;
580                         break;
581
582                 default:
583                         return -EINVAL;
584                 }
585         } else {
586                 vm_flags |= VM_SHARED | VM_MAYSHARE;
587                 switch (flags & MAP_TYPE) {
588                 default:
589                         return -EINVAL;
590                 case MAP_PRIVATE:
591                         vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
592                         /* fall through */
593                 case MAP_SHARED:
594                         break;
595                 }
596         }
597
598         error = security_file_mmap(file, prot, flags);
599         if (error)
600                 return error;
601                 
602         /* Clear old maps */
603         error = -ENOMEM;
604 munmap_back:
605         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
606         if (vma && vma->vm_start < addr + len) {
607                 if (do_munmap(mm, addr, len))
608                         return -ENOMEM;
609                 goto munmap_back;
610         }
611
612         /* Check against address space limit. */
613         if ((mm->total_vm << PAGE_SHIFT) + len
614             > current->rlim[RLIMIT_AS].rlim_cur)
615                 return -ENOMEM;
616
617         /* check context space, maybe only for private writable mappings? */
618         if (!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
619                 return -ENOMEM;
620
621         if (accountable && (!(flags & MAP_NORESERVE) ||
622                         sysctl_overcommit_memory > 1)) {
623                 if (vm_flags & VM_SHARED) {
624                         /* Check memory availability in shmem_file_setup? */
625                         vm_flags |= VM_ACCOUNT;
626                 } else if (vm_flags & VM_WRITE) {
627                         /*
628                          * Private writable mapping: check memory availability
629                          */
630                         charged = len >> PAGE_SHIFT;
631                         if (security_vm_enough_memory(charged))
632                                 return -ENOMEM;
633                         vm_flags |= VM_ACCOUNT;
634                 }
635         }
636
637         /* Can we just expand an old anonymous mapping? */
638         if (!file && !(vm_flags & VM_SHARED) && rb_parent)
639                 if (vma_merge(mm, prev, rb_parent, addr, addr + len,
640                                         vm_flags, NULL, 0))
641                         goto out;
642
643         /*
644          * Determine the object being mapped and call the appropriate
645          * specific mapper. The address has already been validated, but
646          * not unmapped, although the old maps have been removed from the list.
647          */
648         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
649         error = -ENOMEM;
650         if (!vma)
651                 goto unacct_error;
652
653         vma->vm_mm = mm;
654         vma->vm_start = addr;
655         vma->vm_end = addr + len;
656         vma->vm_flags = vm_flags;
657         vma->vm_page_prot = protection_map[vm_flags & 0x0f];
658         vma->vm_ops = NULL;
659         vma->vm_pgoff = pgoff;
660         vma->vm_file = NULL;
661         vma->vm_private_data = NULL;
662         vma->vm_next = NULL;
663         INIT_LIST_HEAD(&vma->shared);
664
665         if (file) {
666                 error = -EINVAL;
667                 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
668                         goto free_vma;
669                 if (vm_flags & VM_DENYWRITE) {
670                         error = deny_write_access(file);
671                         if (error)
672                                 goto free_vma;
673                         correct_wcount = 1;
674                 }
675                 vma->vm_file = file;
676                 get_file(file);
677                 error = file->f_op->mmap(file, vma);
678                 if (error)
679                         goto unmap_and_free_vma;
680         } else if (vm_flags & VM_SHARED) {
681                 error = shmem_zero_setup(vma);
682                 if (error)
683                         goto free_vma;
684         }
685
686         /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
687          * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
688          * that memory reservation must be checked; but that reservation
689          * belongs to shared memory object, not to vma: so now clear it.
690          */
691         if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
692                 vma->vm_flags &= ~VM_ACCOUNT;
693
694         /* Can addr have changed??
695          *
696          * Answer: Yes, several device drivers can do it in their
697          *         f_op->mmap method. -DaveM
698          */
699         addr = vma->vm_start;
700
701         if (!file || !rb_parent || !vma_merge(mm, prev, rb_parent, addr,
702                                 addr + len, vma->vm_flags, file, pgoff)) {
703                 vma_link(mm, vma, prev, rb_link, rb_parent);
704                 if (correct_wcount)
705                         atomic_inc(&inode->i_writecount);
706         } else {
707                 if (file) {
708                         if (correct_wcount)
709                                 atomic_inc(&inode->i_writecount);
710                         fput(file);
711                 }
712                 kmem_cache_free(vm_area_cachep, vma);
713         }
714 out:    
715         // mm->total_vm += len >> PAGE_SHIFT;
716         vx_vmpages_add(mm, len >> PAGE_SHIFT);
717         if (vm_flags & VM_LOCKED) {
718                 // mm->locked_vm += len >> PAGE_SHIFT;
719                 vx_vmlocked_add(mm, len >> PAGE_SHIFT);
720                 make_pages_present(addr, addr + len);
721         }
722         if (flags & MAP_POPULATE) {
723                 up_write(&mm->mmap_sem);
724                 sys_remap_file_pages(addr, len, prot,
725                                         pgoff, flags & MAP_NONBLOCK);
726                 down_write(&mm->mmap_sem);
727         }
728         return addr;
729
730 unmap_and_free_vma:
731         if (correct_wcount)
732                 atomic_inc(&inode->i_writecount);
733         vma->vm_file = NULL;
734         fput(file);
735
736         /* Undo any partial mapping done by a device driver. */
737         zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
738 free_vma:
739         kmem_cache_free(vm_area_cachep, vma);
740 unacct_error:
741         if (charged)
742                 vm_unacct_memory(charged);
743         return error;
744 }
745
746 EXPORT_SYMBOL(do_mmap_pgoff);
747
748 /* Get an address range which is currently unmapped.
749  * For shmat() with addr=0.
750  *
751  * Ugly calling convention alert:
752  * Return value with the low bits set means error value,
753  * ie
754  *      if (ret & ~PAGE_MASK)
755  *              error = ret;
756  *
757  * This function "knows" that -ENOMEM has the bits set.
758  */
759 #ifndef HAVE_ARCH_UNMAPPED_AREA
760 static inline unsigned long
761 arch_get_unmapped_area(struct file *filp, unsigned long addr,
762                 unsigned long len, unsigned long pgoff, unsigned long flags)
763 {
764         struct mm_struct *mm = current->mm;
765         struct vm_area_struct *vma;
766         unsigned long start_addr;
767
768         if (len > TASK_SIZE)
769                 return -ENOMEM;
770
771         if (addr) {
772                 addr = PAGE_ALIGN(addr);
773                 vma = find_vma(mm, addr);
774                 if (TASK_SIZE - len >= addr &&
775                     (!vma || addr + len <= vma->vm_start))
776                         return addr;
777         }
778         start_addr = addr = mm->free_area_cache;
779
780 full_search:
781         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
782                 /* At this point:  (!vma || addr < vma->vm_end). */
783                 if (TASK_SIZE - len < addr) {
784                         /*
785                          * Start a new search - just in case we missed
786                          * some holes.
787                          */
788                         if (start_addr != TASK_UNMAPPED_BASE) {
789                                 start_addr = addr = TASK_UNMAPPED_BASE;
790                                 goto full_search;
791                         }
792                         return -ENOMEM;
793                 }
794                 if (!vma || addr + len <= vma->vm_start) {
795                         /*
796                          * Remember the place where we stopped the search:
797                          */
798                         mm->free_area_cache = addr + len;
799                         return addr;
800                 }
801                 addr = vma->vm_end;
802         }
803 }
804 #else
805 extern unsigned long
806 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
807                         unsigned long, unsigned long);
808 #endif  
809
810 unsigned long
811 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
812                 unsigned long pgoff, unsigned long flags)
813 {
814         if (flags & MAP_FIXED) {
815                 unsigned long ret;
816
817                 if (addr > TASK_SIZE - len)
818                         return -ENOMEM;
819                 if (addr & ~PAGE_MASK)
820                         return -EINVAL;
821                 if (file && is_file_hugepages(file))  {
822                         /*
823                          * Check if the given range is hugepage aligned, and
824                          * can be made suitable for hugepages.
825                          */
826                         ret = prepare_hugepage_range(addr, len);
827                 } else {
828                         /*
829                          * Ensure that a normal request is not falling in a
830                          * reserved hugepage range.  For some archs like IA-64,
831                          * there is a separate region for hugepages.
832                          */
833                         ret = is_hugepage_only_range(addr, len);
834                 }
835                 if (ret)
836                         return -EINVAL;
837                 return addr;
838         }
839
840         if (file && file->f_op && file->f_op->get_unmapped_area)
841                 return file->f_op->get_unmapped_area(file, addr, len,
842                                                 pgoff, flags);
843
844         return arch_get_unmapped_area(file, addr, len, pgoff, flags);
845 }
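/*
 * Callers detect failure using the "low bits set" convention documented
 * above arch_get_unmapped_area(); for instance do_mmap_pgoff() does:
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (addr & ~PAGE_MASK)
 *		return addr;
 */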
846
847 EXPORT_SYMBOL(get_unmapped_area);
848
849 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
850 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
851 {
852         struct vm_area_struct *vma = NULL;
853
854         if (mm) {
855                 /* Check the cache first. */
856                 /* (Cache hit rate is typically around 35%.) */
857                 vma = mm->mmap_cache;
858                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
859                         struct rb_node * rb_node;
860
861                         rb_node = mm->mm_rb.rb_node;
862                         vma = NULL;
863
864                         while (rb_node) {
865                                 struct vm_area_struct * vma_tmp;
866
867                                 vma_tmp = rb_entry(rb_node,
868                                                 struct vm_area_struct, vm_rb);
869
870                                 if (vma_tmp->vm_end > addr) {
871                                         vma = vma_tmp;
872                                         if (vma_tmp->vm_start <= addr)
873                                                 break;
874                                         rb_node = rb_node->rb_left;
875                                 } else
876                                         rb_node = rb_node->rb_right;
877                         }
878                         if (vma)
879                                 mm->mmap_cache = vma;
880                 }
881         }
882         return vma;
883 }
884
885 EXPORT_SYMBOL(find_vma);
886
887 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
888 struct vm_area_struct *
889 find_vma_prev(struct mm_struct *mm, unsigned long addr,
890                         struct vm_area_struct **pprev)
891 {
892         struct vm_area_struct *vma = NULL, *prev = NULL;
893         struct rb_node * rb_node;
894         if (!mm)
895                 goto out;
896
897         /* Guard against addr being lower than the first VMA */
898         vma = mm->mmap;
899
900         /* Go through the RB tree quickly. */
901         rb_node = mm->mm_rb.rb_node;
902
903         while (rb_node) {
904                 struct vm_area_struct *vma_tmp;
905                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
906
907                 if (addr < vma_tmp->vm_end) {
908                         rb_node = rb_node->rb_left;
909                 } else {
910                         prev = vma_tmp;
911                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
912                                 break;
913                         rb_node = rb_node->rb_right;
914                 }
915         }
916
917 out:
918         *pprev = prev;
919         return prev ? prev->vm_next : vma;
920 }
921
922 #ifdef CONFIG_STACK_GROWSUP
923 /*
924  * vma is the first one with address > vma->vm_end.  Have to extend vma.
925  */
926 int expand_stack(struct vm_area_struct * vma, unsigned long address)
927 {
928         unsigned long grow;
929
930         if (!(vma->vm_flags & VM_GROWSUP))
931                 return -EFAULT;
932
933         /*
934          * vma->vm_start/vm_end cannot change under us because the caller
935          * is required to hold the mmap_sem in read mode. We need to get
936          * the spinlock only before relocating the vma range ourself.
937          */
938         address += 4 + PAGE_SIZE - 1;
939         address &= PAGE_MASK;
940         spin_lock(&vma->vm_mm->page_table_lock);
941         grow = (address - vma->vm_end) >> PAGE_SHIFT;
942
943         /* Overcommit.. */
944         if (security_vm_enough_memory(grow) ||
945                 !vx_vmpages_avail(vma->vm_mm, grow)) {
946                 spin_unlock(&vma->vm_mm->page_table_lock);
947                 return -ENOMEM;
948         }
949         
950         if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
951                         ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
952                         current->rlim[RLIMIT_AS].rlim_cur) {
953                 spin_unlock(&vma->vm_mm->page_table_lock);
954                 vm_unacct_memory(grow);
955                 return -ENOMEM;
956         }
957
958         vma->vm_end = address;
959         // vma->vm_mm->total_vm += grow;
960         vx_vmpages_add(vma->vm_mm, grow);
961         if (vma->vm_flags & VM_LOCKED)
962                 // vma->vm_mm->locked_vm += grow;
963                 vx_vmlocked_add(vma->vm_mm, grow);
964         spin_unlock(&vma->vm_mm->page_table_lock);
965         return 0;
966 }
967
968 struct vm_area_struct *
969 find_extend_vma(struct mm_struct *mm, unsigned long addr)
970 {
971         struct vm_area_struct *vma, *prev;
972
973         addr &= PAGE_MASK;
974         vma = find_vma_prev(mm, addr, &prev);
975         if (vma && (vma->vm_start <= addr))
976                 return vma;
977         if (!prev || expand_stack(prev, addr))
978                 return NULL;
979         if (prev->vm_flags & VM_LOCKED) {
980                 make_pages_present(addr, prev->vm_end);
981         }
982         return prev;
983 }
984 #else
985 /*
986  * vma is the first one with address < vma->vm_start.  Have to extend vma.
987  */
988 int expand_stack(struct vm_area_struct *vma, unsigned long address)
989 {
990         unsigned long grow;
991
992         /*
993          * vma->vm_start/vm_end cannot change under us because the caller
994          * is required to hold the mmap_sem in read mode. We need to get
995          * the spinlock only before relocating the vma range ourself.
996          */
997         address &= PAGE_MASK;
998         spin_lock(&vma->vm_mm->page_table_lock);
999         grow = (vma->vm_start - address) >> PAGE_SHIFT;
1000
1001         /* Overcommit.. */
1002         if (security_vm_enough_memory(grow) ||
1003                 !vx_vmpages_avail(vma->vm_mm, grow)) {
1004                 spin_unlock(&vma->vm_mm->page_table_lock);
1005                 return -ENOMEM;
1006         }
1007         
1008         if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
1009                         ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
1010                         current->rlim[RLIMIT_AS].rlim_cur) {
1011                 spin_unlock(&vma->vm_mm->page_table_lock);
1012                 vm_unacct_memory(grow);
1013                 return -ENOMEM;
1014         }
1015
1016         vma->vm_start = address;
1017         vma->vm_pgoff -= grow;
1018         // vma->vm_mm->total_vm += grow;
1019         vx_vmpages_add(vma->vm_mm, grow);
1020         if (vma->vm_flags & VM_LOCKED)
1021                 // vma->vm_mm->locked_vm += grow;
1022                 vx_vmlocked_add(vma->vm_mm, grow);
1023         spin_unlock(&vma->vm_mm->page_table_lock);
1024         return 0;
1025 }
1026
1027 struct vm_area_struct *
1028 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1029 {
1030         struct vm_area_struct * vma;
1031         unsigned long start;
1032
1033         addr &= PAGE_MASK;
1034         vma = find_vma(mm,addr);
1035         if (!vma)
1036                 return NULL;
1037         if (vma->vm_start <= addr)
1038                 return vma;
1039         if (!(vma->vm_flags & VM_GROWSDOWN))
1040                 return NULL;
1041         start = vma->vm_start;
1042         if (expand_stack(vma, addr))
1043                 return NULL;
1044         if (vma->vm_flags & VM_LOCKED) {
1045                 make_pages_present(addr, start);
1046         }
1047         return vma;
1048 }
1049 #endif
1050
1051 /*
1052  * Try to free as many page directory entries as we can,
1053  * without having to work very hard at actually scanning
1054  * the page tables themselves.
1055  *
1056  * Right now we try to free page tables if we have a nice
1057  * PGDIR-aligned area that got free'd up. We could be more
1058  * granular if we want to, but this is fast and simple,
1059  * and covers the bad cases.
1060  *
1061  * "prev", if it exists, points to a vma before the one
1062  * we just free'd - but there's no telling how much before.
1063  */
1064 static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
1065         unsigned long start, unsigned long end)
1066 {
1067         unsigned long first = start & PGDIR_MASK;
1068         unsigned long last = end + PGDIR_SIZE - 1;
1069         unsigned long start_index, end_index;
1070         struct mm_struct *mm = tlb->mm;
1071
1072         if (!prev) {
1073                 prev = mm->mmap;
1074                 if (!prev)
1075                         goto no_mmaps;
1076                 if (prev->vm_end > start) {
1077                         if (last > prev->vm_start)
1078                                 last = prev->vm_start;
1079                         goto no_mmaps;
1080                 }
1081         }
1082         for (;;) {
1083                 struct vm_area_struct *next = prev->vm_next;
1084
1085                 if (next) {
1086                         if (next->vm_start < start) {
1087                                 prev = next;
1088                                 continue;
1089                         }
1090                         if (last > next->vm_start)
1091                                 last = next->vm_start;
1092                 }
1093                 if (prev->vm_end > first)
1094                         first = prev->vm_end + PGDIR_SIZE - 1;
1095                 break;
1096         }
1097 no_mmaps:
1098         if (last < first)       /* for arches with discontiguous pgd indices */
1099                 return;
1100         /*
1101          * If the PGD bits are not consecutive in the virtual address, the
1102          * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
1103          */
1104         start_index = pgd_index(first);
1105         if (start_index < FIRST_USER_PGD_NR)
1106                 start_index = FIRST_USER_PGD_NR;
1107         end_index = pgd_index(last);
1108         if (end_index > start_index) {
1109                 clear_page_tables(tlb, start_index, end_index - start_index);
1110                 flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
1111         }
1112 }
1113
1114 /* Normal function to fix up a mapping
1115  * This function is the default for when an area has no specific
1116  * function.  This may be used as part of a more specific routine.
1117  *
1118  * By the time this function is called, the area struct has been
1119  * removed from the process mapping list.
1120  */
1121 static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
1122 {
1123         size_t len = area->vm_end - area->vm_start;
1124
1125         // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
1126         vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
1127         
1128         if (area->vm_flags & VM_LOCKED)
1129                 // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
1130                 vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
1131         /*
1132          * Is this a new hole at the lowest possible address?
1133          */
1134         if (area->vm_start >= TASK_UNMAPPED_BASE &&
1135                                 area->vm_start < area->vm_mm->free_area_cache)
1136               area->vm_mm->free_area_cache = area->vm_start;
1137
1138         remove_shared_vm_struct(area);
1139
1140         if (area->vm_ops && area->vm_ops->close)
1141                 area->vm_ops->close(area);
1142         if (area->vm_file)
1143                 fput(area->vm_file);
1144         kmem_cache_free(vm_area_cachep, area);
1145 }
1146
1147 /*
1148  * Update the VMA and inode share lists.
1149  *
1150  * Ok - we have the memory areas we should free on the 'free' list,
1151  * so release them, and do the vma updates.
1152  */
1153 static void unmap_vma_list(struct mm_struct *mm,
1154         struct vm_area_struct *mpnt)
1155 {
1156         do {
1157                 struct vm_area_struct *next = mpnt->vm_next;
1158                 unmap_vma(mm, mpnt);
1159                 mpnt = next;
1160         } while (mpnt != NULL);
1161         validate_mm(mm);
1162 }
1163
1164 /*
1165  * Get rid of page table information in the indicated region.
1166  *
1167  * Called with the page table lock held.
1168  */
1169 static void unmap_region(struct mm_struct *mm,
1170         struct vm_area_struct *vma,
1171         struct vm_area_struct *prev,
1172         unsigned long start,
1173         unsigned long end)
1174 {
1175         struct mmu_gather *tlb;
1176         unsigned long nr_accounted = 0;
1177
1178         lru_add_drain();
1179         tlb = tlb_gather_mmu(mm, 0);
1180         unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
1181         vm_unacct_memory(nr_accounted);
1182
1183         if (is_hugepage_only_range(start, end - start))
1184                 hugetlb_free_pgtables(tlb, prev, start, end);
1185         else
1186                 free_pgtables(tlb, prev, start, end);
1187         tlb_finish_mmu(tlb, start, end);
1188 }
1189
1190 /*
1191  * Create a list of vma's touched by the unmap, removing them from the mm's
1192  * vma list as we go..
1193  *
1194  * Called with the page_table_lock held.
1195  */
1196 static void
1197 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1198         struct vm_area_struct *prev, unsigned long end)
1199 {
1200         struct vm_area_struct **insertion_point;
1201         struct vm_area_struct *tail_vma = NULL;
1202
1203         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1204         do {
1205                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1206                 mm->map_count--;
1207                 tail_vma = vma;
1208                 vma = vma->vm_next;
1209         } while (vma && vma->vm_start < end);
1210         *insertion_point = vma;
1211         tail_vma->vm_next = NULL;
1212         mm->mmap_cache = NULL;          /* Kill the cache. */
1213 }
1214
1215 /*
1216  * Split a vma into two pieces at address 'addr'; a new vma is allocated
1217  * either for the first part or the tail.
1218  */
1219 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1220               unsigned long addr, int new_below)
1221 {
1222         struct vm_area_struct *new;
1223         struct address_space *mapping = NULL;
1224
1225         if (mm->map_count >= sysctl_max_map_count)
1226                 return -ENOMEM;
1227
1228         new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1229         if (!new)
1230                 return -ENOMEM;
1231
1232         /* most fields are the same, copy all, and then fixup */
1233         *new = *vma;
1234
1235         INIT_LIST_HEAD(&new->shared);
1236
1237         if (new_below)
1238                 new->vm_end = addr;
1239         else {
1240                 new->vm_start = addr;
1241                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1242         }
1243
1244         if (new->vm_file)
1245                 get_file(new->vm_file);
1246
1247         if (new->vm_ops && new->vm_ops->open)
1248                 new->vm_ops->open(new);
1249
1250         if (vma->vm_file)
1251                  mapping = vma->vm_file->f_mapping;
1252
1253         if (mapping)
1254                 down(&mapping->i_shared_sem);
1255         spin_lock(&mm->page_table_lock);
1256
1257         if (new_below) {
1258                 vma->vm_start = addr;
1259                 vma->vm_pgoff += ((addr - new->vm_start) >> PAGE_SHIFT);
1260         } else
1261                 vma->vm_end = addr;
1262
1263         __insert_vm_struct(mm, new);
1264
1265         spin_unlock(&mm->page_table_lock);
1266         if (mapping)
1267                 up(&mapping->i_shared_sem);
1268
1269         return 0;
1270 }
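/*
 * Example: unmapping the middle page of a three-page vma makes do_munmap()
 * below call split_vma() twice: first with new_below == 0 (the new vma
 * becomes the part above 'addr'), then with new_below == 1 (the new vma
 * becomes the part below 'addr'), leaving three one-page vmas before the
 * middle one is detached and unmapped.
 */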
1271
1272 /* Munmap is split into 2 main parts -- this part which finds
1273  * what needs doing, and the areas themselves, which do the
1274  * work.  This now handles partial unmappings.
1275  * Jeremy Fitzhardinge <jeremy@goop.org>
1276  */
1277 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1278 {
1279         unsigned long end;
1280         struct vm_area_struct *mpnt, *prev, *last;
1281
1282         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1283                 return -EINVAL;
1284
1285         if ((len = PAGE_ALIGN(len)) == 0)
1286                 return -EINVAL;
1287
1288         /* Find the first overlapping VMA */
1289         mpnt = find_vma_prev(mm, start, &prev);
1290         if (!mpnt)
1291                 return 0;
1292         /* we have  start < mpnt->vm_end  */
1293
1294         if (is_vm_hugetlb_page(mpnt)) {
1295                 int ret = is_aligned_hugepage_range(start, len);
1296
1297                 if (ret)
1298                         return ret;
1299         }
1300
1301         /* if it doesn't overlap, we have nothing.. */
1302         end = start + len;
1303         if (mpnt->vm_start >= end)
1304                 return 0;
1305
1306         /* Something will probably happen, so notify. */
1307         if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
1308                 profile_exec_unmap(mm);
1309  
1310         /*
1311          * If we need to split any vma, do it now to save pain later.
1312          *
1313          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1314          * unmapped vm_area_struct will remain in use: so lower split_vma
1315          * places tmp vma above, and higher split_vma places tmp vma below.
1316          */
1317         if (start > mpnt->vm_start) {
1318                 if (split_vma(mm, mpnt, start, 0))
1319                         return -ENOMEM;
1320                 prev = mpnt;
1321         }
1322
1323         /* Does it split the last one? */
1324         last = find_vma(mm, end);
1325         if (last && end > last->vm_start) {
1326                 if (split_vma(mm, last, end, 1))
1327                         return -ENOMEM;
1328         }
1329         mpnt = prev? prev->vm_next: mm->mmap;
1330
1331         /*
1332          * Remove the vma's, and unmap the actual pages
1333          */
1334         spin_lock(&mm->page_table_lock);
1335         detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
1336         unmap_region(mm, mpnt, prev, start, end);
1337         spin_unlock(&mm->page_table_lock);
1338
1339         /* Fix up all other VM information */
1340         unmap_vma_list(mm, mpnt);
1341
1342         return 0;
1343 }
1344
1345 EXPORT_SYMBOL(do_munmap);
1346
1347 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1348 {
1349         int ret;
1350         struct mm_struct *mm = current->mm;
1351
1352         down_write(&mm->mmap_sem);
1353         ret = do_munmap(mm, addr, len);
1354         up_write(&mm->mmap_sem);
1355         return ret;
1356 }
1357
1358 /*
1359  *  this is really a simplified "do_mmap".  it only handles
1360  *  anonymous maps.  eventually we may be able to do some
1361  *  brk-specific accounting here.
1362  */
1363 unsigned long do_brk(unsigned long addr, unsigned long len)
1364 {
1365         struct mm_struct * mm = current->mm;
1366         struct vm_area_struct * vma, * prev;
1367         unsigned long flags;
1368         struct rb_node ** rb_link, * rb_parent;
1369
1370         len = PAGE_ALIGN(len);
1371         if (!len)
1372                 return addr;
1373
1374         if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1375                 return -EINVAL;
1376
1377         /*
1378          * mlock MCL_FUTURE?
1379          */
1380         if (mm->def_flags & VM_LOCKED) {
1381                 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
1382                 locked += len;
1383                 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
1384                         return -EAGAIN;
1385                 /* vserver checks ? */
1386         }
1387
1388         /*
1389          * Clear old maps.  this also does some error checking for us
1390          */
1391  munmap_back:
1392         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1393         if (vma && vma->vm_start < addr + len) {
1394                 if (do_munmap(mm, addr, len))
1395                         return -ENOMEM;
1396                 goto munmap_back;
1397         }
1398
1399         /* Check against address space limits *after* clearing old maps... */
1400         if ((mm->total_vm << PAGE_SHIFT) + len
1401             > current->rlim[RLIMIT_AS].rlim_cur)
1402                 return -ENOMEM;
1403
1404         if (mm->map_count > sysctl_max_map_count)
1405                 return -ENOMEM;
1406
1407         if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
1408                 !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
1409                 return -ENOMEM;
1410
1411         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1412
1413         /* Can we just expand an old anonymous mapping? */
1414         if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
1415                                         flags, NULL, 0))
1416                 goto out;
1417
1418         /*
1419          * create a vma struct for an anonymous mapping
1420          */
1421         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1422         if (!vma) {
1423                 vm_unacct_memory(len >> PAGE_SHIFT);
1424                 return -ENOMEM;
1425         }
1426
1427         vma->vm_mm = mm;
1428         vma->vm_start = addr;
1429         vma->vm_end = addr + len;
1430         vma->vm_flags = flags;
1431         vma->vm_page_prot = protection_map[flags & 0x0f];
1432         vma->vm_ops = NULL;
1433         vma->vm_pgoff = 0;
1434         vma->vm_file = NULL;
1435         vma->vm_private_data = NULL;
1436         INIT_LIST_HEAD(&vma->shared);
1437
1438         vma_link(mm, vma, prev, rb_link, rb_parent);
1439
1440 out:
1441         // mm->total_vm += len >> PAGE_SHIFT;
1442         vx_vmpages_add(mm, len >> PAGE_SHIFT);
1443         if (flags & VM_LOCKED) {
1444                 // mm->locked_vm += len >> PAGE_SHIFT;
1445                 vx_vmlocked_add(mm, len >> PAGE_SHIFT);
1446                 make_pages_present(addr, addr + len);
1447         }
1448         return addr;
1449 }
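/*
 * Note: do_brk() charges the whole length up front via
 * security_vm_enough_memory() and marks the vma VM_ACCOUNT, which is why
 * the allocation-failure path above calls vm_unacct_memory() before
 * returning -ENOMEM.
 */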
1450
1451 EXPORT_SYMBOL(do_brk);
1452
1453 /* Release all mmaps. */
1454 void exit_mmap(struct mm_struct *mm)
1455 {
1456         struct mmu_gather *tlb;
1457         struct vm_area_struct *vma;
1458         unsigned long nr_accounted = 0;
1459
1460         profile_exit_mmap(mm);
1461  
1462         lru_add_drain();
1463
1464         spin_lock(&mm->page_table_lock);
1465
1466         tlb = tlb_gather_mmu(mm, 1);
1467         flush_cache_mm(mm);
1468         /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
1469         mm->map_count -= unmap_vmas(&tlb, mm, mm->mmap, 0,
1470                                         ~0UL, &nr_accounted, NULL);
1471         vm_unacct_memory(nr_accounted);
1472         BUG_ON(mm->map_count);  /* This is just debugging */
1473         clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
1474         tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
1475
1476         vma = mm->mmap;
1477         mm->mmap = mm->mmap_cache = NULL;
1478         mm->mm_rb = RB_ROOT;
1479         // mm->rss = 0;
1480         vx_rsspages_sub(mm, mm->rss);
1481         // mm->total_vm = 0;
1482         vx_vmpages_sub(mm, mm->total_vm);
1483         // mm->locked_vm = 0;
1484         vx_vmlocked_sub(mm, mm->locked_vm);
1485
1486         spin_unlock(&mm->page_table_lock);
1487
1488         /*
1489          * Walk the list again, actually closing and freeing it
1490          * without holding any MM locks.
1491          */
1492         while (vma) {
1493                 struct vm_area_struct *next = vma->vm_next;
1494                 remove_shared_vm_struct(vma);
1495                 if (vma->vm_ops) {
1496                         if (vma->vm_ops->close)
1497                                 vma->vm_ops->close(vma);
1498                 }
1499                 if (vma->vm_file)
1500                         fput(vma->vm_file);
1501                 kmem_cache_free(vm_area_cachep, vma);
1502                 vma = next;
1503         }
1504 }
1505
1506 /* Insert vm structure into process list sorted by address
1507  * and into the inode's i_mmap ring.  If vm_file is non-NULL
1508  * then i_shared_sem is taken here.
1509  */
1510 void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1511 {
1512         struct vm_area_struct * __vma, * prev;
1513         struct rb_node ** rb_link, * rb_parent;
1514
1515         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
1516         if (__vma && __vma->vm_start < vma->vm_end)
1517                 BUG();
1518         vma_link(mm, vma, prev, rb_link, rb_parent);
1519 }
1520
1521 /*
1522  * Copy the vma structure to a new location in the same mm,
1523  * prior to moving page table entries, to effect an mremap move.
1524  */
1525 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1526         unsigned long addr, unsigned long len, unsigned long pgoff)
1527 {
1528         struct vm_area_struct *vma = *vmap;
1529         unsigned long vma_start = vma->vm_start;
1530         struct mm_struct *mm = vma->vm_mm;
1531         struct vm_area_struct *new_vma, *prev;
1532         struct rb_node **rb_link, *rb_parent;
1533
1534         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1535         new_vma = vma_merge(mm, prev, rb_parent, addr, addr + len,
1536                         vma->vm_flags, vma->vm_file, pgoff);
1537         if (new_vma) {
1538                 /*
1539                  * Source vma may have been merged into new_vma
1540                  */
1541                 if (vma_start >= new_vma->vm_start &&
1542                     vma_start < new_vma->vm_end)
1543                         *vmap = new_vma;
1544         } else {
1545                 new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1546                 if (new_vma) {
1547                         *new_vma = *vma;
1548                         INIT_LIST_HEAD(&new_vma->shared);
1549                         new_vma->vm_start = addr;
1550                         new_vma->vm_end = addr + len;
1551                         new_vma->vm_pgoff = pgoff;
1552                         if (new_vma->vm_file)
1553                                 get_file(new_vma->vm_file);
1554                         if (new_vma->vm_ops && new_vma->vm_ops->open)
1555                                 new_vma->vm_ops->open(new_vma);
1556                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
1557                 }
1558         }
1559         return new_vma;
1560 }