#include <linux/rmap.h>
#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
{
struct file *file = vma->vm_file;
+ might_sleep();
if (file) {
struct address_space *mapping = file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
- vma_prio_tree_init(vma);
__vma_link_list(mm, vma, prev, rb_parent);
__vma_link_rb(mm, vma, rb_link, rb_parent);
- __vma_link_file(vma);
__anon_vma_link(vma);
}
if (mapping)
spin_lock(&mapping->i_mmap_lock);
anon_vma_lock(vma);
+
__vma_link(mm, vma, prev, rb_link, rb_parent);
+ __vma_link_file(vma);
+
anon_vma_unlock(vma);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
- mark_mm_hugetlb(mm, vma);
mm->map_count++;
validate_mm(mm);
}
/*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
*/
static void
__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if (__vma && __vma->vm_start < vma->vm_end)
BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent);
- mark_mm_hugetlb(mm, vma);
mm->map_count++;
- validate_mm(mm);
}
-static inline void __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
+static inline void
+__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev)
{
prev->vm_next = vma->vm_next;
rb_erase(&vma->vm_rb, &mm->mm_rb);
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *importer = NULL;
struct address_space *mapping = NULL;
struct prio_tree_root *root = NULL;
struct file *file = vma->vm_file;
if (next && !insert) {
if (end >= next->vm_end) {
+ /*
+ * vma expands, overlapping all the next, and
+ * perhaps the one after too (mprotect case 6).
+ */
again: remove_next = 1 + (end > next->vm_end);
end = next->vm_end;
anon_vma = next->anon_vma;
- } else if (end < vma->vm_end || end > next->vm_start) {
+ } else if (end > next->vm_start) {
/*
- * vma shrinks, and !insert tells it's not
- * split_vma inserting another: so it must
- * be mprotect shifting the boundary down.
- * Or:
* vma expands, overlapping part of the next:
- * must be mprotect shifting the boundary up.
+ * mprotect case 5 shifting the boundary up.
*/
- BUG_ON(vma->vm_end != next->vm_start);
- adjust_next = end - next->vm_start;
+ adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
anon_vma = next->anon_vma;
+ importer = vma;
+ } else if (end < vma->vm_end) {
+ /*
+ * vma shrinks, and !insert tells it's not
+ * split_vma inserting another: so it must be
+ * mprotect case 4 shifting the boundary down.
+ */
+ adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
+ anon_vma = next->anon_vma;
+ importer = next;
}
}
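The sign convention above is easiest to check with concrete numbers. Below is a standalone sketch of the arithmetic (assumed addresses, 4K pages; the real code does this on vm_area_structs under the proper locks, and writes the byte conversion as adjust_next << PAGE_SHIFT, which this portable demo replaces with a multiply):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long next_start = 0x5000, next_pgoff = 10, vma_end = 0x5000;
	unsigned long end;
	long adjust_next;

	/* mprotect case 5: vma expands up to end = 0x7000, taking part of next */
	end = 0x7000;
	adjust_next = (end - next_start) >> PAGE_SHIFT;		/* +2 pages */
	next_start += adjust_next * (long)PAGE_SIZE;		/* 0x7000 */
	next_pgoff += adjust_next;				/* 12 */
	printf("case 5: adjust_next=%+ld start=%#lx pgoff=%lu\n",
	       adjust_next, next_start, next_pgoff);

	/* mprotect case 4: vma shrinks down to end = 0x3000, next grows down */
	end = 0x3000;
	next_start = 0x5000;
	next_pgoff = 10;
	adjust_next = -(long)((vma_end - end) >> PAGE_SHIFT);	/* -2 pages */
	next_start += adjust_next * (long)PAGE_SIZE;		/* 0x3000 */
	next_pgoff += adjust_next;				/* 8 */
	printf("case 4: adjust_next=%+ld start=%#lx pgoff=%lu\n",
	       adjust_next, next_start, next_pgoff);
	return 0;
}

Storing pages rather than bytes lets one signed quantity update vm_start and vm_pgoff consistently, which is exactly how the later hunk applies it.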
if (!(vma->vm_flags & VM_NONLINEAR))
root = &mapping->i_mmap;
spin_lock(&mapping->i_mmap_lock);
+ if (insert) {
+ /*
+ * Put into prio_tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(insert);
+ }
}
/*
*/
if (vma->anon_vma)
anon_vma = vma->anon_vma;
- if (anon_vma)
+ if (anon_vma) {
spin_lock(&anon_vma->lock);
+ /*
+ * Easily overlooked: when mprotect shifts the boundary,
+ * make sure the expanding vma has anon_vma set if the
+ * shrinking vma had, to cover any anon pages imported.
+ */
+ if (importer && !importer->anon_vma) {
+ importer->anon_vma = anon_vma;
+ __anon_vma_link(importer);
+ }
+ }
if (root) {
flush_dcache_mmap_lock(mapping);
vma->vm_end = end;
vma->vm_pgoff = pgoff;
if (adjust_next) {
- next->vm_start += adjust_next;
- next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+ next->vm_start += adjust_next << PAGE_SHIFT;
+ next->vm_pgoff += adjust_next;
}
if (root) {
* The caller must hold down_write(current->mm->mmap_sem).
*/
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long pgoff)
+unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long pgoff)
{
- struct mm_struct * mm = current->mm;
struct vm_area_struct * vma, * prev;
struct inode *inode;
unsigned int vm_flags;
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_LOCKED) {
- if (!capable(CAP_IPC_LOCK))
+ if (!can_do_mlock())
return -EPERM;
vm_flags |= VM_LOCKED;
}
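can_do_mlock() relaxes the old capable(CAP_IPC_LOCK) test: an unprivileged task may now use MAP_LOCKED as long as its RLIMIT_MEMLOCK is non-zero (the byte-count check follows further down). A sketch of the helper as it looked in mainline of this era; later kernels read the limit from current->signal->rlim:

static inline int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}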
/* mlock MCL_FUTURE? */
if (vm_flags & VM_LOCKED) {
- unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
- if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
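The MCL_FUTURE path keeps the old byte arithmetic but now lets CAP_IPC_LOCK override the limit instead of failing outright. A standalone sketch of the check with assumed numbers (4K pages; the capability test is mocked as a flag):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long locked_vm = 4;			/* pages already locked */
	unsigned long len = 3UL << PAGE_SHIFT;		/* new request: 3 pages */
	unsigned long lock_limit = 6UL << PAGE_SHIFT;	/* RLIMIT_MEMLOCK */
	int cap_ipc_lock = 0;				/* mocked capable() */
	unsigned long locked;

	locked = (locked_vm << PAGE_SHIFT) + len;	/* 7 pages in bytes */
	if (locked > lock_limit && !cap_ipc_lock)
		printf("-EAGAIN: %lu bytes exceeds the %lu byte limit\n",
		       locked, lock_limit);
	else
		printf("ok: %lu of %lu bytes locked\n", locked, lock_limit);
	return 0;
}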
> current->rlim[RLIMIT_AS].rlim_cur)
return -ENOMEM;
+ /* check the context's page limit; perhaps this should only cover private writable mappings? */
+ if (!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
+ return -ENOMEM;
+
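vx_vmpages_avail() is the Linux-VServer per-context analogue of the RLIMIT_AS test above. A minimal sketch of the idea, assuming a per-mm vx_info carrying an address-space page limit; the field and limit names are illustrative, not the exact vserver API:

/* Sketch only: may the context behind 'mm' grow by 'pages'?
 * mm_vx_info and limit.rlim[] are assumed names. */
static inline int vx_vmpages_avail(struct mm_struct *mm, unsigned long pages)
{
	struct vx_info *vxi = mm->mm_vx_info;

	if (!vxi)
		return 1;	/* no context: only global limits apply */
	if (vxi->limit.rlim[RLIMIT_AS] == RLIM_INFINITY)
		return 1;
	return mm->total_vm + pages <= vxi->limit.rlim[RLIMIT_AS];
}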
if (accountable && (!(flags & MAP_NORESERVE) ||
sysctl_overcommit_memory > 1)) {
if (vm_flags & VM_SHARED) {
kmem_cache_free(vm_area_cachep, vma);
}
out:
- mm->total_vm += len >> PAGE_SHIFT;
+ // mm->total_vm += len >> PAGE_SHIFT;
+ vx_vmpages_add(mm, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
- mm->locked_vm += len >> PAGE_SHIFT;
+ // mm->locked_vm += len >> PAGE_SHIFT;
+ vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
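The commented-out assignments preserved above document the contract of each wrapper: it must perform the old per-mm update and mirror it into the context's accounting. A sketch of that pairing; the vx_info accounting members are assumed names, not vserver's exact ones:

static inline void vx_vmpages_add(struct mm_struct *mm, unsigned long pages)
{
	mm->total_vm += pages;
	if (mm->mm_vx_info)	/* assumed field */
		atomic_add((int)pages, &mm->mm_vx_info->cacct.vm_pages);
}

static inline void vx_vmlocked_add(struct mm_struct *mm, unsigned long pages)
{
	mm->locked_vm += pages;
	if (mm->mm_vx_info)
		atomic_add((int)pages, &mm->mm_vx_info->cacct.vm_locked);
}

The matching vx_*_sub() helpers used by unmap_vma() and exit_mmap() below would simply subtract from both counters.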
if (flags & MAP_POPULATE) {
grow = (address - vma->vm_end) >> PAGE_SHIFT;
/* Overcommit.. */
- if (security_vm_enough_memory(grow)) {
+ if (security_vm_enough_memory(grow) ||
+ !vx_vmpages_avail(vma->vm_mm, grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
+
vma->vm_end = address;
- vma->vm_mm->total_vm += grow;
+ // vma->vm_mm->total_vm += grow;
+ vx_vmpages_add(vma->vm_mm, grow);
if (vma->vm_flags & VM_LOCKED)
- vma->vm_mm->locked_vm += grow;
+ // vma->vm_mm->locked_vm += grow;
+ vx_vmlocked_add(vma->vm_mm, grow);
anon_vma_unlock(vma);
return 0;
}
grow = (vma->vm_start - address) >> PAGE_SHIFT;
/* Overcommit.. */
- if (security_vm_enough_memory(grow)) {
+ if (security_vm_enough_memory(grow) ||
+ !vx_vmpages_avail(vma->vm_mm, grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
+
vma->vm_start = address;
vma->vm_pgoff -= grow;
- vma->vm_mm->total_vm += grow;
+ // vma->vm_mm->total_vm += grow;
+ vx_vmpages_add(vma->vm_mm, grow);
if (vma->vm_flags & VM_LOCKED)
- vma->vm_mm->locked_vm += grow;
+ // vma->vm_mm->locked_vm += grow;
+ vx_vmlocked_add(vma->vm_mm, grow);
anon_vma_unlock(vma);
return 0;
}
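When the stack grows downwards, vm_start and vm_pgoff must move together so that a page's offset keeps matching its virtual address; upward growth (the hunk above) only moves vm_end and needs no pgoff fixup. A standalone sketch of the downward arithmetic with assumed addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0xbf000000, vm_pgoff = 0xbf000;
	unsigned long address = 0xbeffe000;	/* fault two pages below */
	unsigned long grow = (vm_start - address) >> PAGE_SHIFT;

	vm_start = address;	/* vma now covers the faulting address */
	vm_pgoff -= grow;	/* keep pgoff in step with vm_start */
	printf("grew %lu pages: start=%#lx pgoff=%#lx\n",
	       grow, vm_start, vm_pgoff);
	return 0;
}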
static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
{
size_t len = area->vm_end - area->vm_start;
+ unsigned long old_end = area->vm_end;
- area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+ // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+ vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
+
if (area->vm_flags & VM_LOCKED)
- area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+ // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+ vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
/*
* Is this a new hole at the lowest possible address?
*/
*/
if (area->vm_start > area->vm_mm->non_executable_cache)
area->vm_mm->non_executable_cache = area->vm_start;
-
remove_vm_struct(area);
if (unlikely(area->vm_flags & VM_EXEC))
- arch_remove_exec_range(mm, area->vm_end);
+ arch_remove_exec_range(mm, old_end);
}
/*
/* most fields are the same, copy all, and then fixup */
*new = *vma;
+ vma_prio_tree_init(new);
- if (new_below) {
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, new->vm_end);
+ if (new_below)
new->vm_end = addr;
- }
else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- if (new_below)
+ if (new_below) {
+ unsigned long old_end = vma->vm_end;
+
vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
- else
+ if (vma->vm_flags & VM_EXEC)
+ arch_remove_exec_range(mm, old_end);
+ } else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
return 0;
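The vm_pgoff fixups in split_vma follow one rule: whichever half no longer begins at the old vm_start must advance its offset by the pages it skipped, while the low half keeps the old offset. A worked sketch with assumed addresses, splitting [0x1000,0x6000) pgoff 0 at 0x3000:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x1000, vm_end = 0x6000, vm_pgoff = 0;
	unsigned long addr = 0x3000;	/* split point */

	unsigned long low_start = vm_start, low_end = addr;
	unsigned long low_pgoff = vm_pgoff;	/* unchanged */

	unsigned long high_start = addr, high_end = vm_end;
	unsigned long high_pgoff = vm_pgoff +
			((addr - vm_start) >> PAGE_SHIFT);	/* 0 + 2 */

	printf("low:  [%#lx,%#lx) pgoff=%lu\n", low_start, low_end, low_pgoff);
	printf("high: [%#lx,%#lx) pgoff=%lu\n",
	       high_start, high_end, high_pgoff);
	return 0;
}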
* mlock MCL_FUTURE?
*/
if (mm->def_flags & VM_LOCKED) {
- unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
- if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
+ /* should a vserver context check be added here as well? */
}
/*
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
- if (security_vm_enough_memory(len >> PAGE_SHIFT))
+ if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+ !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
return -ENOMEM;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
vma->vm_page_prot = protection_map[flags & 0x0f];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
- mm->total_vm += len >> PAGE_SHIFT;
+ // mm->total_vm += len >> PAGE_SHIFT;
+ vx_vmpages_add(mm, len >> PAGE_SHIFT);
if (flags & VM_LOCKED) {
- mm->locked_vm += len >> PAGE_SHIFT;
+ // mm->locked_vm += len >> PAGE_SHIFT;
+ vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
return addr;
vma = mm->mmap;
mm->mmap = mm->mmap_cache = NULL;
mm->mm_rb = RB_ROOT;
- mm->rss = 0;
- mm->total_vm = 0;
- mm->locked_vm = 0;
+ // mm->rss = 0;
+ vx_rsspages_sub(mm, mm->rss);
+ // mm->total_vm = 0;
+ vx_vmpages_sub(mm, mm->total_vm);
+ // mm->locked_vm = 0;
+ vx_vmlocked_sub(mm, mm->locked_vm);
arch_flush_exec_range(mm);
spin_unlock(&mm->page_table_lock);
new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (new_vma) {
*new_vma = *vma;
+ vma_prio_tree_init(new_vma);
pol = mpol_copy(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new_vma);