kmem_cache_free(vm_area_cachep, vma);
}
out:
- // mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(mm, len >> PAGE_SHIFT);
__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
- // mm->locked_vm += len >> PAGE_SHIFT;
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
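For context: vx_vmpages_add() and vx_vmlocked_add() are Linux-VServer accounting hooks, not mainline helpers. A minimal, self-contained model of the pattern this hunk depends on (one wrapper updates the mm counter that the removed commented-out line used to name, plus a per-context charge); mm_model and PAGE_SHIFT_SK are assumptions for illustration, not the VServer implementation:

/* Model only: the real wrappers also charge the owning vx context. */
#include <assert.h>

#define PAGE_SHIFT_SK 12	/* assume 4 KiB pages */

struct mm_model {
	unsigned long total_vm;		/* mapped pages */
	unsigned long locked_vm;	/* mlocked pages */
};

static void vx_vmpages_add(struct mm_model *mm, long pages)
{
	mm->total_vm += pages;	/* what mm->total_vm += ... did directly */
	/* real version: also charge the vx context (elided here) */
}

static void vx_vmlocked_add(struct mm_model *mm, long pages)
{
	mm->locked_vm += pages;
}

int main(void)
{
	struct mm_model mm = { 0, 0 };
	unsigned long len = 8UL << PAGE_SHIFT_SK;	/* 8-page mapping */

	vx_vmpages_add(&mm, len >> PAGE_SHIFT_SK);
	vx_vmlocked_add(&mm, len >> PAGE_SHIFT_SK);	/* VM_LOCKED path */
	assert(mm.total_vm == 8 && mm.locked_vm == 8);
	return 0;
}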
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- if (flags & MAP_FIXED) {
- unsigned long ret;
+ unsigned long ret;
+
- if (addr > TASK_SIZE - len)
- return -ENOMEM;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
- if (file && is_file_hugepages(file)) {
- /*
- * Check if the given range is hugepage aligned, and
- * can be made suitable for hugepages.
- */
- ret = prepare_hugepage_range(addr, len);
- } else {
- /*
- * Ensure that a normal request is not falling in a
- * reserved hugepage range. For some archs like IA-64,
- * there is a separate region for hugepages.
- */
- ret = is_hugepage_only_range(addr, len);
- }
- if (ret)
- return -EINVAL;
- return addr;
- }
+ if (!(flags & MAP_FIXED)) {
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (file && file->f_op && file->f_op->get_unmapped_area)
- return file->f_op->get_unmapped_area(file, addr, len,
- pgoff, flags);
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ }
+
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
}
EXPORT_SYMBOL(get_unmapped_area);
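The rewrite above changes where the sanity tests run: previously an address produced by file->f_op->get_unmapped_area() or mm->get_unmapped_area() was returned unchecked, and only the MAP_FIXED branch carried its own copy of the checks. Now the allocator callback is chosen first, and every path falls through the same TASK_SIZE, alignment, and hugepage validation. A compilable sketch of that control flow follows; all *_SK names are stand-ins for the kernel's constants and for IS_ERR_VALUE(), whose trick is that an unsigned return value doubles as an errno carrier:

/* Sketch under stated assumptions, not the kernel implementation. */
#include <stdio.h>

#define TASK_SIZE_SK	0xc0000000UL	/* stand-in for TASK_SIZE */
#define PAGE_MASK_SK	(~0xfffUL)	/* stand-in for PAGE_MASK */
#define ENOMEM_SK	12
#define EINVAL_SK	22
/* errors travel as "negative" unsigned longs, as with IS_ERR_VALUE() */
#define IS_ERR_VALUE_SK(x)	((x) > (unsigned long)-1000L)

/* pretend per-mm allocator: hand back the hint, page-aligned */
static unsigned long mm_get_area(unsigned long hint, unsigned long len)
{
	(void)len;
	return (hint ? hint : 0x10000000UL) & PAGE_MASK_SK;
}

static unsigned long get_unmapped_area_sk(unsigned long addr,
		unsigned long len, int map_fixed)
{
	if (!map_fixed) {
		addr = mm_get_area(addr, len);
		if (IS_ERR_VALUE_SK(addr))
			return addr;	/* allocator failure passes up */
	}
	/* one copy of the checks now guards fixed and hinted paths alike */
	if (addr > TASK_SIZE_SK - len)
		return (unsigned long)-ENOMEM_SK;
	if (addr & ~PAGE_MASK_SK)
		return (unsigned long)-EINVAL_SK;
	return addr;
}

int main(void)
{
	printf("%#lx\n", get_unmapped_area_sk(0x20000000UL, 4096, 1));
	printf("%ld\n", (long)get_unmapped_area_sk(0x20000001UL, 4096, 1));
	return 0;
}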
return -ENOMEM;
/* Ok, everything looks good - let it rip */
- // mm->total_vm += grow;
vx_vmpages_add(mm, grow);
if (vma->vm_flags & VM_LOCKED)
- // mm->locked_vm += grow;
vx_vmlocked_add(mm, grow);
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
acct_update_integrals();
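__vm_stat_account() takes a signed page delta and maintains per-kind aggregates (shared_vm, exec_vm, stack_vm) alongside total_vm. A simplified model of its flag-driven bucketing, with invented flag bits and the hugetlb case omitted:

/* Simplified model; the *_SK flag values are made up. */
#include <assert.h>

#define VM_EXEC_SK	0x1UL
#define VM_WRITE_SK	0x2UL
#define VM_STACK_SK	0x4UL	/* stand-in for VM_GROWSUP|VM_GROWSDOWN */

struct mm_stats { long shared_vm, exec_vm, stack_vm; };

static void vm_stat_account_sk(struct mm_stats *mm, unsigned long flags,
		int has_file, long pages)
{
	if (has_file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC_SK | VM_WRITE_SK)) == VM_EXEC_SK)
			mm->exec_vm += pages;	/* read-only executable */
	} else if (flags & VM_STACK_SK) {
		mm->stack_vm += pages;
	}
}

int main(void)
{
	struct mm_stats mm = { 0, 0, 0 };

	vm_stat_account_sk(&mm, VM_STACK_SK, 0, 4);	/* stack grows */
	vm_stat_account_sk(&mm, VM_STACK_SK, 0, -4);	/* and shrinks */
	assert(mm.stack_vm == 0 && mm.shared_vm == 0);
	return 0;
}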
{
size_t len = area->vm_end - area->vm_start;
- // area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
if (area->vm_flags & VM_LOCKED)
- // area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
vm_stat_unaccount(area);
area->vm_mm->unmap_area(area);
vma->vm_page_prot = protection_map[flags & 0x0f];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
- // mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(mm, len >> PAGE_SHIFT);
if (flags & VM_LOCKED) {
- // mm->locked_vm += len >> PAGE_SHIFT;
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
vma = mm->mmap;
mm->mmap = mm->mmap_cache = NULL;
mm->mm_rb = RB_ROOT;
- // mm->rss = 0;
vx_rsspages_sub(mm, mm->rss);
- // mm->total_vm = 0;
vx_vmpages_sub(mm, mm->total_vm);
- // mm->locked_vm = 0;
vx_vmlocked_sub(mm, mm->locked_vm);
spin_unlock(&mm->page_table_lock);
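Note the exit-path idiom in the hunk above: each counter is zeroed by subtracting its current value rather than by assigning 0, so the same wrapper that charged the context un-charges it by exactly the amount previously accounted. A minimal check of that identity, reusing stand-in types (mm_model is an assumption, as before):

#include <assert.h>

struct mm_model { unsigned long total_vm, locked_vm, rss; };

static void vx_vmpages_sub(struct mm_model *mm, long p)  { mm->total_vm  -= p; }
static void vx_vmlocked_sub(struct mm_model *mm, long p) { mm->locked_vm -= p; }
static void vx_rsspages_sub(struct mm_model *mm, long p) { mm->rss       -= p; }

int main(void)
{
	struct mm_model mm = { 40, 8, 25 };

	/* subtract the live value instead of assigning zero, so the
	 * context charge drops by exactly what was accounted */
	vx_rsspages_sub(&mm, mm.rss);
	vx_vmpages_sub(&mm, mm.total_vm);
	vx_vmlocked_sub(&mm, mm.locked_vm);
	assert(mm.rss == 0 && mm.total_vm == 0 && mm.locked_vm == 0);
	return 0;
}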