diff --git a/mm/mmap.c b/mm/mmap.c
index d04d60a1d..800b745ca 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1113,11 +1113,9 @@ munmap_back:
 		kmem_cache_free(vm_area_cachep, vma);
 	}
 out:
-	// mm->total_vm += len >> PAGE_SHIFT;
 	vx_vmpages_add(mm, len >> PAGE_SHIFT);
 	__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		// mm->locked_vm += len >> PAGE_SHIFT;
 		vx_vmlocked_add(mm, len >> PAGE_SHIFT);
 		make_pages_present(addr, addr + len);
 	}
@@ -1321,37 +1319,40 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file)) {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range. For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
-	}
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-				pgoff, flags);
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+	}
 
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file)) {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range. For some archs like IA-64,
+		 * there is a separate region for hugepages.
+		 */
+		ret = is_hugepage_only_range(addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1468,10 +1469,8 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 		return -ENOMEM;
 
 	/* Ok, everything looks good - let it rip */
-	// mm->total_vm += grow;
 	vx_vmpages_add(mm, grow);
 	if (vma->vm_flags & VM_LOCKED)
-		// mm->locked_vm += grow;
 		vx_vmlocked_add(mm, grow);
 	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
 	acct_update_integrals();
@@ -1674,11 +1673,9 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
 	size_t len = area->vm_end - area->vm_start;
 
-	// area->vm_mm->total_vm -= len >> PAGE_SHIFT;
 	vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
 	if (area->vm_flags & VM_LOCKED)
-		// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
 		vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
 	vm_stat_unaccount(area);
 	area->vm_mm->unmap_area(area);
@@ -1980,10 +1977,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_page_prot = protection_map[flags & 0x0f];
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
-	// mm->total_vm += len >> PAGE_SHIFT;
 	vx_vmpages_add(mm, len >> PAGE_SHIFT);
 	if (flags & VM_LOCKED) {
-		// mm->locked_vm += len >> PAGE_SHIFT;
 		vx_vmlocked_add(mm, len >> PAGE_SHIFT);
 		make_pages_present(addr, addr + len);
 	}
@@ -2019,11 +2014,8 @@ void exit_mmap(struct mm_struct *mm)
 	vma = mm->mmap;
 	mm->mmap = mm->mmap_cache = NULL;
 	mm->mm_rb = RB_ROOT;
-	// mm->rss = 0;
 	vx_rsspages_sub(mm, mm->rss);
-	// mm->total_vm = 0;
 	vx_vmpages_sub(mm, mm->total_vm);
-	// mm->locked_vm = 0;
 	vx_vmlocked_sub(mm, mm->locked_vm);
 	spin_unlock(&mm->page_table_lock);
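
Note on the vx_* hunks: the "//" lines being removed are the original open-coded
counter updates (mm->total_vm, mm->locked_vm, mm->rss) that an earlier
Linux-VServer patch had left behind as comments when it introduced
vx_vmpages_add() and friends; this diff simply deletes the stale remnants. The
wrappers keep the per-mm counter semantics but additionally charge the pages to
the owning VServer context so per-context memory limits can be enforced and
reported. A minimal sketch of the wrapper pattern, assuming a hypothetical
vx_info pointer on the mm (the mm_vx_info field and vm_pages counter below are
illustrative, not the real VServer code):

/*
 * Illustrative sketch only -- not the actual Linux-VServer helper.
 * The idea: perform the same update the old open-coded line did,
 * then mirror it into a per-context counter used for limits.
 */
static inline void vx_vmpages_add(struct mm_struct *mm, unsigned long pages)
{
	mm->total_vm += pages;			/* original behaviour */
	if (mm->mm_vx_info)			/* hypothetical context pointer */
		atomic_add(pages, &mm->mm_vx_info->vm_pages);
}

static inline void vx_vmpages_sub(struct mm_struct *mm, unsigned long pages)
{
	mm->total_vm -= pages;
	if (mm->mm_vx_info)
		atomic_sub(pages, &mm->mm_vx_info->vm_pages);
}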
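
The get_unmapped_area() hunk is the one functional change: instead of
duplicating the bounds, alignment and hugepage checks across the MAP_FIXED and
non-fixed paths, the rewritten function first picks an allocation callback
(the file's f_op->get_unmapped_area when the mapping is file-backed and the
driver provides one, otherwise the mm's default policy), runs it only for
non-fixed requests, and then applies one shared validation path to whichever
address results, MAP_FIXED included; callback failures are caught early via
IS_ERR_VALUE(). For a driver author, the hook this dispatch honours is the
standard file_operations member. A hedged sketch of wiring one up (the
example_* names are hypothetical):

/*
 * Hypothetical example of supplying a per-file placement policy.
 * With the patched get_unmapped_area(), this runs for non-MAP_FIXED
 * mmap() calls on the file, and the generic TASK_SIZE / PAGE_MASK /
 * hugepage checks are still applied to the address it returns.
 */
static unsigned long example_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	/* e.g. tighten alignment here, then defer to the mm default */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static struct file_operations example_fops = {
	.get_unmapped_area	= example_get_unmapped_area,
};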