X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fmremap.c;h=989af55ddf21602cee584a426b1a4a0a8c74043f;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=e2a1d61f56d6b4d7390c6334c4cb8a02c841bcbf;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/mm/mremap.c b/mm/mremap.c
index e2a1d61f5..989af55dd 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -13,81 +13,68 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
-#include
 #include
 #include

-static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
+static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd))
-		goto end;
-	if (pgd_bad(*pgd)) {
-		pgd_ERROR(*pgd);
-		pgd_clear(pgd);
-		goto end;
-	}
+	if (pgd_none_or_clear_bad(pgd))
+		return NULL;

-	pmd = pmd_offset(pgd, addr);
-	if (pmd_none(*pmd))
-		goto end;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		goto end;
-	}
+	pud = pud_offset(pgd, addr);
+	if (pud_none_or_clear_bad(pud))
+		return NULL;

-	pte = pte_offset_map_nested(pmd, addr);
-	if (pte_none(*pte)) {
-		pte_unmap_nested(pte);
-		pte = NULL;
-	}
-end:
-	return pte;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none_or_clear_bad(pmd))
+		return NULL;
+
+	return pmd;
 }

-static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;

 	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd))
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
 		return NULL;
-	pmd = pmd_offset(pgd, addr);
-	if (!pmd_present(*pmd))
+
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
 		return NULL;
-	return pte_offset_map(pmd, addr);
-}

-static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
-{
-	pmd_t *pmd;
-	pte_t *pte = NULL;
+	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+		return NULL;

-	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
-	if (pmd)
-		pte = pte_alloc_map(mm, pmd, addr);
-	return pte;
+	return pmd;
 }

-static int
-move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
+static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+		unsigned long old_addr, unsigned long old_end,
+		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr)
 {
 	struct address_space *mapping = NULL;
 	struct mm_struct *mm = vma->vm_mm;
-	int error = 0;
-	pte_t *src, *dst;
+	pte_t *old_pte, *new_pte, pte;
+	spinlock_t *old_ptl, *new_ptl;

 	if (vma->vm_file) {
 		/*
@@ -98,70 +85,73 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
 		 */
 		mapping = vma->vm_file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
+		if (new_vma->vm_truncate_count &&
+		    new_vma->vm_truncate_count != vma->vm_truncate_count)
+			new_vma->vm_truncate_count = 0;
 	}
-	spin_lock(&mm->page_table_lock);

-	src = get_one_pte_map_nested(mm, old_addr);
-	if (src) {
-		/*
-		 * Look to see whether alloc_one_pte_map needs to perform a
-		 * memory allocation.  If it does then we need to drop the
-		 * atomic kmap
-		 */
-		dst = get_one_pte_map(mm, new_addr);
-		if (unlikely(!dst)) {
-			pte_unmap_nested(src);
-			if (mapping)
-				spin_unlock(&mapping->i_mmap_lock);
-			dst = alloc_one_pte_map(mm, new_addr);
-			if (mapping && !spin_trylock(&mapping->i_mmap_lock)) {
-				spin_unlock(&mm->page_table_lock);
-				spin_lock(&mapping->i_mmap_lock);
-				spin_lock(&mm->page_table_lock);
-			}
-			src = get_one_pte_map_nested(mm, old_addr);
-		}
-		/*
-		 * Since alloc_one_pte_map can drop and re-acquire
-		 * page_table_lock, we should re-check the src entry...
-		 */
-		if (src) {
-			if (dst) {
-				pte_t pte;
-				pte = ptep_clear_flush(vma, old_addr, src);
-				set_pte(dst, pte);
-			} else
-				error = -ENOMEM;
-			pte_unmap_nested(src);
-		}
-		if (dst)
-			pte_unmap(dst);
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * pte locks because exclusive mmap_sem prevents deadlock.
+	 */
+	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
+	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_ptl = pte_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+				   new_pte++, new_addr += PAGE_SIZE) {
+		if (pte_none(*old_pte))
+			continue;
+		pte = ptep_clear_flush(vma, old_addr, old_pte);
+		/* ZERO_PAGE can be dependant on virtual addr */
+		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+		set_pte_at(mm, new_addr, new_pte, pte);
 	}
-	spin_unlock(&mm->page_table_lock);
+
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	pte_unmap_nested(new_pte - 1);
+	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
-	return error;
 }

+#define LATENCY_LIMIT	(64 * PAGE_SIZE)
+
 static unsigned long move_page_tables(struct vm_area_struct *vma,
-	unsigned long new_addr, unsigned long old_addr,
-	unsigned long len)
+		unsigned long old_addr, struct vm_area_struct *new_vma,
+		unsigned long new_addr, unsigned long len)
 {
-	unsigned long offset;
+	unsigned long extent, next, old_end;
+	pmd_t *old_pmd, *new_pmd;

-	flush_cache_range(vma, old_addr, old_addr + len);
+	old_end = old_addr + len;
+	flush_cache_range(vma, old_addr, old_end);

-	/*
-	 * This is not the clever way to do this, but we're taking the
-	 * easy way out on the assumption that most remappings will be
-	 * only a few pages.. This also makes error recovery easier.
-	 */
-	for (offset = 0; offset < len; offset += PAGE_SIZE) {
-		if (move_one_page(vma, old_addr+offset, new_addr+offset) < 0)
-			break;
+	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
 		cond_resched();
+		next = (old_addr + PMD_SIZE) & PMD_MASK;
+		if (next - 1 > old_end)
+			next = old_end;
+		extent = next - old_addr;
+		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
+		if (!old_pmd)
+			continue;
+		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+		if (!new_pmd)
+			break;
+		next = (new_addr + PMD_SIZE) & PMD_MASK;
+		if (extent > next - new_addr)
+			extent = next - new_addr;
+		if (extent > LATENCY_LIMIT)
+			extent = LATENCY_LIMIT;
+		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
+				new_vma, new_pmd, new_addr);
 	}
-	return offset;
+
+	return len + old_addr - old_end;	/* how much done */
 }

 static unsigned long move_vma(struct vm_area_struct *vma,
@@ -174,6 +164,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	unsigned long new_pgoff;
 	unsigned long moved_len;
 	unsigned long excess = 0;
+	unsigned long hiwater_vm;
 	int split = 0;

 	/*
@@ -188,14 +179,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (!new_vma)
 		return -ENOMEM;

-	moved_len = move_page_tables(vma, new_addr, old_addr, old_len);
+	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
 	if (moved_len < old_len) {
 		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables still there,
 		 * and then proceed to unmap new area instead of old.
 		 */
-		move_page_tables(new_vma, old_addr, new_addr, moved_len);
+		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -211,11 +202,25 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		split = 1;
 	}

+	/*
+	 * If we failed to move page tables we still do total_vm increment
+	 * since do_munmap() will decrement it by old_len == new_len.
+	 *
+	 * Since total_vm is about to be raised artificially high for a
+	 * moment, we need to restore high watermark afterwards: if stats
+	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
+	 * If this were a serious issue, we'd add a flag to do_munmap().
+	 */
+	hiwater_vm = mm->hiwater_vm;
+	vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
+	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
+
 	if (do_munmap(mm, old_addr, old_len) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
 		vm_unacct_memory(excess >> PAGE_SHIFT);
 		excess = 0;
 	}
+	mm->hiwater_vm = hiwater_vm;

 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
 	if (excess) {
@@ -224,10 +229,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 			vma->vm_next->vm_flags |= VM_ACCOUNT;
 	}

-	// mm->total_vm += new_len >> PAGE_SHIFT;
-	vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		// mm->locked_vm += new_len >> PAGE_SHIFT;
 		vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
 		if (new_len > old_len)
 			make_pages_present(new_addr + old_len,
@@ -248,6 +250,7 @@ unsigned long do_mremap(unsigned long addr,
 	unsigned long old_len, unsigned long new_len,
 	unsigned long flags, unsigned long new_addr)
 {
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
@@ -288,7 +291,7 @@ unsigned long do_mremap(unsigned long addr,
 		if ((addr <= new_addr) && (addr+old_len) > new_addr)
 			goto out;

-		ret = do_munmap(current->mm, new_addr, new_len);
+		ret = do_munmap(mm, new_addr, new_len);
 		if (ret)
 			goto out;
 	}
@@ -299,7 +302,7 @@ unsigned long do_mremap(unsigned long addr,
 	 * do_munmap does all the needed commit accounting
 	 */
 	if (old_len >= new_len) {
-		ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
+		ret = do_munmap(mm, addr+new_len, old_len - new_len);
 		if (ret && old_len != new_len)
 			goto out;
 		ret = addr;
@@ -312,7 +315,7 @@ unsigned long do_mremap(unsigned long addr,
 	 * Ok, we need to grow.. or relocate.
 	 */
 	ret = -EFAULT;
-	vma = find_vma(current->mm, addr);
+	vma = find_vma(mm, addr);
 	if (!vma || vma->vm_start > addr)
 		goto out;
 	if (is_vm_hugetlb_page(vma)) {
@@ -322,28 +325,26 @@ unsigned long do_mremap(unsigned long addr,
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
-	if (vma->vm_flags & VM_DONTEXPAND) {
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
 		if (new_len > old_len)
 			goto out;
 	}
 	if (vma->vm_flags & VM_LOCKED) {
-		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
+		unsigned long locked, lock_limit;
+		locked = mm->locked_vm << PAGE_SHIFT;
+		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 		locked += new_len - old_len;
 		ret = -EAGAIN;
-		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			goto out;
-		ret = -ENOMEM;
 		if (!vx_vmlocked_avail(current->mm,
 			(new_len - old_len) >> PAGE_SHIFT))
 			goto out;
 	}
-	ret = -ENOMEM;
-	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
-		> current->rlim[RLIMIT_AS].rlim_cur)
-		goto out;
-	/* check context space, maybe only Private writable mapping? */
-	if (!vx_vmpages_avail(current->mm, (new_len - old_len) >> PAGE_SHIFT))
+	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
+		ret = -ENOMEM;
 		goto out;
+	}

 	if (vma->vm_flags & VM_ACCOUNT) {
 		charged = (new_len - old_len) >> PAGE_SHIFT;
@@ -367,11 +368,10 @@ unsigned long do_mremap(unsigned long addr,
 			vma_adjust(vma, vma->vm_start, addr + new_len,
 				   vma->vm_pgoff, NULL);

-			// current->mm->total_vm += pages;
-			vx_vmpages_add(current->mm, pages);
+			vx_vmpages_add(mm, pages);
+			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
 			if (vma->vm_flags & VM_LOCKED) {
-				// current->mm->locked_vm += pages;
-				vx_vmlocked_add(vma->vm_mm, pages);
+				vx_vmlocked_add(mm, pages);
 				make_pages_present(addr + old_len,
 						   addr + new_len);
 			}
@@ -391,8 +391,8 @@ unsigned long do_mremap(unsigned long addr,
 		if (vma->vm_flags & VM_MAYSHARE)
 			map_flags |= MAP_SHARED;

-		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-					vma->vm_pgoff, map_flags);
+		new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
+					vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
 		ret = new_addr;
 		if (new_addr & ~PAGE_MASK)
 			goto out;
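
The heart of this patch is the rewritten move_page_tables(): rather than moving one page per iteration under page_table_lock, it walks the range in extents, each clamped first to the old-side PMD boundary, then to the new-side PMD boundary, then to LATENCY_LIMIT, and hands each extent to move_ptes() to shift under the two pte locks. Below is a minimal standalone sketch of just that clamping arithmetic, so the stepping can be observed in userspace. It is illustrative only: extent() is a hypothetical helper mirroring the kernel loop, and the PAGE_SIZE/PMD_SIZE values are x86-64 assumptions (4 KiB pages, 2 MiB PMDs), not taken from kernel headers.

/*
 * Standalone sketch (not kernel code) of the extent clamping in the
 * new move_page_tables().  Constants below are x86-64 assumptions.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define LATENCY_LIMIT	(64 * PAGE_SIZE)	/* 256 KiB */

/* Hypothetical helper: how far one iteration may advance both cursors. */
static unsigned long extent(unsigned long old_addr, unsigned long old_end,
			    unsigned long new_addr)
{
	unsigned long next, ext;

	next = (old_addr + PMD_SIZE) & PMD_MASK;	/* old-side PMD boundary */
	if (next - 1 > old_end)
		next = old_end;
	ext = next - old_addr;

	next = (new_addr + PMD_SIZE) & PMD_MASK;	/* new-side PMD boundary */
	if (ext > next - new_addr)
		ext = next - new_addr;
	if (ext > LATENCY_LIMIT)	/* bound pte-lock hold time per batch */
		ext = LATENCY_LIMIT;
	return ext;
}

int main(void)
{
	/* Both cursors deliberately misaligned with respect to PMD_SIZE. */
	unsigned long old_addr = 0x1ff000UL, old_end = old_addr + 0x400000UL;
	unsigned long new_addr = 0x7001000UL;

	while (old_addr < old_end) {
		unsigned long e = extent(old_addr, old_end, new_addr);

		printf("move_ptes: old %#lx..%#lx -> new %#lx (%lu KiB)\n",
		       old_addr, old_addr + e, new_addr, e >> 10);
		old_addr += e;
		new_addr += e;
	}
	return 0;
}

On these inputs the first step is a single 4 KiB page (reaching the old side's 2 MiB boundary), after which the walk proceeds in 256 KiB LATENCY_LIMIT chunks, shrinking wherever a PMD boundary on either side intervenes. That batching, plus taking the pte locks once per extent in move_ptes(), is what replaces the old one-page-at-a-time move_one_page() loop.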