X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fmemory.c;h=9f75bb2caf43e2b9d9a46330c405f8048187fc30;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=90acc557965345e9093a5af8ccdd2f60e3f3bf50;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/mm/memory.c b/mm/memory.c
index 90acc5579..9f75bb2ca 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,11 +47,7 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
-#include 
 #include 
 #include 
 #include 
@@ -130,7 +126,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 	pmd_clear(pmd);
 	pte_lock_deinit(page);
 	pte_free_tlb(tlb, page);
-	dec_zone_page_state(page, NR_PAGETABLE);
+	dec_page_state(nr_page_table_pages);
 	tlb->mm->nr_ptes--;
 }
 
@@ -315,7 +311,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 		pte_free(new);
 	} else {
 		mm->nr_ptes++;
-		inc_zone_page_state(new, NR_PAGETABLE);
+		inc_page_state(nr_page_table_pages);
 		pmd_populate(mm, pmd, new);
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -439,9 +435,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	/* pte contains position in swap or file, so copy. */
 	if (unlikely(!pte_present(pte))) {
 		if (!pte_file(pte)) {
-			swp_entry_t entry = pte_to_swp_entry(pte);
-
-			swap_duplicate(entry);
+			swap_duplicate(pte_to_swp_entry(pte));
 			/* make sure dst_mm is on swapoff's mmlist. */
 			if (unlikely(list_empty(&dst_mm->mmlist))) {
 				spin_lock(&mmlist_lock);
@@ -450,16 +444,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 					 &src_mm->mmlist);
 				spin_unlock(&mmlist_lock);
 			}
-			if (is_write_migration_entry(entry) &&
-					is_cow_mapping(vm_flags)) {
-				/*
-				 * COW mappings require pages in both parent
-				 * and child to be set to read.
-				 */
-				make_migration_entry_read(&entry);
-				pte = swp_entry_to_pte(entry);
-				set_pte_at(src_mm, addr, src_pte, pte);
-			}
 		}
 		goto out_set_pte;
 	}
@@ -508,7 +492,7 @@ again:
 		return -ENOMEM;
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
-	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+	spin_lock(src_ptl);
 
 	do {
 		/*
@@ -681,7 +665,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				mark_page_accessed(page);
 				file_rss--;
 			}
-			page_remove_rmap(page, vma);
+			page_remove_rmap(page);
 			tlb_remove_page(tlb, page);
 			continue;
 		}
@@ -895,7 +879,6 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(tlb, address, end);
 	return end;
 }
-EXPORT_SYMBOL(zap_page_range);
 
 /*
  * Do a quick page-table lookup for a single page.
@@ -1579,71 +1562,25 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = VM_FAULT_MINOR;
-	struct page *dirty_page = NULL;
+	int ret = VM_FAULT_MINOR;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	/*
-	 * Take out anonymous pages first, anonymous shared vmas are
-	 * not dirty accountable.
-	 */
-	if (PageAnon(old_page)) {
-		if (!TestSetPageLocked(old_page)) {
-			reuse = can_share_swap_page(old_page);
-			unlock_page(old_page);
-		}
-	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
-					(VM_WRITE|VM_SHARED))) {
-		/*
-		 * Only catch write-faults on shared writable pages,
-		 * read-only shared pages can get COWed by
-		 * get_user_pages(.write=1, .force=1).
-		 */
-		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-			/*
-			 * Notify the address space that the page is about to
-			 * become writable so that it can prohibit this or wait
-			 * for the page to get into an appropriate state.
-			 *
-			 * We do this without the lock held, so that it can
-			 * sleep if it needs to.
-			 */
-			page_cache_get(old_page);
-			pte_unmap_unlock(page_table, ptl);
-
-			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
-				goto unwritable_page;
-
-			page_cache_release(old_page);
-
-			/*
-			 * Since we dropped the lock we need to revalidate
-			 * the PTE as someone else may have changed it. If
-			 * they did, we just return, as we can count on the
-			 * MMU to tell us if they didn't also make it writable.
-			 */
-			page_table = pte_offset_map_lock(mm, pmd, address,
-							 &ptl);
-			if (!pte_same(*page_table, orig_pte))
-				goto unlock;
+	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+		int reuse = can_share_swap_page(old_page);
+		unlock_page(old_page);
+		if (reuse) {
+			flush_cache_page(vma, address, pte_pfn(orig_pte));
+			entry = pte_mkyoung(orig_pte);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			ptep_set_access_flags(vma, address, page_table, entry, 1);
+			update_mmu_cache(vma, address, entry);
+			lazy_mmu_prot_update(entry);
+			ret |= VM_FAULT_WRITE;
+			goto unlock;
 		}
-		dirty_page = old_page;
-		get_page(dirty_page);
-		reuse = 1;
-	}
-
-	if (reuse) {
-		flush_cache_page(vma, address, pte_pfn(orig_pte));
-		entry = pte_mkyoung(orig_pte);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		ptep_set_access_flags(vma, address, page_table, entry, 1);
-		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
-		ret |= VM_FAULT_WRITE;
-		goto unlock;
 	}
 
 	/*
@@ -1672,7 +1609,7 @@ gotten:
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
-			page_remove_rmap(old_page, vma);
+			page_remove_rmap(old_page);
 			if (!PageAnon(old_page)) {
 				dec_mm_counter(mm, file_rss);
 				inc_mm_counter(mm, anon_rss);
@@ -1682,16 +1619,9 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		lazy_mmu_prot_update(entry);
-		/*
-		 * Clear the pte entry and flush it first, before updating the
-		 * pte with the new entry. This will avoid a race condition
-		 * seen in the presence of one thread doing SMC and another
-		 * thread doing COW.
-		 */
-		ptep_clear_flush(vma, address, page_table);
-		set_pte_at(mm, address, page_table, entry);
+		ptep_establish(vma, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
@@ -1705,19 +1635,11 @@ gotten:
 		page_cache_release(old_page);
unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
-		put_page(dirty_page);
-	}
 	return ret;
 oom:
 	if (old_page)
 		page_cache_release(old_page);
 	return VM_FAULT_OOM;
-
-unwritable_page:
-	page_cache_release(old_page);
-	return VM_FAULT_SIGBUS;
 }
 
 /*
@@ -1997,7 +1919,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
 	return 0;
 }
-EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */
+EXPORT_SYMBOL(vmtruncate_range);
 
 /*
  * Primitive swap readahead code. We simply read an aligned block of
@@ -2074,11 +1996,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 
 	entry = pte_to_swp_entry(orig_pte);
-	if (is_migration_entry(entry)) {
-		migration_entry_wait(mm, pmd, address);
-		goto out;
-	}
-	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+again:
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -2091,13 +2009,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 			if (likely(pte_same(*page_table, orig_pte)))
 				ret = VM_FAULT_OOM;
-			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 			goto unlock;
 		}
 
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
-		count_vm_event(PGMAJFAULT);
+		inc_page_state(pgmajfault);
 		grab_swap_token();
 	}
 
@@ -2105,10 +2022,14 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret = VM_FAULT_OOM;
 		goto out;
 	}
-
-	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	mark_page_accessed(page);
 	lock_page(page);
+	if (!PageSwapCache(page)) {
+		/* Page migration has occurred */
+		unlock_page(page);
+		page_cache_release(page);
+		goto again;
+	}
 
 	/*
	 * Back out if somebody else already faulted in this pte.
@@ -2248,7 +2169,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
-	struct page *dirty_page = NULL;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2280,31 +2200,18 @@ retry:
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	if (write_access) {
-		if (!(vma->vm_flags & VM_SHARED)) {
-			struct page *page;
+	if (write_access && !(vma->vm_flags & VM_SHARED)) {
+		struct page *page;
 
-			if (unlikely(anon_vma_prepare(vma)))
-				goto oom;
-			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-			if (!page)
-				goto oom;
-			copy_user_highpage(page, new_page, address);
-			page_cache_release(new_page);
-			new_page = page;
-			anon = 1;
-
-		} else {
-			/* if the page will be shareable, see if the backing
-			 * address space wants to know that the page is about
-			 * to become writable */
-			if (vma->vm_ops->page_mkwrite &&
-			    vma->vm_ops->page_mkwrite(vma, new_page) < 0
-			    ) {
-				page_cache_release(new_page);
-				return VM_FAULT_SIGBUS;
-			}
-		}
+		if (unlikely(anon_vma_prepare(vma)))
+			goto oom;
+		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+		if (!page)
+			goto oom;
+		copy_user_highpage(page, new_page, address);
+		page_cache_release(new_page);
+		new_page = page;
+		anon = 1;
 	}
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2346,10 +2253,6 @@ retry:
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
-			if (write_access) {
-				dirty_page = new_page;
-				get_page(dirty_page);
-			}
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
@@ -2362,10 +2265,6 @@ retry:
 	lazy_mmu_prot_update(entry);
unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
-		put_page(dirty_page);
-	}
 	return ret;
 oom:
 	page_cache_release(new_page);
@@ -2490,7 +2389,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	__set_current_state(TASK_RUNNING);
 
-	count_vm_event(PGFAULT);
+	inc_page_state(pgfault);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, write_access);
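
Note on the do_swap_page() hunks above: instead of recognising a migration
PTE and sleeping in migration_entry_wait(), this tree makes the fault path
retry itself. lookup_swap_cache() runs without the page lock held, so by the
time lock_page() returns, page migration may already have taken the page out
of the swap cache; the !PageSwapCache() test catches that and jumps back to
the again: label to repeat the lookup. The following userspace sketch is a
minimal analogue of that lookup/lock/revalidate/retry pattern. All names in
it are invented for illustration (swap_cache_entry, cache_lookup() and
fault_one() are not kernel APIs), and a pthread mutex stands in for the
kernel's page lock.

/*
 * Userspace sketch of the lookup/lock/revalidate/retry pattern used by
 * the do_swap_page() hunk above.  Illustrative only: these names are
 * made up, and a pthread mutex stands in for the page lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct swap_cache_entry {
	pthread_mutex_t lock;		/* analogue of the page lock */
	bool in_cache;			/* analogue of PageSwapCache() */
};

static struct swap_cache_entry slot = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.in_cache = true,
};

/* Analogue of lookup_swap_cache(): unlocked, so the result can go stale. */
static struct swap_cache_entry *cache_lookup(void)
{
	return slot.in_cache ? &slot : NULL;
}

static void fault_one(void)
{
	struct swap_cache_entry *p;
again:
	p = cache_lookup();
	if (!p)
		return;			/* would fall back to reading swap */

	pthread_mutex_lock(&p->lock);	/* may block; the world can change */
	if (!p->in_cache) {
		/* Migration won the race while we slept: back out, retry. */
		pthread_mutex_unlock(&p->lock);
		goto again;
	}
	/* Lock held and still in the cache: safe to use from here on. */
	printf("entry is stable under its lock\n");
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	fault_one();
	return 0;
}

The point of the pattern is the ordering: the racy lookup is allowed to go
stale, but once the lock is held and the in-cache test passes, the entry can
no longer be migrated away under the holder, which is the guarantee
do_swap_page() needs before it goes on to re-check the pte.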