#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
-#include <linux/delayacct.h>
#include <linux/init.h>
-#include <linux/writeback.h>
-#include <linux/vs_base.h>
-#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
pmd_clear(pmd);
pte_lock_deinit(page);
pte_free_tlb(tlb, page);
- dec_zone_page_state(page, NR_PAGETABLE);
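+ /* One page-table page is going away; drop the global page-state count. */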
+ dec_page_state(nr_page_table_pages);
tlb->mm->nr_ptes--;
}
pte_free(new);
} else {
mm->nr_ptes++;
- inc_zone_page_state(new, NR_PAGETABLE);
+ inc_page_state(nr_page_table_pages);
pmd_populate(mm, pmd, new);
}
spin_unlock(&mm->page_table_lock);
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
- swap_duplicate(entry);
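+ /* Take an extra reference on the swap slot: parent and child each map it. */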
+ swap_duplicate(pte_to_swp_entry(pte));
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
- if (is_write_migration_entry(entry) &&
- is_cow_mapping(vm_flags)) {
- /*
- * COW mappings require pages in both parent
- * and child to be set to read.
- */
- make_migration_entry_read(&entry);
- pte = swp_entry_to_pte(entry);
- set_pte_at(src_mm, addr, src_pte, pte);
- }
}
goto out_set_pte;
}
return -ENOMEM;
src_pte = pte_offset_map_nested(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
- spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
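+ /* Plain lock here: presumably no spin_lock_nested()/lockdep in this tree. */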
+ spin_lock(src_ptl);
do {
/*
mark_page_accessed(page);
file_rss--;
}
- page_remove_rmap(page, vma);
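+ /* Drop the reverse mapping before handing the page to the TLB batch. */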
+ page_remove_rmap(page);
tlb_remove_page(tlb, page);
continue;
}
tlb_finish_mmu(tlb, address, end);
return end;
}
-EXPORT_SYMBOL(zap_page_range);
/*
* Do a quick page-table lookup for a single page.
{
struct page *old_page, *new_page;
pte_t entry;
- int reuse = 0, ret = VM_FAULT_MINOR;
- struct page *dirty_page = NULL;
+ int ret = VM_FAULT_MINOR;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page)
goto gotten;
- /*
- * Take out anonymous pages first, anonymous shared vmas are
- * not dirty accountable.
- */
- if (PageAnon(old_page)) {
- if (!TestSetPageLocked(old_page)) {
- reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- }
- } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
- (VM_WRITE|VM_SHARED))) {
- /*
- * Only catch write-faults on shared writable pages,
- * read-only shared pages can get COWed by
- * get_user_pages(.write=1, .force=1).
- */
- if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
- /*
- * Notify the address space that the page is about to
- * become writable so that it can prohibit this or wait
- * for the page to get into an appropriate state.
- *
- * We do this without the lock held, so that it can
- * sleep if it needs to.
- */
- page_cache_get(old_page);
- pte_unmap_unlock(page_table, ptl);
-
- if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
- goto unwritable_page;
-
- page_cache_release(old_page);
-
- /*
- * Since we dropped the lock we need to revalidate
- * the PTE as someone else may have changed it. If
- * they did, we just return, as we can count on the
- * MMU to tell us if they didn't also make it writable.
- */
- page_table = pte_offset_map_lock(mm, pmd, address,
- &ptl);
- if (!pte_same(*page_table, orig_pte))
- goto unlock;
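+ /*
+ * An anonymous page we hold exclusively can be reused in place:
+ * just make the existing pte writable instead of copying it.
+ */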
+ if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+ int reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
+ if (reuse) {
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
+ entry = pte_mkyoung(orig_pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ ptep_set_access_flags(vma, address, page_table, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ ret |= VM_FAULT_WRITE;
+ goto unlock;
}
- dirty_page = old_page;
- get_page(dirty_page);
- reuse = 1;
- }
-
- if (reuse) {
- flush_cache_page(vma, address, pte_pfn(orig_pte));
- entry = pte_mkyoung(orig_pte);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
- ret |= VM_FAULT_WRITE;
- goto unlock;
}
/*
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
- page_remove_rmap(old_page, vma);
+ page_remove_rmap(old_page);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, file_rss);
inc_mm_counter(mm, anon_rss);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- lazy_mmu_prot_update(entry);
- /*
- * Clear the pte entry and flush it first, before updating the
- * pte with the new entry. This will avoid a race condition
- * seen in the presence of one thread doing SMC and another
- * thread doing COW.
- */
- ptep_clear_flush(vma, address, page_table);
- set_pte_at(mm, address, page_table, entry);
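+ /*
+ * ptep_establish() installs the new pte together with the flush,
+ * preserving the ordering that keeps one thread's SMC from racing
+ * another thread's COW.
+ */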
+ ptep_establish(vma, address, page_table, entry);
update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page);
page_add_new_anon_rmap(new_page, vma, address);
page_cache_release(old_page);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (dirty_page) {
- set_page_dirty_balance(dirty_page);
- put_page(dirty_page);
- }
return ret;
oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
-
-unwritable_page:
- page_cache_release(old_page);
- return VM_FAULT_SIGBUS;
}
/*
return 0;
}
-EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */
+EXPORT_SYMBOL(vmtruncate_range);
/*
* Primitive swap readahead code. We simply read an aligned block of
goto out;
entry = pte_to_swp_entry(orig_pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mm, pmd, address);
- goto out;
- }
- delayacct_set_flag(DELAYACCT_PF_SWAPIN);
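+ /*
+ * Retry point: if the page gets migrated out of the swap cache
+ * while we sleep, come back and repeat the lookup.
+ */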
+again:
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte)))
ret = VM_FAULT_OOM;
- delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
- count_vm_event(PGMAJFAULT);
+ inc_page_state(pgmajfault);
grab_swap_token();
}
ret = VM_FAULT_OOM;
goto out;
}
-
- delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
mark_page_accessed(page);
lock_page(page);
+ if (!PageSwapCache(page)) {
+ /* Page migration has occurred: retry the lookup */
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
/*
* Back out if somebody else already faulted in this pte.
unsigned int sequence = 0;
int ret = VM_FAULT_MINOR;
int anon = 0;
- struct page *dirty_page = NULL;
pte_unmap(page_table);
BUG_ON(vma->vm_flags & VM_PFNMAP);
/*
* Should we do an early C-O-W break?
*/
- if (write_access) {
- if (!(vma->vm_flags & VM_SHARED)) {
- struct page *page;
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ struct page *page;
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!page)
- goto oom;
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- new_page = page;
- anon = 1;
-
- } else {
- /* if the page will be shareable, see if the backing
- * address space wants to know that the page is about
- * to become writable */
- if (vma->vm_ops->page_mkwrite &&
- vma->vm_ops->page_mkwrite(vma, new_page) < 0
- ) {
- page_cache_release(new_page);
- return VM_FAULT_SIGBUS;
- }
- }
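+ /*
+ * Private mapping: give the faulter its own anonymous copy of
+ * the page instead of letting it write the file-backed page.
+ */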
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!page)
+ goto oom;
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ new_page = page;
+ anon = 1;
}
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
} else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
- if (write_access) {
- dirty_page = new_page;
- get_page(dirty_page);
- }
}
} else {
/* One of our sibling threads was faster, back out. */
lazy_mmu_prot_update(entry);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (dirty_page) {
- set_page_dirty_balance(dirty_page);
- put_page(dirty_page);
- }
return ret;
oom:
page_cache_release(new_page);
__set_current_state(TASK_RUNNING);
- count_vm_event(PGFAULT);
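+ /* Every fault bumps the global pgfault page-state counter. */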
+ inc_page_state(pgfault);
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, write_access);