#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
-#include <linux/delayacct.h>
#include <linux/init.h>
-#include <linux/writeback.h>
-#include <linux/vs_base.h>
-#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
- return 1;
+ return 0;
}
__setup("norandmaps", disable_randmaps);
pmd_clear(pmd);
pte_lock_deinit(page);
pte_free_tlb(tlb, page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ dec_page_state(nr_page_table_pages);
tlb->mm->nr_ptes--;
}
anon_vma_unlink(vma);
unlink_file_vma(vma);
- if (is_vm_hugetlb_page(vma)) {
+ if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
} else {
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
- && !is_vm_hugetlb_page(next)) {
+ && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+ HPAGE_SIZE)) {
vma = next;
next = vma->vm_next;
anon_vma_unlink(vma);
pte_free(new);
} else {
mm->nr_ptes++;
- inc_zone_page_state(new, NR_PAGETABLE);
+ inc_page_state(nr_page_table_pages);
pmd_populate(mm, pmd, new);
}
spin_unlock(&mm->page_table_lock);
{
unsigned long pfn = pte_pfn(pte);
- if (unlikely(vma->vm_flags & VM_PFNMAP)) {
+ if (vma->vm_flags & VM_PFNMAP) {
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
* we should just do "return pfn_to_page(pfn)", but
* in the meantime we check that we get a valid pfn,
* and that the resulting page looks ok.
+ *
+ * Remove this test eventually!
*/
if (unlikely(!pfn_valid(pfn))) {
- if (!(vma->vm_flags & VM_RESERVED))
- print_bad_pte(vma, pte, addr);
+ print_bad_pte(vma, pte, addr);
return NULL;
}
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
- swap_duplicate(entry);
+ swap_duplicate(pte_to_swp_entry(pte));
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
- if (is_write_migration_entry(entry) &&
- is_cow_mapping(vm_flags)) {
- /*
- * COW mappings require pages in both parent
- * and child to be set to read.
- */
- make_migration_entry_read(&entry);
- pte = swp_entry_to_pte(entry);
- set_pte_at(src_mm, addr, src_pte, pte);
- }
}
goto out_set_pte;
}
return -ENOMEM;
src_pte = pte_offset_map_nested(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
- spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ spin_lock(src_ptl);
do {
/*
mark_page_accessed(page);
file_rss--;
}
- page_remove_rmap(page, vma);
+ page_remove_rmap(page);
tlb_remove_page(tlb, page);
continue;
}
tlb_finish_mmu(tlb, address, end);
return end;
}
-EXPORT_SYMBOL(zap_page_range);
/*
* Do a quick page-table lookup for a single page.
continue;
}
-#ifdef CONFIG_XEN
- if (vma && (vma->vm_flags & VM_FOREIGN)) {
- struct page **map = vma->vm_private_data;
- int offset = (start - vma->vm_start) >> PAGE_SHIFT;
- if (map[offset] != NULL) {
- if (pages) {
- struct page *page = map[offset];
-
- pages[i] = page;
- get_page(page);
- }
- if (vmas)
- vmas[i] = vma;
- i++;
- start += PAGE_SIZE;
- len--;
- continue;
- }
- }
-#endif
if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
}
if (pages) {
pages[i] = page;
-
- flush_anon_page(page, start);
flush_dcache_page(page);
}
if (vmas)
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
- * (see split_page()).
+ * (which mainly means doing "set_page_count(page, 1)" for each
+ * sub-page, and then freeing the sub-pages one by one rather than
+ * freeing the whole allocation as a compound page).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
}
EXPORT_SYMBOL(remap_pfn_range);
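As an aside, here is a minimal, hypothetical sketch of the usage the vm_insert_page() comment above describes: a driver mmap handler that hands a single order-0 kernel page to user space. The demo_mmap name, the one-page restriction and the teardown arrangement are illustrative assumptions, not part of this patch.

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical driver mmap handler (illustrative only): hand one
 * zeroed, non-compound kernel page to user space with vm_insert_page().
 * Assumes the caller mmap()s exactly one page.
 */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page;
	int err;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	/* An individual order-0 page, so vm_insert_page() is safe here. */
	err = vm_insert_page(vma, vma->vm_start, page);
	if (err)
		__free_page(page);
	/*
	 * On success the driver keeps its own page reference and must
	 * __free_page() it when the device goes away.
	 */
	return err;
}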
-#ifdef CONFIG_XEN
-static inline int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pte_t *pte;
- int err;
- struct page *pmd_page;
- spinlock_t *ptl;
-
- pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
- pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (!pte)
- return -ENOMEM;
-
- BUG_ON(pmd_huge(*pmd));
-
- pmd_page = pmd_page(*pmd);
-
- do {
- err = fn(pte, pmd_page, addr, data);
- if (err)
- break;
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (mm != &init_mm)
- pte_unmap_unlock(pte-1, ptl);
- return err;
-}
-
-static inline int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pmd_t *pmd;
- unsigned long next;
- int err;
-
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
- err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
- if (err)
- break;
- } while (pmd++, addr = next, addr != end);
- return err;
-}
-
-static inline int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data)
-{
- pud_t *pud;
- unsigned long next;
- int err;
-
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
- err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
- if (err)
- break;
- } while (pud++, addr = next, addr != end);
- return err;
-}
-
-/*
- * Scan a region of virtual memory, filling in page tables as necessary
- * and calling a provided function on each leaf page table.
- */
-int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
- unsigned long size, pte_fn_t fn, void *data)
-{
- pgd_t *pgd;
- unsigned long next;
- unsigned long end = addr + size;
- int err;
-
- BUG_ON(addr >= end);
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
- if (err)
- break;
- } while (pgd++, addr = next, addr != end);
- return err;
-}
-EXPORT_SYMBOL_GPL(apply_to_page_range);
-#endif
-
/*
* handle_pte_fault chooses page fault handler according to an entry
* which was read non-atomically. Before making any commitment, on
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
memset(kaddr, 0, PAGE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
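+ /*
+ * The destination page is about to be mapped into user space; make
+ * the copy just written through the kernel mapping visible to that
+ * user mapping on architectures with aliasing D-caches.
+ */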
+ flush_dcache_page(dst);
return;
}
{
struct page *old_page, *new_page;
pte_t entry;
- int reuse = 0, ret = VM_FAULT_MINOR;
- struct page *dirty_page = NULL;
+ int ret = VM_FAULT_MINOR;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page)
goto gotten;
- /*
- * Take out anonymous pages first, anonymous shared vmas are
- * not dirty accountable.
- */
- if (PageAnon(old_page)) {
- if (!TestSetPageLocked(old_page)) {
- reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- }
- } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
- (VM_WRITE|VM_SHARED))) {
- /*
- * Only catch write-faults on shared writable pages,
- * read-only shared pages can get COWed by
- * get_user_pages(.write=1, .force=1).
- */
- if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
- /*
- * Notify the address space that the page is about to
- * become writable so that it can prohibit this or wait
- * for the page to get into an appropriate state.
- *
- * We do this without the lock held, so that it can
- * sleep if it needs to.
- */
- page_cache_get(old_page);
- pte_unmap_unlock(page_table, ptl);
-
- if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
- goto unwritable_page;
-
- page_cache_release(old_page);
-
- /*
- * Since we dropped the lock we need to revalidate
- * the PTE as someone else may have changed it. If
- * they did, we just return, as we can count on the
- * MMU to tell us if they didn't also make it writable.
- */
- page_table = pte_offset_map_lock(mm, pmd, address,
- &ptl);
- if (!pte_same(*page_table, orig_pte))
- goto unlock;
+ if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+ int reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
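+ /*
+ * If we are the only user of this anonymous page, reuse it in
+ * place: mark the existing PTE young, dirty and writable instead
+ * of allocating a copy.
+ */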
+ if (reuse) {
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
+ entry = pte_mkyoung(orig_pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ ptep_set_access_flags(vma, address, page_table, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ ret |= VM_FAULT_WRITE;
+ goto unlock;
}
- dirty_page = old_page;
- get_page(dirty_page);
- reuse = 1;
- }
-
- if (reuse) {
- flush_cache_page(vma, address, pte_pfn(orig_pte));
- entry = pte_mkyoung(orig_pte);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
- ret |= VM_FAULT_WRITE;
- goto unlock;
}
/*
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
- page_remove_rmap(old_page, vma);
+ page_remove_rmap(old_page);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, file_rss);
inc_mm_counter(mm, anon_rss);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- lazy_mmu_prot_update(entry);
- /*
- * Clear the pte entry and flush it first, before updating the
- * pte with the new entry. This will avoid a race condition
- * seen in the presence of one thread doing SMC and another
- * thread doing COW.
- */
- ptep_clear_flush(vma, address, page_table);
- set_pte_at(mm, address, page_table, entry);
+ ptep_establish(vma, address, page_table, entry);
update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page);
page_add_new_anon_rmap(new_page, vma, address);
page_cache_release(old_page);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (dirty_page) {
- set_page_dirty_balance(dirty_page);
- put_page(dirty_page);
- }
return ret;
oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
-
-unwritable_page:
- page_cache_release(old_page);
- return VM_FAULT_SIGBUS;
}
/*
return 0;
}
-EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */
+EXPORT_SYMBOL(vmtruncate_range);
/*
* Primitive swap readahead code. We simply read an aligned block of
goto out;
entry = pte_to_swp_entry(orig_pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mm, pmd, address);
- goto out;
- }
- delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+again:
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte)))
ret = VM_FAULT_OOM;
- delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
- count_vm_event(PGMAJFAULT);
+ inc_page_state(pgmajfault);
grab_swap_token();
}
ret = VM_FAULT_OOM;
goto out;
}
-
- delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
mark_page_accessed(page);
lock_page(page);
+ if (!PageSwapCache(page)) {
+ /* Page migration has occurred */
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
/*
* Back out if somebody else already faulted in this pte.
unsigned int sequence = 0;
int ret = VM_FAULT_MINOR;
int anon = 0;
- struct page *dirty_page = NULL;
pte_unmap(page_table);
BUG_ON(vma->vm_flags & VM_PFNMAP);
/*
* Should we do an early C-O-W break?
*/
- if (write_access) {
- if (!(vma->vm_flags & VM_SHARED)) {
- struct page *page;
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ struct page *page;
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!page)
- goto oom;
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- new_page = page;
- anon = 1;
-
- } else {
- /* if the page will be shareable, see if the backing
- * address space wants to know that the page is about
- * to become writable */
- if (vma->vm_ops->page_mkwrite &&
- vma->vm_ops->page_mkwrite(vma, new_page) < 0
- ) {
- page_cache_release(new_page);
- return VM_FAULT_SIGBUS;
- }
- }
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!page)
+ goto oom;
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ new_page = page;
+ anon = 1;
}
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
} else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
- if (write_access) {
- dirty_page = new_page;
- get_page(dirty_page);
- }
}
} else {
/* One of our sibling threads was faster, back out. */
lazy_mmu_prot_update(entry);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (dirty_page) {
- set_page_dirty_balance(dirty_page);
- put_page(dirty_page);
- }
return ret;
oom:
page_cache_release(new_page);
__set_current_state(TASK_RUNNING);
- count_vm_event(PGFAULT);
+ inc_page_state(pgfault);
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, write_access);
if (!vma)
return -1;
write = (vma->vm_flags & VM_WRITE) != 0;
- BUG_ON(addr >= end);
- BUG_ON(end > vma->vm_end);
+ if (addr >= end)
+ BUG();
+ if (end > vma->vm_end)
+ BUG();
len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
ret = get_user_pages(current, current->mm, addr,
len, write, 0, NULL, NULL);