*/
/*
- * Locking:
- * - the page->mapcount field is protected by the PG_maplock bit,
- * which nests within the mm->page_table_lock,
- * which nests within the page lock.
- * - because swapout locking is opposite to the locking order
- * in the page fault path, the swapout path uses trylocks
- * on the mm->page_table_lock
+ * Lock ordering in mm:
+ *
+ * inode->i_mutex (while writing or truncating, not reading or faulting)
+ * inode->i_alloc_sem
+ *
+ * When a page fault occurs in writing from user to file, down_read
+ * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
+ * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
+ * taken together; in truncation, i_mutex is taken outermost.
+ *
+ * mm->mmap_sem
+ * page->flags PG_locked (lock_page)
+ * mapping->i_mmap_lock
+ * anon_vma->lock
+ * mm->page_table_lock or pte_lock
+ * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ * swap_lock (in swap_duplicate, swap_info_get)
+ * mmlist_lock (in mmput, drain_mmlist and others)
+ * mapping->private_lock (in __set_page_dirty_buffers)
+ * inode_lock (in set_page_dirty's __mark_inode_dirty)
+ * sb_lock (within inode_lock in fs/fs-writeback.c)
+ * mapping->tree_lock (widely used, in set_page_dirty,
+ * in arch-dependent flush_dcache_mmap_lock,
+ * within inode_lock in __sync_single_inode)
*/
+
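/*
 * Editorial sketch, not part of this change: a path that needs several
 * of the locks above must take them in the listed order.  For example,
 * page lock, then i_mmap_lock, then the pte lock (page, mapping, mm,
 * pmd and address are assumed to be in scope):
 *
 *	lock_page(page);
 *	spin_lock(&mapping->i_mmap_lock);
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	...
 *	pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */
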
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
+#include <linux/rcupdate.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
#include <linux/vs_memory.h>
#include <asm/tlbflush.h>
-//#define RMAP_DEBUG /* can be enabled only for debugging */
-
-kmem_cache_t *anon_vma_cachep;
+struct kmem_cache *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
-#ifdef RMAP_DEBUG
+#ifdef CONFIG_DEBUG_VM
struct anon_vma *anon_vma = find_vma->anon_vma;
struct vm_area_struct *vma;
unsigned int mapcount = 0;
might_sleep();
if (unlikely(!anon_vma)) {
struct mm_struct *mm = vma->vm_mm;
- struct anon_vma *allocated = NULL;
+ struct anon_vma *allocated, *locked;
anon_vma = find_mergeable_anon_vma(vma);
- if (!anon_vma) {
+ if (anon_vma) {
+ allocated = NULL;
+ locked = anon_vma;
+ spin_lock(&locked->lock);
+ } else {
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
return -ENOMEM;
allocated = anon_vma;
+ locked = NULL;
}
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
allocated = NULL;
}
spin_unlock(&mm->page_table_lock);
+
+ if (locked)
+ spin_unlock(&locked->lock);
if (unlikely(allocated))
anon_vma_free(allocated);
}
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
- if (!vma->anon_vma) {
- BUG_ON(!next->anon_vma);
- vma->anon_vma = next->anon_vma;
- list_add(&vma->anon_vma_node, &next->anon_vma_node);
- } else {
- /* if they're both non-null they must be the same */
- BUG_ON(vma->anon_vma != next->anon_vma);
- }
+ BUG_ON(vma->anon_vma != next->anon_vma);
list_del(&next->anon_vma_node);
}
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma) {
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
}
}
if (anon_vma) {
spin_lock(&anon_vma->lock);
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
spin_unlock(&anon_vma->lock);
}
anon_vma_free(anon_vma);
}
-static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
+ unsigned long flags)
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
void __init anon_vma_init(void)
{
- anon_vma_cachep = kmem_cache_create("anon_vma",
- sizeof(struct anon_vma), 0, SLAB_PANIC, anon_vma_ctor, NULL);
+ anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
-/* this needs the page->flags PG_maplock held */
-static inline void clear_page_anon(struct page *page)
+/*
+ * Getting a lock on a stable anon_vma from a page off the LRU is
+ * tricky: page_lock_anon_vma relies on RCU to guard against the races.
+ */
+static struct anon_vma *page_lock_anon_vma(struct page *page)
{
- BUG_ON(!page->mapping);
- page->mapping = NULL;
- ClearPageAnon(page);
+ struct anon_vma *anon_vma = NULL;
+ unsigned long anon_mapping;
+
+ rcu_read_lock();
+ anon_mapping = (unsigned long) page->mapping;
+ if (!(anon_mapping & PAGE_MAPPING_ANON))
+ goto out;
+ if (!page_mapped(page))
+ goto out;
+
+ anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
+ spin_lock(&anon_vma->lock);
+out:
+ rcu_read_unlock();
+ return anon_vma;
}
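
/*
 * Editorial sketch of the expected calling pattern (it mirrors
 * page_referenced_anon and try_to_unmap_anon below); on success the
 * caller owns anon_vma->lock and must drop it itself:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return;
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 *		...
 *	}
 *	spin_unlock(&anon_vma->lock);
 */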
/*
return address;
}
+/*
+ * At what user virtual address is page expected in vma? Also checks that
+ * the page actually matches the vma: currently only used on anon pages,
+ * by unuse_vma.
+ */
+unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+{
+ if (PageAnon(page)) {
+ if ((void *)vma->anon_vma !=
+ (void *)page->mapping - PAGE_MAPPING_ANON)
+ return -EFAULT;
+ } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+ if (!vma->vm_file ||
+ vma->vm_file->f_mapping != page->mapping)
+ return -EFAULT;
+ } else
+ return -EFAULT;
+ return vma_address(page, vma);
+}
+
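/*
 * Editorial sketch of how a caller such as unuse_vma (named in the
 * comment above) is expected to use the return value; the surrounding
 * code here is illustrative only:
 *
 *	unsigned long address = page_address_in_vma(page, vma);
 *	if (address == -EFAULT)
 *		return 0;
 *	... the page may be mapped at address: look up and test its pte ...
 */
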
+/*
+ * Check that @page is mapped at @address into @mm.
+ *
+ * On success returns with pte mapped and locked.
+ */
+pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+ unsigned long address, spinlock_t **ptlp)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ pgd = pgd_offset(mm, address);
+ if (!pgd_present(*pgd))
+ return NULL;
+
+ pud = pud_offset(pgd, address);
+ if (!pud_present(*pud))
+ return NULL;
+
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return NULL;
+
+ pte = pte_offset_map(pmd, address);
+ /* Make a quick check before getting the lock */
+ if (!pte_present(*pte)) {
+ pte_unmap(pte);
+ return NULL;
+ }
+
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
+ *ptlp = ptl;
+ return pte;
+ }
+ pte_unmap_unlock(pte, ptl);
+ return NULL;
+}
+
/*
* Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file.
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
- pgd_t *pgd;
- pmd_t *pmd;
pte_t *pte;
+ spinlock_t *ptl;
int referenced = 0;
- if (!mm->rss)
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
- if (!spin_trylock(&mm->page_table_lock))
+ pte = page_check_address(page, mm, address, &ptl);
+ if (!pte)
goto out;
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- goto out_unlock;
-
- pmd = pmd_offset(pgd, address);
- if (!pmd_present(*pmd))
- goto out_unlock;
-
- pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte))
- goto out_unmap;
-
- if (page_to_pfn(page) != pte_pfn(*pte))
- goto out_unmap;
+ if (ptep_clear_flush_young(vma, address, pte))
+ referenced++;
- if (ptep_test_and_clear_young(pte))
+ /* Pretend the page is referenced if the task has the
+ swap token and is in the middle of a page fault. */
+ if (mm != current->mm && has_swap_token(mm) &&
+ rwsem_is_locked(&mm->mmap_sem))
referenced++;
(*mapcount)--;
-
-out_unmap:
- pte_unmap(pte);
-out_unlock:
- spin_unlock(&mm->page_table_lock);
+ pte_unmap_unlock(pte, ptl);
out:
return referenced;
}
-static inline int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page)
{
- unsigned int mapcount = page->mapcount;
- struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
+ unsigned int mapcount;
+ struct anon_vma *anon_vma;
struct vm_area_struct *vma;
int referenced = 0;
- spin_lock(&anon_vma->lock);
- BUG_ON(list_empty(&anon_vma->head));
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return referenced;
+
+ mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
* of references it found.
*
* This function is only called from page_referenced for object-based pages.
- *
- * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
- * assume a reference count of 0, so try_to_unmap will then have a go.
*/
-static inline int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page)
{
- unsigned int mapcount = page->mapcount;
+ unsigned int mapcount;
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *vma;
struct prio_tree_iter iter;
int referenced = 0;
- if (!spin_trylock(&mapping->i_mmap_lock))
- return 0;
+ /*
+ * The caller's checks on page->mapping and !PageAnon have made
+ * sure that this is a file page: the check for page->mapping
+ * excludes the case just before it gets set on an anon page.
+ */
+ BUG_ON(PageAnon(page));
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
+ /*
+ * The page lock not only makes sure that page->mapping cannot
+ * suddenly be NULLified by truncation, it makes sure that the
+ * structure at mapping cannot be freed and reused yet,
+ * so we can safely take mapping->i_mmap_lock.
+ */
+ BUG_ON(!PageLocked(page));
+
+ spin_lock(&mapping->i_mmap_lock);
+
+ /*
+ * i_mmap_lock does not stabilize mapcount at all, but mapcount
+ * is more likely to be accurate if we note it after spinning.
+ */
+ mapcount = page_mapcount(page);
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
== (VM_LOCKED|VM_MAYSHARE)) {
referenced++;
/**
* page_referenced - test if the page was referenced
* @page: the page to test
+ * @is_locked: caller holds lock on the page
*
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
- * Caller needs to hold the rmap lock.
*/
-int page_referenced(struct page *page)
+int page_referenced(struct page *page, int is_locked)
{
int referenced = 0;
if (TestClearPageReferenced(page))
referenced++;
- if (page->mapcount && page->mapping) {
+ if (page_mapped(page) && page->mapping) {
if (PageAnon(page))
referenced += page_referenced_anon(page);
- else
+ else if (is_locked)
referenced += page_referenced_file(page);
+ else if (TestSetPageLocked(page))
+ referenced++;
+ else {
+ if (page->mapping)
+ referenced += page_referenced_file(page);
+ unlock_page(page);
+ }
}
return referenced;
}
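
/*
 * Editorial sketch of a reclaim-style caller acting on the count
 * returned above (simplified; referenced is an assumed local, and the
 * caller here already holds the page lock, so it passes is_locked = 1):
 *
 *	referenced = page_referenced(page, 1);
 *	if (referenced)
 *		... keep the page rather than reclaiming it ...
 */
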
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address;
+ pte_t *pte, entry;
+ spinlock_t *ptl;
+ int ret = 0;
+
+ address = vma_address(page, vma);
+ if (address == -EFAULT)
+ goto out;
+
+ pte = page_check_address(page, mm, address, &ptl);
+ if (!pte)
+ goto out;
+
+ if (!pte_dirty(*pte) && !pte_write(*pte))
+ goto unlock;
+
+ entry = ptep_get_and_clear(mm, address, pte);
+ entry = pte_mkclean(entry);
+ entry = pte_wrprotect(entry);
+ ptep_establish(vma, address, pte, entry);
+ lazy_mmu_prot_update(entry);
+ ret = 1;
+
+unlock:
+ pte_unmap_unlock(pte, ptl);
+out:
+ return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ int ret = 0;
+
+ BUG_ON(PageAnon(page));
+
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ if (vma->vm_flags & VM_SHARED)
+ ret += page_mkclean_one(page, vma);
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+ int ret = 0;
+
+ WARN_ON_ONCE(!PageLocked(page));
+
+ if (page_mapped(page)) {
+ struct address_space *mapping = page_mapping(page);
+ if (mapping)
+ ret = page_mkclean_file(mapping, page);
+ }
+
+ return ret;
+}
+
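/*
 * Editorial sketch of the intended use of page_mkclean: before starting
 * writeback, clean and write-protect the shared-file ptes and let the
 * struct page carry the dirty state instead (the caller shown is
 * assumed, not part of this patch):
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *	... then clear the page's dirty flag and start writeback; a later
 *	    store will fault and re-dirty the page ...
 */
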
/**
- * page_add_anon_rmap - add pte mapping to an anonymous page
+ * page_set_anon_rmap - setup new anonymous rmap
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
- *
- * The caller needs to hold the mm->page_table_lock.
*/
-void page_add_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
- pgoff_t index;
- BUG_ON(PageReserved(page));
BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ page->mapping = (struct address_space *) anon_vma;
- index = (address - vma->vm_start) >> PAGE_SHIFT;
- index += vma->vm_pgoff;
- index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
+ page->index = linear_page_index(vma, address);
/*
- * Setting and clearing PG_anon must always happen inside
- * page_map_lock to avoid races between mapping and
- * unmapping on different processes of the same
- * shared cow swapcache page. And while we take the
- * page_map_lock PG_anon cannot change from under us.
- * Actually PG_anon cannot change under fork either
- * since fork holds a reference on the page so it cannot
- * be unmapped under fork and in turn copy_page_range is
- * allowed to read PG_anon outside the page_map_lock.
+ * nr_mapped state can be updated without turning off
+ * interrupts because it is not modified via interrupt.
*/
- page_map_lock(page);
- if (!page->mapcount) {
- BUG_ON(PageAnon(page));
- BUG_ON(page->mapping);
- SetPageAnon(page);
- page->index = index;
- page->mapping = (struct address_space *) anon_vma;
- inc_page_state(nr_mapped);
- } else {
- BUG_ON(!PageAnon(page));
- BUG_ON(page->index != index);
- BUG_ON(page->mapping != (struct address_space *) anon_vma);
- }
- page->mapcount++;
- page_map_unlock(page);
+ __inc_zone_page_state(page, NR_ANON_PAGES);
+}
+
+/**
+ * page_add_anon_rmap - add pte mapping to an anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_add_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ if (atomic_inc_and_test(&page->_mapcount))
+ __page_set_anon_rmap(page, vma, address);
+ /* else checking page index and mapping is racy */
+}
+
+/*
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ __page_set_anon_rmap(page, vma, address);
}
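
/*
 * Editorial sketch of the fault-path pattern these helpers expect
 * (heavily simplified from the mm/memory.c style of use; rss accounting
 * and LRU handling are omitted).  The pte lock is held across both the
 * pte update and the rmap update:
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	...
 *	set_pte_at(mm, address, pte, mk_pte(page, vma->vm_page_prot));
 *	page_add_anon_rmap(page, vma, address);
 *	pte_unmap_unlock(pte, ptl);
 */
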
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
*
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
*/
void page_add_file_rmap(struct page *page)
{
- BUG_ON(PageAnon(page));
- if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
- return;
-
- page_map_lock(page);
- if (!page->mapcount)
- inc_page_state(nr_mapped);
- page->mapcount++;
- page_map_unlock(page);
+ if (atomic_inc_and_test(&page->_mapcount))
+ __inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
*
- * Caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
*/
-void page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
- BUG_ON(PageReserved(page));
- BUG_ON(!page->mapcount);
-
- page_map_lock(page);
- page->mapcount--;
- if (!page->mapcount) {
+ if (atomic_add_negative(-1, &page->_mapcount)) {
+ if (unlikely(page_mapcount(page) < 0)) {
+ printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+ printk (KERN_EMERG " page->flags = %lx\n", page->flags);
+ printk (KERN_EMERG " page->count = %x\n", page_count(page));
+ printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
+ if (vma->vm_ops)
+ print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+ BUG();
+ }
+ /*
+ * It would be tidy to reset the PageAnon mapping here,
+ * but that might overwrite a racing page_add_anon_rmap
+ * which increments mapcount after us but sets mapping
+ * before us: so leave the reset to free_hot_cold_page,
+ * and remember that it's only reliable while mapped.
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- if (PageAnon(page))
- clear_page_anon(page);
- dec_page_state(nr_mapped);
+ __dec_zone_page_state(page,
+ PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
}
- page_map_unlock(page);
}
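
/*
 * Editorial sketch of the usual teardown sequence, as in
 * try_to_unmap_one below: the pte is cleared, its dirty bit moved to
 * the struct page, and only then is the rmap dropped, all under the
 * pte lock:
 *
 *	pteval = ptep_clear_flush(vma, address, pte);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 *	page_remove_rmap(page, vma);
 *	page_cache_release(page);
 */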
/*
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ int migration)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
- pgd_t *pgd;
- pmd_t *pmd;
pte_t *pte;
pte_t pteval;
+ spinlock_t *ptl;
int ret = SWAP_AGAIN;
- if (!mm->rss)
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
- /*
- * We need the page_table_lock to protect us from page faults,
- * munmap, fork, etc...
- */
- if (!spin_trylock(&mm->page_table_lock))
+ pte = page_check_address(page, mm, address, &ptl);
+ if (!pte)
goto out;
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- goto out_unlock;
-
- pmd = pmd_offset(pgd, address);
- if (!pmd_present(*pmd))
- goto out_unlock;
-
- pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte))
- goto out_unmap;
-
- if (page_to_pfn(page) != pte_pfn(*pte))
- goto out_unmap;
-
/*
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
- if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
- ptep_test_and_clear_young(pte)) {
- ret = SWAP_FAIL;
- goto out_unmap;
- }
-
- /*
- * Don't pull an anonymous page out from under get_user_pages.
- * GUP carefully breaks COW and raises page count (while holding
- * page_table_lock, as we have here) to make sure that the page
- * cannot be freed. If we unmap that page here, a user write
- * access to the virtual address will bring back the page, but
- * its raised count will (ironically) be taken to mean it's not
- * an exclusive swap page, do_wp_page will replace it by a copy
- * page, and the user never get to see the data GUP was holding
- * the original page for.
- */
- if (PageSwapCache(page) &&
- page_count(page) != page->mapcount + 2) {
+ if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+ (ptep_clear_flush_young(vma, address, pte)))) {
ret = SWAP_FAIL;
goto out_unmap;
}
/* Nuke the page table entry. */
- flush_cache_page(vma, address);
+ flush_cache_page(vma, address, page_to_pfn(page));
pteval = ptep_clear_flush(vma, address, pte);
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
+ /* Update high watermark before we lower rss */
+ update_hiwater_rss(mm);
+
if (PageAnon(page)) {
- swp_entry_t entry = { .val = page->private };
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- BUG_ON(!PageSwapCache(page));
- swap_duplicate(entry);
- set_pte(pte, swp_entry_to_pte(entry));
+ swp_entry_t entry = { .val = page_private(page) };
+
+ if (PageSwapCache(page)) {
+ /*
+ * Store the swap location in the pte.
+ * See handle_pte_fault() ...
+ */
+ swap_duplicate(entry);
+ if (list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&mm->mmlist))
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+ } else {
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ BUG_ON(!migration);
+ entry = make_migration_entry(page, pte_write(pteval));
+#endif
+ }
+ set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- }
+ } else
+#ifdef CONFIG_MIGRATION
+ if (migration) {
+ /* Establish migration entry for a file page */
+ swp_entry_t entry;
+ entry = make_migration_entry(page, pte_write(pteval));
+ set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ } else
+#endif
+ dec_mm_counter(mm, file_rss);
+
- // mm->rss--;
- vx_rsspages_dec(mm);
- BUG_ON(!page->mapcount);
- page->mapcount--;
+ page_remove_rmap(page, vma);
page_cache_release(page);
out_unmap:
- pte_unmap(pte);
-out_unlock:
- spin_unlock(&mm->page_table_lock);
+ pte_unmap_unlock(pte, ptl);
out:
return ret;
}
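
/*
 * Editorial sketch of the migration-entry round trip used above
 * (helpers from include/linux/swapops.h, simplified): try_to_unmap_one
 * encodes the page into a special swap-like pte, and the migration code
 * later decodes it to restore a real pte:
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *	...
 *	entry = pte_to_swp_entry(*pte);
 *	if (is_migration_entry(entry))
 *		page = migration_entry_to_page(entry);
 */
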
#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
-static int try_to_unmap_cluster(unsigned long cursor,
+static void try_to_unmap_cluster(unsigned long cursor,
unsigned int *mapcount, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t pteval;
+ spinlock_t *ptl;
struct page *page;
unsigned long address;
unsigned long end;
- unsigned long pfn;
-
- /*
- * We need the page_table_lock to protect us from page faults,
- * munmap, fork, etc...
- */
- if (!spin_trylock(&mm->page_table_lock))
- return SWAP_FAIL;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
- goto out_unlock;
+ return;
- pmd = pmd_offset(pgd, address);
- if (!pmd_present(*pmd))
- goto out_unlock;
+ pud = pud_offset(pgd, address);
+ if (!pud_present(*pud))
+ return;
- for (pte = pte_offset_map(pmd, address);
- address < end; pte++, address += PAGE_SIZE) {
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return;
- if (!pte_present(*pte))
- continue;
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- pfn = pte_pfn(*pte);
- if (!pfn_valid(pfn))
- continue;
+ /* Update high watermark before we lower rss */
+ update_hiwater_rss(mm);
- page = pfn_to_page(pfn);
- BUG_ON(PageAnon(page));
- if (PageReserved(page))
+ for (; address < end; pte++, address += PAGE_SIZE) {
+ if (!pte_present(*pte))
continue;
+ page = vm_normal_page(vma, address, *pte);
+ BUG_ON(!page || PageAnon(page));
- if (ptep_test_and_clear_young(pte))
+ if (ptep_clear_flush_young(vma, address, pte))
continue;
/* Nuke the page table entry. */
- flush_cache_page(vma, address);
+ flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address))
- set_pte(pte, pgoff_to_pte(page->index));
+ set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
- page_remove_rmap(page);
+ page_remove_rmap(page, vma);
page_cache_release(page);
- // mm->rss--;
- vx_rsspages_dec(mm);
+ dec_mm_counter(mm, file_rss);
(*mapcount)--;
}
-
- pte_unmap(pte);
-
-out_unlock:
- spin_unlock(&mm->page_table_lock);
- return SWAP_AGAIN;
+ pte_unmap_unlock(pte - 1, ptl);
}
-static inline int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int migration)
{
- struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
+ struct anon_vma *anon_vma;
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
- spin_lock(&anon_vma->lock);
- BUG_ON(list_empty(&anon_vma->head));
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return ret;
+
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma);
- if (ret == SWAP_FAIL || !page->mapcount)
+ ret = try_to_unmap_one(page, vma, migration);
+ if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
spin_unlock(&anon_vma->lock);
* contained in the address_space struct it points to.
*
* This function is only called from try_to_unmap for object-based pages.
- *
- * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
- * return a temporary error.
*/
-static inline int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int migration)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *vma;
struct prio_tree_iter iter;
int ret = SWAP_AGAIN;
unsigned long cursor;
unsigned long max_nl_size = 0;
unsigned int mapcount;
- if (!spin_trylock(&mapping->i_mmap_lock))
- return ret;
-
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
- ret = try_to_unmap_one(page, vma);
- if (ret == SWAP_FAIL || !page->mapcount)
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ ret = try_to_unmap_one(page, vma, migration);
+ if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+ if ((vma->vm_flags & VM_LOCKED) && !migration)
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_size = cursor;
}
- if (max_nl_size == 0) /* any nonlinears locked or reserved */
+ if (max_nl_size == 0) { /* any nonlinears locked or reserved */
+ ret = SWAP_FAIL;
goto out;
+ }
/*
* We don't try to search for this page in the nonlinear vmas,
* The mapcount of the page we came in with is irrelevant,
* but even so use it as a guide to how hard we should try?
*/
- mapcount = page->mapcount;
- page_map_unlock(page);
+ mapcount = page_mapcount(page);
+ if (!mapcount)
+ goto out;
cond_resched_lock(&mapping->i_mmap_lock);
max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+ if ((vma->vm_flags & VM_LOCKED) && !migration)
continue;
cursor = (unsigned long) vma->vm_private_data;
- while (vma->vm_mm->rss &&
- cursor < max_nl_cursor &&
+		while (cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
- ret = try_to_unmap_cluster(
- cursor, &mapcount, vma);
- if (ret == SWAP_FAIL)
- break;
+ try_to_unmap_cluster(cursor, &mapcount, vma);
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
- goto relock;
+ goto out;
}
- if (ret != SWAP_FAIL)
- vma->vm_private_data =
- (void *) max_nl_cursor;
- ret = SWAP_AGAIN;
+ vma->vm_private_data = (void *) max_nl_cursor;
}
cond_resched_lock(&mapping->i_mmap_lock);
max_nl_cursor += CLUSTER_SIZE;
* in locked vmas). Reset cursor on all unreserved nonlinear
* vmas, now forgetting on which ones it had fallen behind.
*/
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
- shared.vm_set.list) {
- if (!(vma->vm_flags & VM_RESERVED))
- vma->vm_private_data = 0;
- }
-relock:
- page_map_lock(page);
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+ vma->vm_private_data = NULL;
out:
spin_unlock(&mapping->i_mmap_lock);
return ret;
* @page: the page to get unmapped
*
* Tries to remove all the page table entries which are mapping this
- * page, used in the pageout path. Caller must hold the page lock
- * and its rmap lock. Return values are:
+ * page, used in the pageout path. Caller must hold the page lock.
+ * Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
- * SWAP_AGAIN - we missed a trylock, try again later
+ * SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int migration)
{
int ret;
- BUG_ON(PageReserved(page));
BUG_ON(!PageLocked(page));
- BUG_ON(!page->mapcount);
if (PageAnon(page))
- ret = try_to_unmap_anon(page);
+ ret = try_to_unmap_anon(page, migration);
else
- ret = try_to_unmap_file(page);
+ ret = try_to_unmap_file(page, migration);
- if (!page->mapcount) {
- if (page_test_and_clear_dirty(page))
- set_page_dirty(page);
- if (PageAnon(page))
- clear_page_anon(page);
- dec_page_state(nr_mapped);
+ if (!page_mapped(page))
ret = SWAP_SUCCESS;
- }
return ret;
}
+
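/*
 * Editorial sketch of a pageout-path caller acting on the return codes
 * above (patterned on shrink_list-style use; the labels are
 * illustrative):
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 */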