Fedora Core 6 1.2949 + VServer 2.2.0
index 086cf43..ba4fe10 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
 /*
  * Lock ordering in mm:
  *
- * inode->i_sem        (while writing or truncating, not reading or faulting)
- *   inode->i_alloc_sem
- *
- * When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
- *
- * mm->mmap_sem
- *   page->flags PG_locked (lock_page)
- *     mapping->i_mmap_lock
- *       anon_vma->lock
- *         mm->page_table_lock
- *           zone->lru_lock (in mark_page_accessed)
- *           swap_list_lock (in swap_free etc's swap_info_get)
- *             mmlist_lock (in mmput, drain_mmlist and others)
- *             swap_device_lock (in swap_duplicate, swap_info_get)
- *             mapping->private_lock (in __set_page_dirty_buffers)
- *             inode_lock (in set_page_dirty's __mark_inode_dirty)
- *               sb_lock (within inode_lock in fs/fs-writeback.c)
- *               mapping->tree_lock (widely used, in set_page_dirty,
- *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_lock in __sync_single_inode)
+ * inode->i_mutex      (while writing or truncating, not reading or faulting)
+ *   inode->i_alloc_sem (vmtruncate_range)
+ *   mm->mmap_sem
+ *     page->flags PG_locked (lock_page)
+ *       mapping->i_mmap_lock
+ *         anon_vma->lock
+ *           mm->page_table_lock or pte_lock
+ *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *             swap_lock (in swap_duplicate, swap_info_get)
+ *               mmlist_lock (in mmput, drain_mmlist and others)
+ *               mapping->private_lock (in __set_page_dirty_buffers)
+ *               inode_lock (in set_page_dirty's __mark_inode_dirty)
+ *                 sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                 mapping->tree_lock (widely used, in set_page_dirty,
+ *                           in arch-dependent flush_dcache_mmap_lock,
+ *                           within inode_lock in __sync_single_inode)
  */
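
/*
 * Illustrative sketch only (not part of this patch): a write-side path
 * honouring the ordering documented above takes locks outermost-first
 * and releases them in reverse.  "ordering_sketch" is a hypothetical
 * name; the locks are the standard ones named in the comment.
 */
static void ordering_sketch(struct inode *inode, struct mm_struct *mm,
                            struct page *page, struct address_space *mapping)
{
        mutex_lock(&inode->i_mutex);            /* writing/truncating: outermost */
        down_read(&mm->mmap_sem);               /* nests inside i_mutex */
        lock_page(page);                        /* PG_locked nests inside mmap_sem */
        spin_lock(&mapping->i_mmap_lock);       /* then the file rmap lock */
        /* ... work that needs the file rmap ... */
        spin_unlock(&mapping->i_mmap_lock);     /* release in reverse order */
        unlock_page(page);
        up_read(&mm->mmap_sem);
        mutex_unlock(&inode->i_mutex);
}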
 
 #include <linux/mm.h>
 #include <linux/swapops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/acct.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
 #include <linux/vs_memory.h>
 
 #include <asm/tlbflush.h>
 
-//#define RMAP_DEBUG /* can be enabled only for debugging */
-
-kmem_cache_t *anon_vma_cachep;
+struct kmem_cache *anon_vma_cachep;
 
 static inline void validate_anon_vma(struct vm_area_struct *find_vma)
 {
-#ifdef RMAP_DEBUG
+#ifdef CONFIG_DEBUG_VM
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;
@@ -107,7 +99,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
-                       list_add(&vma->anon_vma_node, &anon_vma->head);
+                       list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);
@@ -131,7 +123,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;
 
        if (anon_vma) {
-               list_add(&vma->anon_vma_node, &anon_vma->head);
+               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
        }
 }
@@ -142,7 +134,7 @@ void anon_vma_link(struct vm_area_struct *vma)
 
        if (anon_vma) {
                spin_lock(&anon_vma->lock);
-               list_add(&vma->anon_vma_node, &anon_vma->head);
+               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
                spin_unlock(&anon_vma->lock);
        }
@@ -168,7 +160,8 @@ void anon_vma_unlink(struct vm_area_struct *vma)
                anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
+                         unsigned long flags)
 {
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                                                SLAB_CTOR_CONSTRUCTOR) {
@@ -228,7 +221,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used by unuse_process, on anon pages.
+ * page matches the vma: currently only used on anon pages, by unuse_vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -237,7 +230,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
-               if (vma->vm_file->f_mapping != page->mapping)
+               if (!vma->vm_file ||
+                   vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
@@ -245,64 +239,85 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * Subfunctions of page_referenced: page_referenced_one called
- * repeatedly from either page_referenced_anon or page_referenced_file.
+ * Check that @page is mapped at @address into @mm.
+ *
+ * On success returns with pte mapped and locked.
  */
-static int page_referenced_one(struct page *page,
-       struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
+pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+                         unsigned long address, spinlock_t **ptlp)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
-       int referenced = 0;
-
-       if (!mm->rss)
-               goto out;
-       address = vma_address(page, vma);
-       if (address == -EFAULT)
-               goto out;
-
-       spin_lock(&mm->page_table_lock);
+       spinlock_t *ptl;
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
-               goto out_unlock;
+               return NULL;
 
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
-               goto out_unlock;
+               return NULL;
 
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
-               goto out_unlock;
+               return NULL;
 
        pte = pte_offset_map(pmd, address);
-       if (!pte_present(*pte))
-               goto out_unmap;
+       /* Make a quick check before getting the lock */
+       if (!pte_present(*pte)) {
+               pte_unmap(pte);
+               return NULL;
+       }
 
-       if (page_to_pfn(page) != pte_pfn(*pte))
-               goto out_unmap;
+       ptl = pte_lockptr(mm, pmd);
+       spin_lock(ptl);
+       if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
+               *ptlp = ptl;
+               return pte;
+       }
+       pte_unmap_unlock(pte, ptl);
+       return NULL;
+}
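
/*
 * Hypothetical caller sketch (not in this patch): page_check_address()
 * returns with the pte mapped and its lock held, so every success path
 * must end in pte_unmap_unlock(), exactly as page_referenced_one()
 * below does.
 */
static int pte_is_dirty_sketch(struct page *page, struct mm_struct *mm,
                               unsigned long address)
{
        spinlock_t *ptl;
        pte_t *pte;
        int dirty;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                return 0;               /* not (or no longer) mapped here */
        dirty = !!pte_dirty(*pte);
        pte_unmap_unlock(pte, ptl);     /* drops ptl and kunmaps the pte */
        return dirty;
}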
+
+/*
+ * Subfunctions of page_referenced: page_referenced_one called
+ * repeatedly from either page_referenced_anon or page_referenced_file.
+ */
+static int page_referenced_one(struct page *page,
+       struct vm_area_struct *vma, unsigned int *mapcount)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte;
+       spinlock_t *ptl;
+       int referenced = 0;
+
+       address = vma_address(page, vma);
+       if (address == -EFAULT)
+               goto out;
+
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
 
        if (ptep_clear_flush_young(vma, address, pte))
                referenced++;
 
-       if (mm != current->mm && !ignore_token && has_swap_token(mm))
+       /*
+        * Pretend the page is referenced if the task has the
+        * swap token and is in the middle of a page fault.
+        */
+       if (mm != current->mm && has_swap_token(mm) &&
+                       rwsem_is_locked(&mm->mmap_sem))
                referenced++;
 
        (*mapcount)--;
-
-out_unmap:
-       pte_unmap(pte);
-out_unlock:
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(pte, ptl);
 out:
        return referenced;
 }
 
-static int page_referenced_anon(struct page *page, int ignore_token)
+static int page_referenced_anon(struct page *page)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -315,8 +330,7 @@ static int page_referenced_anon(struct page *page, int ignore_token)
 
        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               referenced += page_referenced_one(page, vma, &mapcount,
-                                                       ignore_token);
+               referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }
@@ -335,7 +349,7 @@ static int page_referenced_anon(struct page *page, int ignore_token)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page, int ignore_token)
+static int page_referenced_file(struct page *page)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -373,8 +387,7 @@ static int page_referenced_file(struct page *page, int ignore_token)
                        referenced++;
                        break;
                }
-               referenced += page_referenced_one(page, vma, &mapcount,
-                                                       ignore_token);
+               referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }
@@ -391,13 +404,10 @@ static int page_referenced_file(struct page *page, int ignore_token)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked, int ignore_token)
+int page_referenced(struct page *page, int is_locked)
 {
        int referenced = 0;
 
-       if (!swap_token_default_timeout)
-               ignore_token = 1;
-
        if (page_test_and_clear_young(page))
                referenced++;
 
@@ -406,81 +416,179 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
 
        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page, ignore_token);
+                       referenced += page_referenced_anon(page);
                else if (is_locked)
-                       referenced += page_referenced_file(page, ignore_token);
+                       referenced += page_referenced_file(page);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced += page_referenced_file(page,
-                                                               ignore_token);
+                               referenced += page_referenced_file(page);
                        unlock_page(page);
                }
        }
        return referenced;
 }
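
/*
 * Reclaim-side sketch (cf. mm/vmscan.c, which is the real caller): a
 * page that was recently referenced stays put; an unreferenced one is
 * worth passing to try_to_unmap().  "reclaim_candidate" is a
 * hypothetical helper, not a function added by this patch.
 */
static int reclaim_candidate(struct page *page)
{
        if (page_referenced(page, 1))   /* 1: caller already holds the page lock */
                return 0;               /* recently used: keep it */
        return 1;                       /* cold: candidate for unmapping */
}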
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       address = vma_address(page, vma);
+       if (address == -EFAULT)
+               goto out;
+
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
+
+       if (pte_dirty(*pte) || pte_write(*pte)) {
+               pte_t entry;
+
+               flush_cache_page(vma, address, pte_pfn(*pte));
+               entry = ptep_clear_flush(vma, address, pte);
+               entry = pte_wrprotect(entry);
+               entry = pte_mkclean(entry);
+               set_pte_at(mm, address, pte, entry);
+               lazy_mmu_prot_update(entry);
+               ret = 1;
+       }
+
+       pte_unmap_unlock(pte, ptl);
+out:
+       return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = 0;
+
+       BUG_ON(PageAnon(page));
+
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               if (vma->vm_flags & VM_SHARED)
+                       ret += page_mkclean_one(page, vma);
+       }
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+       int ret = 0;
+
+       BUG_ON(!PageLocked(page));
+
+       if (page_mapped(page)) {
+               struct address_space *mapping = page_mapping(page);
+               if (mapping)
+                       ret = page_mkclean_file(mapping, page);
+       }
+       if (page_test_and_clear_dirty(page))
+               ret = 1;
+
+       return ret;
+}
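
/*
 * Hedged writeback-side sketch (not in this patch): page_mkclean()
 * write-protects and cleans every shared pte, so a later store will
 * refault and re-dirty the page; any dirtiness it found is folded back
 * into the struct page before writeout starts.  "writeout_sketch" is a
 * placeholder name.
 */
static void writeout_sketch(struct page *page)
{
        BUG_ON(!PageLocked(page));      /* page_mkclean() requires the lock */
        if (page_mkclean(page))
                set_page_dirty(page);   /* a pte was dirty: record it */
        /* ... hand the page to the filesystem's writepage path ... */
}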
+
 /**
- * page_add_anon_rmap - add pte mapping to an anonymous page
+ * page_set_anon_rmap - setup new anonymous rmap
  * @page:      the page to add the mapping to
  * @vma:       the vm area in which the mapping is added
  * @address:   the user virtual address mapped
- *
- * The caller needs to hold the mm->page_table_lock.
  */
-void page_add_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
-       pgoff_t index;
 
-       BUG_ON(PageReserved(page));
        BUG_ON(!anon_vma);
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       page->mapping = (struct address_space *) anon_vma;
 
-       vx_anonpages_inc(vma->vm_mm);
+       page->index = linear_page_index(vma, address);
 
-       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-       index = (address - vma->vm_start) >> PAGE_SHIFT;
-       index += vma->vm_pgoff;
-       index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-
-       if (atomic_inc_and_test(&page->_mapcount)) {
-               page->index = index;
-               page->mapping = (struct address_space *) anon_vma;
-               inc_page_state(nr_mapped);
-       }
+       /*
+        * nr_mapped state can be updated without turning off
+        * interrupts because it is not modified via interrupt.
+        */
+       __inc_zone_page_state(page, NR_ANON_PAGES);
+}
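
/*
 * Decoding sketch: the code above tags the anon_vma pointer with
 * PAGE_MAPPING_ANON (bit 0) before storing it in page->mapping; this
 * is what PageAnon() and the anon rmap walkers undo elsewhere.
 * "anon_vma_of" is a hypothetical name, not part of the patch.
 */
static struct anon_vma *anon_vma_of(struct page *page)
{
        unsigned long mapping = (unsigned long) page->mapping;

        if (!(mapping & PAGE_MAPPING_ANON))
                return NULL;                    /* file-backed page */
        return (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
}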
+
+/**
+ * page_add_anon_rmap - add pte mapping to an anonymous page
+ * @page:      the page to add the mapping to
+ * @vma:       the vm area in which the mapping is added
+ * @address:   the user virtual address mapped
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_add_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       if (atomic_inc_and_test(&page->_mapcount))
+               __page_set_anon_rmap(page, vma, address);
        /* else checking page index and mapping is racy */
 }
 
+/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page:      the page to add the mapping to
+ * @vma:       the vm area in which the mapping is added
+ * @address:   the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+       __page_set_anon_rmap(page, vma, address);
+}
+
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_file_rmap(struct page *page)
 {
-       BUG_ON(PageAnon(page));
-       if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
-               return;
-
        if (atomic_inc_and_test(&page->_mapcount))
-               inc_page_state(nr_mapped);
+               __inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
  *
- * Caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 {
-       BUG_ON(PageReserved(page));
-
        if (atomic_add_negative(-1, &page->_mapcount)) {
-               BUG_ON(page_mapcount(page) < 0);
+               if (unlikely(page_mapcount(page) < 0)) {
+                       printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+                       printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
+                       printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
+                       printk(KERN_EMERG "  page->count = %x\n", page_count(page));
+                       printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
+                       print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
+                       if (vma->vm_ops)
+                               print_symbol(KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+                       if (vma->vm_file && vma->vm_file->f_op)
+                               print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
+                       BUG();
+               }
+
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
@@ -492,7 +600,8 @@ void page_remove_rmap(struct page *page)
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
-               dec_page_state(nr_mapped);
+               __dec_zone_page_state(page,
+                               PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
 }
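
/*
 * _mapcount bookkeeping sketch (hypothetical function): the count
 * starts at -1 ("no ptes map this page"), so the first map and the
 * last unmap are each detectable with a single atomic operation.
 */
static void mapcount_sketch(struct page *page)
{
        atomic_set(&page->_mapcount, -1);       /* freshly allocated */

        /* first mapping: -1 -> 0, atomic_inc_and_test() returns true */
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);

        /* last unmapping: 0 -> -1, atomic_add_negative() returns true */
        if (atomic_add_negative(-1, &page->_mapcount))
                __dec_zone_page_state(page, NR_FILE_MAPPED);
}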
 
@@ -500,115 +609,92 @@ void page_remove_rmap(struct page *page)
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+                               int migration)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
+       spinlock_t *ptl;
        int ret = SWAP_AGAIN;
 
-       if (!mm->rss)
-               goto out;
        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;
 
-       /*
-        * We need the page_table_lock to protect us from page faults,
-        * munmap, fork, etc...
-        */
-       spin_lock(&mm->page_table_lock);
-
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               goto out_unlock;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out_unlock;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
-               goto out_unlock;
-
-       pte = pte_offset_map(pmd, address);
-       if (!pte_present(*pte))
-               goto out_unmap;
-
-       if (page_to_pfn(page) != pte_pfn(*pte))
-               goto out_unmap;
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
 
        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
-       if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
-                       ptep_clear_flush_young(vma, address, pte)) {
-               ret = SWAP_FAIL;
-               goto out_unmap;
-       }
-
-       /*
-        * Don't pull an anonymous page out from under get_user_pages.
-        * GUP carefully breaks COW and raises page count (while holding
-        * page_table_lock, as we have here) to make sure that the page
-        * cannot be freed.  If we unmap that page here, a user write
-        * access to the virtual address will bring back the page, but
-        * its raised count will (ironically) be taken to mean it's not
-        * an exclusive swap page, do_wp_page will replace it by a copy
-        * page, and the user never get to see the data GUP was holding
-        * the original page for.
-        *
-        * This test is also useful for when swapoff (unuse_process) has
-        * to drop page lock: its reference to the page stops existing
-        * ptes from being unmapped, so swapoff can make progress.
-        */
-       if (PageSwapCache(page) &&
-           page_count(page) != page_mapcount(page) + 2) {
+       if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+                       (ptep_clear_flush_young(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }
 
        /* Nuke the page table entry. */
-       flush_cache_page(vma, address);
+       flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);
 
        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);
 
+       /* Update high watermark before we lower rss */
+       update_hiwater_rss(mm);
+
        if (PageAnon(page)) {
-               swp_entry_t entry = { .val = page->private };
-               /*
-                * Store the swap location in the pte.
-                * See handle_pte_fault() ...
-                */
-               BUG_ON(!PageSwapCache(page));
-               swap_duplicate(entry);
-               if (list_empty(&mm->mmlist)) {
-                       spin_lock(&mmlist_lock);
-                       list_add(&mm->mmlist, &init_mm.mmlist);
-                       spin_unlock(&mmlist_lock);
+               swp_entry_t entry = { .val = page_private(page) };
+
+               if (PageSwapCache(page)) {
+                       /*
+                        * Store the swap location in the pte.
+                        * See handle_pte_fault() ...
+                        */
+                       swap_duplicate(entry);
+                       if (list_empty(&mm->mmlist)) {
+                               spin_lock(&mmlist_lock);
+                               if (list_empty(&mm->mmlist))
+                                       list_add(&mm->mmlist, &init_mm.mmlist);
+                               spin_unlock(&mmlist_lock);
+                       }
+                       dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+               } else {
+                       /*
+                        * Store the pfn of the page in a special migration
+                        * pte. do_swap_page() will wait until the migration
+                        * pte is removed and then restart fault handling.
+                        */
+                       BUG_ON(!migration);
+                       entry = make_migration_entry(page, pte_write(pteval));
+#endif
                }
-               set_pte(pte, swp_entry_to_pte(entry));
+               set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
-               vx_anonpages_dec(mm);
-       }
+       } else
+#ifdef CONFIG_MIGRATION
+       if (migration) {
+               /* Establish migration entry for a file page */
+               swp_entry_t entry;
+               entry = make_migration_entry(page, pte_write(pteval));
+               set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+       } else
+#endif
+               dec_mm_counter(mm, file_rss);
 
-       vx_rsspages_dec(mm);
-       acct_update_integrals();
-       page_remove_rmap(page);
+       page_remove_rmap(page, vma);
        page_cache_release(page);
 
 out_unmap:
-       pte_unmap(pte);
-out_unlock:
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(pte, ptl);
 out:
        return ret;
 }
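
/*
 * Fault-side sketch of the migration case (assumes CONFIG_MIGRATION;
 * approximates do_swap_page(), and "handle_swap_pte_sketch" is a
 * made-up name): a migration pte looks like a swap pte, and the
 * faulting task simply waits for the migration to finish before the
 * fault is retried.
 */
static int handle_swap_pte_sketch(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long address, pte_t orig_pte)
{
        swp_entry_t entry = pte_to_swp_entry(orig_pte);

        if (is_migration_entry(entry)) {
                migration_entry_wait(mm, pmd, address);
                return 0;               /* retry the fault */
        }
        /* ... otherwise the normal swapin path runs ... */
        return 0;
}
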
@@ -644,16 +730,10 @@ static void try_to_unmap_cluster(unsigned long cursor,
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
+       spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;
-       unsigned long pfn;
-
-       /*
-        * We need the page_table_lock to protect us from page faults,
-        * munmap, fork, etc...
-        */
-       spin_lock(&mm->page_table_lock);
 
        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
@@ -664,60 +744,51 @@ static void try_to_unmap_cluster(unsigned long cursor,
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
-               goto out_unlock;
+               return;
 
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
-               goto out_unlock;
+               return;
 
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
-               goto out_unlock;
+               return;
 
-       for (pte = pte_offset_map(pmd, address);
-                       address < end; pte++, address += PAGE_SIZE) {
+       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 
-               if (!pte_present(*pte))
-                       continue;
-
-               pfn = pte_pfn(*pte);
-               if (!pfn_valid(pfn))
-                       continue;
+       /* Update high watermark before we lower rss */
+       update_hiwater_rss(mm);
 
-               page = pfn_to_page(pfn);
-               BUG_ON(PageAnon(page));
-               if (PageReserved(page))
+       for (; address < end; pte++, address += PAGE_SIZE) {
+               if (!pte_present(*pte))
                        continue;
+               page = vm_normal_page(vma, address, *pte);
+               BUG_ON(!page || PageAnon(page));
 
                if (ptep_clear_flush_young(vma, address, pte))
                        continue;
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address);
+               flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
-                       set_pte(pte, pgoff_to_pte(page->index));
+                       set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
 
                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);
 
-               page_remove_rmap(page);
+               page_remove_rmap(page, vma);
                page_cache_release(page);
-               acct_update_integrals();
-               vx_rsspages_dec(mm);
+               dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
-
-       pte_unmap(pte);
-
-out_unlock:
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(pte - 1, ptl);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int migration)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
@@ -728,7 +799,7 @@ static int try_to_unmap_anon(struct page *page)
                return ret;
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               ret = try_to_unmap_one(page, vma);
+               ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }
@@ -745,7 +816,7 @@ static int try_to_unmap_anon(struct page *page)
  *
  * This function is only called from try_to_unmap for object-based pages.
  */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int migration)
 {
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -759,7 +830,7 @@ static int try_to_unmap_file(struct page *page)
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-               ret = try_to_unmap_one(page, vma);
+               ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }
@@ -769,7 +840,7 @@ static int try_to_unmap_file(struct page *page)
 
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-               if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+               if ((vma->vm_flags & VM_LOCKED) && !migration)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
@@ -803,11 +874,10 @@ static int try_to_unmap_file(struct page *page)
        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-                       if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+                       if ((vma->vm_flags & VM_LOCKED) && !migration)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
-                       while (vma->vm_mm->rss &&
-                               cursor < max_nl_cursor &&
+                       while (cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
@@ -826,11 +896,8 @@ static int try_to_unmap_file(struct page *page)
         * in locked vmas).  Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
-       list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-                                               shared.vm_set.list) {
-               if (!(vma->vm_flags & VM_RESERVED))
-                       vma->vm_private_data = NULL;
-       }
+       list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+               vma->vm_private_data = NULL;
 out:
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
@@ -848,19 +915,19 @@ out:
  * SWAP_AGAIN  - we missed a mapping, try again later
  * SWAP_FAIL   - the page is unswappable
  */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int migration)
 {
        int ret;
 
-       BUG_ON(PageReserved(page));
        BUG_ON(!PageLocked(page));
 
        if (PageAnon(page))
-               ret = try_to_unmap_anon(page);
+               ret = try_to_unmap_anon(page, migration);
        else
-               ret = try_to_unmap_file(page);
+               ret = try_to_unmap_file(page, migration);
 
        if (!page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
 }
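
/*
 * Caller-side sketch of the return-code contract (cf. shrink_page_list()
 * in mm/vmscan.c); the errno mapping and "unmap_sketch" name are
 * illustrative only.
 */
static int unmap_sketch(struct page *page)
{
        switch (try_to_unmap(page, 0)) {        /* 0: not a migration */
        case SWAP_FAIL:
                return -EBUSY;          /* e.g. VM_LOCKED: stop trying */
        case SWAP_AGAIN:
                return -EAGAIN;         /* missed a mapping: retry later */
        case SWAP_SUCCESS:
        default:
                return 0;               /* fully unmapped */
        }
}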