This commit was manufactured by cvs2svn to create tag
index 1f3c84f..74fa3d7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
  */
 
 /*
- * Locking:
- * - the page->mapcount field is protected by the PG_maplock bit,
- *   which nests within the mm->page_table_lock,
- *   which nests within the page lock.
- * - because swapout locking is opposite to the locking order
- *   in the page fault path, the swapout path uses trylocks
- *   on the mm->page_table_lock
+ * Locking: see "Lock ordering" summary in filemap.c.
+ * In swapout, page_map_lock is held on entry to page_referenced and
+ * try_to_unmap, so they trylock for i_mmap_lock and page_table_lock.
  */
+
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -33,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/rmap.h>
+#include <linux/vs_memory.h>
 
 #include <asm/tlbflush.h>
 
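The rewritten comment defers to the "Lock ordering" summary in filemap.c and only records the trylock discipline. As a rough illustration of that back-off pattern (a sketch, not the verbatim kernel code): reclaim enters page_referenced() and try_to_unmap() already holding the page's map lock, which is the wrong way round for the documented order, so it must not block on the inner locks:

	/*
	 * Illustrative sketch only -- not the kernel's actual code.
	 * Contention is treated as "referenced" / "unmap failed", so
	 * reclaim simply moves on and retries the page later.
	 */
	if (!spin_trylock(&mm->page_table_lock))
		return 1;	/* lock held elsewhere: back off */
	/* ... test and clear ptes under the lock ... */
	spin_unlock(&mm->page_table_lock);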
@@ -79,8 +77,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
+                       if (!allocated)
+                               spin_lock(&anon_vma->lock);
                        vma->anon_vma = anon_vma;
                        list_add(&vma->anon_vma_node, &anon_vma->head);
+                       if (!allocated)
+                               spin_unlock(&anon_vma->lock);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);
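The conditional locking is the point of this hunk: when "allocated" is NULL the vma is reusing a neighbour's anon_vma, whose list rmap may already be traversing, so the insertion must happen under anon_vma->lock; a freshly allocated anon_vma is still invisible to everyone else and needs no lock. A condensed sketch of the surrounding allocate-or-reuse logic (simplified, not the verbatim function):

	anon_vma = find_mergeable_anon_vma(vma);	/* may be NULL */
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			return -ENOMEM;
		allocated = anon_vma;		/* private: unlocked is safe */
	}

	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		if (!allocated)			/* shared: rmap may walk it */
			spin_lock(&anon_vma->lock);
		vma->anon_vma = anon_vma;
		list_add(&vma->anon_vma_node, &anon_vma->head);
		if (!allocated)
			spin_unlock(&anon_vma->lock);
		allocated = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	if (unlikely(allocated))		/* lost a race: discard ours */
		anon_vma_free(allocated);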
@@ -226,7 +228,7 @@ static int page_referenced_one(struct page *page,
        if (page_to_pfn(page) != pte_pfn(*pte))
                goto out_unmap;
 
-       if (ptep_test_and_clear_young(pte))
+       if (ptep_clear_flush_young(vma, address, pte))
                referenced++;
 
        (*mapcount)--;
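This substitution (repeated in the try_to_unmap hunks below) matters because clearing the accessed bit in the pte alone is not enough: a CPU holding a stale TLB entry could keep referencing the page without ever setting the bit again, and reclaim would wrongly conclude the page is cold. The generic fallback amounts to roughly the following (a sketch; architectures can supply a cheaper variant):

	static inline int sketch_clear_flush_young(struct vm_area_struct *vma,
						   unsigned long address,
						   pte_t *ptep)
	{
		int young = ptep_test_and_clear_young(ptep);

		if (young)
			flush_tlb_page(vma, address);	/* drop the stale entry */
		return young;
	}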
@@ -465,7 +467,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
         * skipped over this mm) then we should reactivate it.
         */
        if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
-                       ptep_test_and_clear_young(pte)) {
+                       ptep_clear_flush_young(vma, address, pte)) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }
@@ -507,7 +509,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
                BUG_ON(pte_file(*pte));
        }
 
-       mm->rss--;
+       // mm->rss--;
+       vx_rsspages_dec(mm);
        BUG_ON(!page->mapcount);
        page->mapcount--;
        page_cache_release(page);
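vx_rsspages_dec() belongs to the Linux-VServer patchset (hence the <linux/vs_memory.h> include added above): it replaces the open-coded mm->rss-- so that the owning vserver context's RSS accounting is adjusted in the same step. The header itself is not part of this diff; the intent is along these lines (a hypothetical sketch, not the real vs_memory.h definition):

	/*
	 * Hypothetical sketch only -- the real helper lives in the
	 * Linux-VServer header <linux/vs_memory.h>, not shown here.
	 */
	static inline void sketch_rsspages_dec(struct mm_struct *mm)
	{
		mm->rss--;	/* the accounting this hunk replaces */
		/* ...plus a matching decrement of the per-context RSS
		 * counter, so per-guest limits stay accurate. */
	}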
@@ -592,7 +595,7 @@ static int try_to_unmap_cluster(unsigned long cursor,
                if (PageReserved(page))
                        continue;
 
-               if (ptep_test_and_clear_young(pte))
+               if (ptep_clear_flush_young(vma, address, pte))
                        continue;
 
                /* Nuke the page table entry. */
@@ -739,7 +742,7 @@ static inline int try_to_unmap_file(struct page *page)
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (!(vma->vm_flags & VM_RESERVED))
-                       vma->vm_private_data = 0;
+                       vma->vm_private_data = NULL;
        }
 relock:
        page_map_lock(page);
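The 0 -> NULL change in the nonlinear list walk is a pure type cleanup: vm_private_data, which try_to_unmap_file() reuses as the cluster-scan cursor, is declared void *, so assigning the bare integer 0 only works through implicit conversion (and tools like sparse warn about it), while NULL states the intent:

	/* vm_private_data is a void *; reset the scan cursor with NULL,
	 * not with an integer constant that merely converts to it. */
	vma->vm_private_data = NULL;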