patch-2.6.7-vs1.9.1.12
diff --git a/mm/fremap.c b/mm/fremap.c
index 1b891a1..c708a58 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -14,6 +14,7 @@
 #include <linux/swapops.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
+#include <linux/vs_memory.h>
 
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -36,7 +37,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (!PageReserved(page)) {
                                if (pte_dirty(pte))
                                        set_page_dirty(page);
-                               page_remove_rmap(page, ptep);
+                               page_remove_rmap(page);
                                page_cache_release(page);
                                // mm->rss--;
                                vx_rsspages_dec(mm);
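
The page_remove_rmap() change above is the 2.6.8 switch from pte_chain-based
reverse mapping to object-based rmap: the ptes mapping a file page are now
found by walking the vmas hanging off page->mapping, so teardown no longer
needs the pte pointer. A minimal sketch of the call-site difference (the
wrapper and the #ifdef guard are illustrative, only the two rmap calls are
real kernel API):

/* Hypothetical helper contrasting the two rmap generations. */
static void sketch_unmap_one(struct page *page, pte_t *ptep)
{
#ifdef PTE_CHAIN_RMAP			/* illustrative guard, not a real config */
	page_remove_rmap(page, ptep);	/* 2.6.7: pte_chain rmap needs the pte */
#else
	page_remove_rmap(page);		/* 2.6.8: object rmap needs only the page */
#endif
}
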
@@ -50,7 +51,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Install a page to a given virtual memory address, release any
+ * Install a file page to a given virtual memory address, release any
  * previously existing mapping.
  */
 int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -61,11 +62,13 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t pte_val;
-       struct pte_chain *pte_chain;
 
-       pte_chain = pte_chain_alloc(GFP_KERNEL);
-       if (!pte_chain)
-               goto err;
+       /*
+        * We use page_add_file_rmap below: if install_page is
+        * ever extended to anonymous pages, this will warn us.
+        */
+       BUG_ON(!page_mapping(page));
+
        pgd = pgd_offset(mm, addr);
        spin_lock(&mm->page_table_lock);
 
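
The BUG_ON() works because page_mapping() returns NULL for anonymous pages:
since the anon_vma changes of this same era, an anonymous page stores its
anon_vma in page->mapping with the low PAGE_MAPPING_ANON bit set, and
page_mapping() masks that case out. A simplified sketch of the helper
(hypothetical name; the real one in the headers also handles the swap-cache
case):

static inline struct address_space *sketch_page_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (mapping & PAGE_MAPPING_ANON)	/* anon page: no file mapping */
		return NULL;
	return (struct address_space *)mapping;	/* file-backed page */
}
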
@@ -86,18 +89,14 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
        vx_rsspages_inc(mm);
        flush_icache_page(vma, page);
        set_pte(pte, mk_pte(page, prot));
-       pte_chain = page_add_rmap(page, pte, pte_chain);
+       page_add_file_rmap(page);
        pte_val = *pte;
        pte_unmap(pte);
        update_mmu_cache(vma, addr, pte_val);
-       spin_unlock(&mm->page_table_lock);
-       pte_chain_free(pte_chain);
-       return 0;
 
+       err = 0;
 err_unlock:
        spin_unlock(&mm->page_table_lock);
-       pte_chain_free(pte_chain);
-err:
        return err;
 }
 EXPORT_SYMBOL(install_page);
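
With no pte_chain left to free, the success path no longer needs its own
unlock-and-return exit: it sets err and falls through to the single unlock
site, which is exactly what the hunk above converts install_page to. A
standalone sketch of the pattern (hypothetical helper, not from the patch):

static int sketch_single_exit(struct mm_struct *mm, pte_t *pte)
{
	int err = -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!pte)
		goto err_unlock;	/* failure: err already holds -ENOMEM */
	/* ... install the entry here ... */
	err = 0;			/* success falls through to the unlock */
err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
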
@@ -164,6 +163,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
        unsigned long __prot, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       struct address_space *mapping;
        unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
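
The next hunk replaces the open-coded offset check with linear_page_index(),
which computes the page offset a linear mapping would give for an address.
Effectively (a sketch matching the expression being removed; the in-tree
version may differ in PAGE_CACHE_SIZE details):

static inline pgoff_t sketch_linear_page_index(struct vm_area_struct *vma,
					       unsigned long address)
{
	/* offset of the address within the vma, in pages ... */
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;

	/* ... plus the file offset at which the vma starts */
	return pgoff + vma->vm_pgoff;
}
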
@@ -193,16 +193,30 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
-        * the single existing vma:
+        * the single existing vma.  vm_private_data is used as a
+        * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
+        * or VM_LOCKED, but VM_LOCKED could be revoked later on).
         */
        if (vma && (vma->vm_flags & VM_SHARED) &&
+               (!vma->vm_private_data || (vma->vm_flags & VM_RESERVED)) &&
                vma->vm_ops && vma->vm_ops->populate &&
                        end > start && start >= vma->vm_start &&
                                end <= vma->vm_end) {
 
                /* Must set VM_NONLINEAR before any pages are populated. */
-               if (pgoff != ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff)
+               if (pgoff != linear_page_index(vma, start) &&
+                   !(vma->vm_flags & VM_NONLINEAR)) {
+                       mapping = vma->vm_file->f_mapping;
+                       spin_lock(&mapping->i_mmap_lock);
+                       flush_dcache_mmap_lock(mapping);
                        vma->vm_flags |= VM_NONLINEAR;
+                       vma_prio_tree_remove(vma, &mapping->i_mmap);
+                       vma_prio_tree_init(vma);
+                       list_add_tail(&vma->shared.vm_set.list,
+                                       &mapping->i_mmap_nonlinear);
+                       flush_dcache_mmap_unlock(mapping);
+                       spin_unlock(&mapping->i_mmap_lock);
+               }
 
                /* ->populate can take a long time, so downgrade the lock. */
                downgrade_write(&mm->mmap_sem);
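
The hunk above moves a vma onto mapping->i_mmap_nonlinear the first time a
remap makes pgoff disagree with linear_page_index(): the i_mmap prio tree can
only describe a vma as one linear (vm_pgoff, size) interval, so a non-linear
vma has to live on a plain list instead. A minimal userspace trigger,
runnable as-is (the file path is illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/nonlinear-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 2 * psz) < 0) {
		perror("setup");
		return 1;
	}
	char *map = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Rebind the first virtual page to file page 1: pgoff no longer
	 * matches linear_page_index(), so the kernel takes the branch
	 * above and sets VM_NONLINEAR on the vma. */
	if (remap_file_pages(map, psz, 0, 1, 0) < 0) {
		perror("remap_file_pages");
		return 1;
	}
	printf("page 0 of the mapping now shows file offset %ld\n", psz);
	return 0;
}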