fedora core 6 1.2949 + vserver 2.2.0
diff --git a/mm/msync.c b/mm/msync.c
index 2aaad04..358d73c 100644
--- a/mm/msync.c
+++ b/mm/msync.c
 /*
  * The msync() system call.
  */
-#include <linux/slab.h>
-#include <linux/pagemap.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/hugetlb.h>
-
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
-       unsigned long address, unsigned int flags)
-{
-       pte_t pte = *ptep;
-       unsigned long pfn = pte_pfn(pte);
-       struct page *page;
-
-       if (pte_present(pte) && pfn_valid(pfn)) {
-               page = pfn_to_page(pfn);
-               if (!PageReserved(page) &&
-                   (ptep_clear_flush_dirty(vma, address, ptep) ||
-                    page_test_and_clear_dirty(page)))
-                       set_page_dirty(page);
-       }
-       return 0;
-}
-
-static int filemap_sync_pte_range(pmd_t * pmd,
-       unsigned long address, unsigned long end, 
-       struct vm_area_struct *vma, unsigned int flags)
-{
-       pte_t *pte;
-       int error;
-
-       if (pmd_none(*pmd))
-               return 0;
-       if (pmd_bad(*pmd)) {
-               pmd_ERROR(*pmd);
-               pmd_clear(pmd);
-               return 0;
-       }
-       pte = pte_offset_map(pmd, address);
-       if ((address & PMD_MASK) != (end & PMD_MASK))
-               end = (address & PMD_MASK) + PMD_SIZE;
-       error = 0;
-       do {
-               error |= filemap_sync_pte(pte, vma, address, flags);
-               address += PAGE_SIZE;
-               pte++;
-       } while (address && (address < end));
-
-       pte_unmap(pte - 1);
-
-       return error;
-}
-
-static inline int filemap_sync_pmd_range(pgd_t * pgd,
-       unsigned long address, unsigned long end, 
-       struct vm_area_struct *vma, unsigned int flags)
-{
-       pmd_t * pmd;
-       int error;
-
-       if (pgd_none(*pgd))
-               return 0;
-       if (pgd_bad(*pgd)) {
-               pgd_ERROR(*pgd);
-               pgd_clear(pgd);
-               return 0;
-       }
-       pmd = pmd_offset(pgd, address);
-       if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
-               end = (address & PGDIR_MASK) + PGDIR_SIZE;
-       error = 0;
-       do {
-               error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return error;
-}
-
-static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
-       size_t size, unsigned int flags)
-{
-       pgd_t * dir;
-       unsigned long end = address + size;
-       int error = 0;
-
-       /* Aquire the lock early; it may be possible to avoid dropping
-        * and reaquiring it repeatedly.
-        */
-       spin_lock(&vma->vm_mm->page_table_lock);
-
-       dir = pgd_offset(vma->vm_mm, address);
-       flush_cache_range(vma, address, end);
-
-       /* For hugepages we can't go walking the page table normally,
-        * but that's ok, hugetlbfs is memory based, so we don't need
-        * to do anything more on an msync() */
-       if (is_vm_hugetlb_page(vma))
-               goto out;
-
-       if (address >= end)
-               BUG();
-       do {
-               error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       /*
-        * Why flush ? filemap_sync_pte already flushed the tlbs with the
-        * dirty bits.
-        */
-       flush_tlb_range(vma, end - size, end);
- out:
-       spin_unlock(&vma->vm_mm->page_table_lock);
-
-       return error;
-}
+#include <linux/file.h>
+#include <linux/syscalls.h>
 
 /*
  * MS_SYNC syncs the entire file - including mappings.
  *
- * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
- * marks the relevant pages dirty.  The application may now run fsync() to
+ * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
+ * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
+ * Now it doesn't do anything, since dirty pages are properly tracked.
+ *
+ * The application may now run fsync() to
  * write out the dirty pages and wait on the writeout and check the result.
  * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
  * async writeout immediately.
- * So my _not_ starting I/O in MS_ASYNC we provide complete flexibility to
+ * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-static int msync_interval(struct vm_area_struct * vma,
-       unsigned long start, unsigned long end, int flags)
-{
-       int ret = 0;
-       struct file * file = vma->vm_file;
-
-       if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
-               return -EBUSY;
-
-       if (file && (vma->vm_flags & VM_SHARED)) {
-               ret = filemap_sync(vma, start, end-start, flags);
-
-               if (!ret && (flags & MS_SYNC)) {
-                       struct address_space *mapping = file->f_mapping;
-                       int err;
-
-                       down(&mapping->host->i_sem);
-                       ret = filemap_fdatawrite(mapping);
-                       if (file->f_op && file->f_op->fsync) {
-                               err = file->f_op->fsync(file,file->f_dentry,1);
-                               if (err && !ret)
-                                       ret = err;
-                       }
-                       err = filemap_fdatawait(mapping);
-                       if (!ret)
-                               ret = err;
-                       up(&mapping->host->i_sem);
-               }
-       }
-       return ret;
-}
-
 asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 {
        unsigned long end;
-       struct vm_area_struct * vma;
-       int unmapped_error, error = -EINVAL;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int unmapped_error = 0;
+       int error = -EINVAL;
 
-       down_read(&current->mm->mmap_sem);
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                goto out;
        if (start & ~PAGE_MASK)
@@ -199,36 +53,50 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
-       vma = find_vma(current->mm, start);
-       unmapped_error = 0;
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, start);
        for (;;) {
+               struct file *file;
+
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
-                       goto out;
+                       goto out_unlock;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
-                       unmapped_error = -ENOMEM;
                        start = vma->vm_start;
+                       if (start >= end)
+                               goto out_unlock;
+                       unmapped_error = -ENOMEM;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
-               if (end <= vma->vm_end) {
-                       if (start < end) {
-                               error = msync_interval(vma, start, end, flags);
-                               if (error)
-                                       goto out;
-                       }
-                       error = unmapped_error;
-                       goto out;
+               if ((flags & MS_INVALIDATE) &&
+                               (vma->vm_flags & VM_LOCKED)) {
+                       error = -EBUSY;
+                       goto out_unlock;
                }
-               /* Here vma->vm_start <= start < vma->vm_end < end. */
-               error = msync_interval(vma, start, vma->vm_end, flags);
-               if (error)
-                       goto out;
+               file = vma->vm_file;
                start = vma->vm_end;
-               vma = vma->vm_next;
+               if ((flags & MS_SYNC) && file &&
+                               (vma->vm_flags & VM_SHARED)) {
+                       get_file(file);
+                       up_read(&mm->mmap_sem);
+                       error = do_fsync(file, 0);
+                       fput(file);
+                       if (error || start >= end)
+                               goto out;
+                       down_read(&mm->mmap_sem);
+                       vma = find_vma(mm, start);
+               } else {
+                       if (start >= end) {
+                               error = 0;
+                               goto out_unlock;
+                       }
+                       vma = vma->vm_next;
+               }
        }
+out_unlock:
+       up_read(&mm->mmap_sem);
 out:
-       up_read(&current->mm->mmap_sem);
-       return error;
+       return error ? : unmapped_error;
 }
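
The comment block at the top of the rewritten msync.c describes the userspace pattern this change relies on: msync(MS_ASYNC) no longer starts I/O or marks pages dirty, so an application that wants the data on disk calls fsync() and waits for the result, or posix_fadvise(POSIX_FADV_DONTNEED) to start asynchronous writeout. A minimal userspace sketch of that pattern follows; it is not part of the patch, and the file name "data.bin" and the 4 KiB length are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder file; make sure it is long enough to back the mapping. */
        int fd = open("data.bin", O_RDWR | O_CREAT, 0644);
        size_t len = 4096;

        if (fd < 0 || ftruncate(fd, len) < 0)
                return 1;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        memcpy(p, "hello", 5);          /* dirty the shared mapping */

        /*
         * On kernels with this change MS_ASYNC starts no I/O; it only
         * validates the range (an unmapped hole in [p, p + len) still
         * yields ENOMEM at the end, as the sys_msync() loop above shows).
         */
        if (msync(p, len, MS_ASYNC) < 0)
                perror("msync");

        /* Either write out the dirty pages and wait, checking the result... */
        if (fsync(fd) < 0)
                perror("fsync");

        /* ...or just start async writeout and carry on. */
        posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);

        munmap(p, len);
        close(fd);
        return 0;
}

The MS_SYNC side of the same pattern is what the new sys_msync() does internally: for each shared file mapping in the range it takes a reference on the file, drops mmap_sem, and calls do_fsync(file, 0).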