Linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/mm/msync.c b/mm/msync.c
index 358d73c..3563a56 100644
--- a/mm/msync.c
+++ b/mm/msync.c
 /*
  * The msync() system call.
  */
-#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/file.h>
+#include <linux/hugetlb.h>
 #include <linux/syscalls.h>
 
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                               unsigned long addr, unsigned long end)
+{
+       pte_t *pte;
+       spinlock_t *ptl;
+       int progress = 0;
+
+again:
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       do {
+               struct page *page;
+
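+               /*
+                * Periodically check whether the page-table lock
+                * should be dropped, either to reschedule or to ease
+                * lock contention; the walk resumes at "again" below.
+                */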
+               if (progress >= 64) {
+                       progress = 0;
+                       if (need_resched() || need_lockbreak(ptl))
+                               break;
+               }
+               progress++;
+               if (!pte_present(*pte))
+                       continue;
+               if (!pte_maybe_dirty(*pte))
+                       continue;
+               page = vm_normal_page(vma, addr, *pte);
+               if (!page)
+                       continue;
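+               /*
+                * Transfer the dirty bit from the pte (and, on s390,
+                * from the per-page storage key) to the struct page,
+                * so that writeback sees the page as dirty.
+                */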
+               if (ptep_clear_flush_dirty(vma, addr, pte) ||
+                   page_test_and_clear_dirty(page))
+                       set_page_dirty(page);
+               progress += 3;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+       if (addr != end)
+               goto again;
+}
+
+static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+                               unsigned long addr, unsigned long end)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none_or_clear_bad(pmd))
+                       continue;
+               msync_pte_range(vma, pmd, addr, next);
+       } while (pmd++, addr = next, addr != end);
+}
+
+static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+                               unsigned long addr, unsigned long end)
+{
+       pud_t *pud;
+       unsigned long next;
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               msync_pmd_range(vma, pud, addr, next);
+       } while (pud++, addr = next, addr != end);
+}
+
+static void msync_page_range(struct vm_area_struct *vma,
+                               unsigned long addr, unsigned long end)
+{
+       pgd_t *pgd;
+       unsigned long next;
+
+       /* For hugepages we can't go walking the page table normally,
+        * but that's ok, hugetlbfs is memory based, so we don't need
+        * to do anything more on an msync().
+        */
+       if (vma->vm_flags & VM_HUGETLB)
+               return;
+
+       BUG_ON(addr >= end);
+       pgd = pgd_offset(vma->vm_mm, addr);
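+       /*
+        * Write any cached data over the range back to memory first;
+        * this is a no-op on cache-coherent architectures like x86.
+        */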
+       flush_cache_range(vma, addr, end);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               msync_pud_range(vma, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
 /*
  * MS_SYNC syncs the entire file - including mappings.
  *
- * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
- * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
- * Now it doesn't do anything, since dirty pages are properly tracked.
- *
- * The application may now run fsync() to
+ * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
+ * marks the relevant pages dirty.  The application may now run fsync() to
  * write out the dirty pages and wait on the writeout and check the result.
  * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
  * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
+static int msync_interval(struct vm_area_struct *vma,
+                       unsigned long addr, unsigned long end, int flags)
+{
+       int ret = 0;
+       struct file *file = vma->vm_file;
+
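+       /*
+        * POSIX: MS_INVALIDATE must fail with EBUSY if any page in
+        * the range is locked in memory.
+        */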
+       if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
+               return -EBUSY;
+
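+       /*
+        * Only shared file-backed mappings have backing store to
+        * sync; private and anonymous mappings need no work here.
+        */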
+       if (file && (vma->vm_flags & VM_SHARED)) {
+               msync_page_range(vma, addr, end);
+
+               if (flags & MS_SYNC) {
+                       struct address_space *mapping = file->f_mapping;
+                       int err;
+
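+                       /*
+                        * Start data writeback, let the filesystem
+                        * flush its metadata, then wait for the I/O
+                        * to complete.
+                        */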
+                       ret = filemap_fdatawrite(mapping);
+                       if (file->f_op && file->f_op->fsync) {
+                               /*
+                                * We don't take i_mutex here because mmap_sem
+                                * is already held.
+                                */
+                               err = file->f_op->fsync(file, file->f_dentry, 1);
+                               if (err && !ret)
+                                       ret = err;
+                       }
+                       err = filemap_fdatawait(mapping);
+                       if (!ret)
+                               ret = err;
+               }
+       }
+       return ret;
+}
+
 asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 {
        unsigned long end;
-       struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       int unmapped_error = 0;
-       int error = -EINVAL;
+       int unmapped_error, error = -EINVAL;
+
+       if (flags & MS_SYNC)
+               current->flags |= PF_SYNCWRITE;
 
+       down_read(&current->mm->mmap_sem);
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                goto out;
        if (start & ~PAGE_MASK)
@@ -53,50 +180,37 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, start);
+       vma = find_vma(current->mm, start);
+       unmapped_error = 0;
        for (;;) {
-               struct file *file;
-
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
-                       goto out_unlock;
+                       goto out;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
-                       start = vma->vm_start;
-                       if (start >= end)
-                               goto out_unlock;
                        unmapped_error = -ENOMEM;
+                       start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
-               if ((flags & MS_INVALIDATE) &&
-                               (vma->vm_flags & VM_LOCKED)) {
-                       error = -EBUSY;
-                       goto out_unlock;
-               }
-               file = vma->vm_file;
-               start = vma->vm_end;
-               if ((flags & MS_SYNC) && file &&
-                               (vma->vm_flags & VM_SHARED)) {
-                       get_file(file);
-                       up_read(&mm->mmap_sem);
-                       error = do_fsync(file, 0);
-                       fput(file);
-                       if (error || start >= end)
-                               goto out;
-                       down_read(&mm->mmap_sem);
-                       vma = find_vma(mm, start);
-               } else {
-                       if (start >= end) {
-                               error = 0;
-                               goto out_unlock;
+               if (end <= vma->vm_end) {
+                       if (start < end) {
+                               error = msync_interval(vma, start, end, flags);
+                               if (error)
+                                       goto out;
                        }
-                       vma = vma->vm_next;
+                       error = unmapped_error;
+                       goto out;
                }
+               /* Here vma->vm_start <= start < vma->vm_end < end. */
+               error = msync_interval(vma, start, vma->vm_end, flags);
+               if (error)
+                       goto out;
+               start = vma->vm_end;
+               vma = vma->vm_next;
        }
-out_unlock:
-       up_read(&mm->mmap_sem);
 out:
-       return error ? : unmapped_error;
+       up_read(&current->mm->mmap_sem);
+       current->flags &= ~PF_SYNCWRITE;
+       return error;
 }
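
For illustration only (not part of the patch): a minimal userspace sketch of
the pattern the comment above msync_interval() describes, where MS_ASYNC just
marks pages dirty and the application pairs it with fsync() to actually write
the data out and wait.  The file name and the 4096-byte mapping length are
placeholder assumptions for this sketch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_RDWR);	/* placeholder file */
	if (fd < 0)
		return 1;
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	memcpy(map, "hello", 5);		/* dirty the shared page */

	if (msync(map, 4096, MS_ASYNC))		/* mark dirty, start no I/O */
		perror("msync");
	if (fsync(fd))				/* write out and wait */
		perror("fsync");

	munmap(map, 4096);
	close(fd);
	return 0;
}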