*/
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
+#include <linux/writeback.h>
+#include <linux/file.h>
#include <linux/syscalls.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
-{
- pte_t pte = *ptep;
- unsigned long pfn = pte_pfn(pte);
- struct page *page;
-
- if (pte_present(pte) && pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (!PageReserved(page) &&
- (ptep_clear_flush_dirty(vma, address, ptep) ||
- page_test_and_clear_dirty(page)))
- set_page_dirty(page);
- }
- return 0;
-}
-
-static int filemap_sync_pte_range(pmd_t * pmd,
- unsigned long address, unsigned long end,
- struct vm_area_struct *vma, unsigned int flags)
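+/*
+ * Walk the ptes in [addr, end), transferring each pte's dirty bit to its
+ * struct page, and return the number of pages thereby newly marked dirty.
+ * Takes the pte lock itself, and periodically drops and reacquires it to
+ * bound lock hold times.
+ */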
+static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end)
{
pte_t *pte;
- int error;
+ spinlock_t *ptl;
+ int progress = 0;
+ unsigned long ret = 0;
- if (pmd_none(*pmd))
- return 0;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return 0;
- }
- pte = pte_offset_map(pmd, address);
- if ((address & PMD_MASK) != (end & PMD_MASK))
- end = (address & PMD_MASK) + PMD_SIZE;
- error = 0;
+again:
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
do {
- error |= filemap_sync_pte(pte, vma, address, flags);
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-
- pte_unmap(pte - 1);
+ struct page *page;
- return error;
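+ /*
+ * Every 64 ptes examined (a dirtied pte counts as four), check
+ * whether we should drop the pte lock and reschedule, so that
+ * large ranges do not monopolize the lock or the CPU.
+ */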
+ if (progress >= 64) {
+ progress = 0;
+ if (need_resched() || need_lockbreak(ptl))
+ break;
+ }
+ progress++;
+ if (!pte_present(*pte))
+ continue;
+ if (!pte_maybe_dirty(*pte))
+ continue;
+ page = vm_normal_page(vma, addr, *pte);
+ if (!page)
+ continue;
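+ /*
+ * Transfer dirtiness from the pte (or, on s390, from the
+ * storage key via page_test_and_clear_dirty()) to the struct
+ * page; set_page_dirty() returns nonzero only if the page was
+ * not already dirty.
+ */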
+ if (ptep_clear_flush_dirty(vma, addr, pte) ||
+ page_test_and_clear_dirty(page))
+ ret += set_page_dirty(page);
+ progress += 3;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
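+ /* If we stopped early to break the lock, resume where we left off. */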
+ if (addr != end)
+ goto again;
+ return ret;
}
-static inline int filemap_sync_pmd_range(pgd_t * pgd,
- unsigned long address, unsigned long end,
- struct vm_area_struct *vma, unsigned int flags)
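+/*
+ * Standard page table walk over the pmds covering [addr, end):
+ * pmd_addr_end() clamps each step to one pmd's reach, and empty or
+ * corrupt entries are skipped via pmd_none_or_clear_bad().
+ */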
+static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr, unsigned long end)
{
- pmd_t * pmd;
- int error;
+ pmd_t *pmd;
+ unsigned long next;
+ unsigned long ret = 0;
- if (pgd_none(*pgd))
- return 0;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return 0;
- }
- pmd = pmd_offset(pgd, address);
- if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
- end = (address & PGDIR_MASK) + PGDIR_SIZE;
- error = 0;
+ pmd = pmd_offset(pud, addr);
do {
- error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return error;
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ ret += msync_pte_range(vma, pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+ return ret;
}
-static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
- size_t size, unsigned int flags)
+static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
+ pgd_t *pgd, unsigned long addr, unsigned long end)
{
- pgd_t * dir;
- unsigned long end = address + size;
- int error = 0;
+ pud_t *pud;
+ unsigned long next;
+ unsigned long ret = 0;
- /* Aquire the lock early; it may be possible to avoid dropping
- * and reaquiring it repeatedly.
- */
- spin_lock(&vma->vm_mm->page_table_lock);
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ ret += msync_pmd_range(vma, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+ return ret;
+}
- dir = pgd_offset(vma->vm_mm, address);
- flush_cache_range(vma, address, end);
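+/*
+ * Top level of the page table walk: returns the number of pages in
+ * [addr, end) that were newly marked dirty by moving pte dirty bits
+ * over to the page cache.
+ */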
+static unsigned long msync_page_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pgd_t *pgd;
+ unsigned long next;
+ unsigned long ret = 0;
/* For hugepages we can't go walking the page table normally,
* but that's ok, hugetlbfs is memory based, so we don't need
- * to do anything more on an msync() */
- if (is_vm_hugetlb_page(vma))
- goto out;
-
- if (address >= end)
- BUG();
- do {
- error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- /*
- * Why flush ? filemap_sync_pte already flushed the tlbs with the
- * dirty bits.
+ * to do anything more on an msync().
*/
- flush_tlb_range(vma, end - size, end);
- out:
- spin_unlock(&vma->vm_mm->page_table_lock);
+ if (vma->vm_flags & VM_HUGETLB)
+ return 0;
- return error;
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(vma->vm_mm, addr);
+ flush_cache_range(vma, addr, end);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ ret += msync_pud_range(vma, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+ return ret;
}
/*
* MS_SYNC syncs the entire file - including mappings.
*
* MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
* marks the relevant pages dirty.  The application may now run fsync() to
* write out the dirty pages and wait on the writeout and check the result.
* Or the application may run fadvise(FADV_DONTNEED) against the fd to start
* async writeout immediately.
- * So my _not_ starting I/O in MS_ASYNC we provide complete flexibility to
+ * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
* applications.
*/
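+/*
+ * Illustrative userspace sketch of the above (not part of this patch):
+ *
+ *	msync(addr, len, MS_ASYNC);	- just mark the pages dirty
+ *	fsync(fd);			- write them out and wait on them
+ * or
+ *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);	- start async writeout
+ */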
-static int msync_interval(struct vm_area_struct * vma,
- unsigned long start, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long end, int flags,
+ unsigned long *nr_pages_dirtied)
{
- int ret = 0;
- struct file * file = vma->vm_file;
+ struct file *file = vma->vm_file;
if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
return -EBUSY;
- if (file && (vma->vm_flags & VM_SHARED)) {
- ret = filemap_sync(vma, start, end-start, flags);
-
- if (!ret && (flags & MS_SYNC)) {
- struct address_space *mapping = file->f_mapping;
- int err;
-
- down(&mapping->host->i_sem);
- ret = filemap_fdatawrite(mapping);
- if (file->f_op && file->f_op->fsync) {
- err = file->f_op->fsync(file,file->f_dentry,1);
- if (err && !ret)
- ret = err;
- }
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- up(&mapping->host->i_sem);
- }
- }
- return ret;
+ if (file && (vma->vm_flags & VM_SHARED))
+ *nr_pages_dirtied = msync_page_range(vma, addr, end);
+ return 0;
}
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
unsigned long end;
- struct vm_area_struct * vma;
- int unmapped_error, error = -EINVAL;
+ struct vm_area_struct *vma;
+ int unmapped_error = 0;
+ int error = -EINVAL;
+ int done = 0;
- down_read(&current->mm->mmap_sem);
if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
goto out;
if (start & ~PAGE_MASK)
goto out;
if ((flags & MS_ASYNC) && (flags & MS_SYNC))
goto out;
error = -ENOMEM;
len = (len + ~PAGE_MASK) & PAGE_MASK;
end = start + len;
if (end < start)
goto out;
error = 0;
if (end == start)
goto out;
/*
* If the interval [start,end) covers some unmapped address ranges,
* just ignore them, but return -ENOMEM at the end.
*/
+ down_read(&current->mm->mmap_sem);
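+ /*
+ * PF_SYNCWRITE tells the I/O schedulers that this task's writes
+ * are synchronous.
+ */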
+ if (flags & MS_SYNC)
+ current->flags |= PF_SYNCWRITE;
vma = find_vma(current->mm, start);
- unmapped_error = 0;
- for (;;) {
- /* Still start < end. */
+ if (!vma) {
error = -ENOMEM;
- if (!vma)
- goto out;
+ goto out_unlock;
+ }
+ do {
+ unsigned long nr_pages_dirtied = 0;
+ struct file *file;
+
/* Here start < vma->vm_end. */
if (start < vma->vm_start) {
unmapped_error = -ENOMEM;
/* Here vma->vm_start <= start < vma->vm_end. */
if (end <= vma->vm_end) {
if (start < end) {
- error = msync_interval(vma, start, end, flags);
+ error = msync_interval(vma, start, end, flags,
+ &nr_pages_dirtied);
if (error)
- goto out;
+ goto out_unlock;
}
error = unmapped_error;
- goto out;
+ done = 1;
+ } else {
+ /* Here vma->vm_start <= start < vma->vm_end < end. */
+ error = msync_interval(vma, start, vma->vm_end, flags,
+ &nr_pages_dirtied);
+ if (error)
+ goto out_unlock;
}
- /* Here vma->vm_start <= start < vma->vm_end < end. */
- error = msync_interval(vma, start, vma->vm_end, flags);
- if (error)
- goto out;
+ file = vma->vm_file;
start = vma->vm_end;
- vma = vma->vm_next;
- }
-out:
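+ /*
+ * Writeback throttling and fsync may block for a long time, so
+ * drop mmap_sem across them (with the file pinned) and re-look
+ * up the vma afterwards: it may have been unmapped meanwhile.
+ */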
+ if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
+ get_file(file);
+ up_read(&current->mm->mmap_sem);
+ balance_dirty_pages_ratelimited_nr(file->f_mapping,
+ nr_pages_dirtied);
+ fput(file);
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, start);
+ } else if ((flags & MS_SYNC) && file &&
+ (vma->vm_flags & VM_SHARED)) {
+ get_file(file);
+ up_read(&current->mm->mmap_sem);
+ error = do_fsync(file, 0);
+ fput(file);
+ down_read(&current->mm->mmap_sem);
+ if (error)
+ goto out_unlock;
+ vma = find_vma(current->mm, start);
+ } else {
+ vma = vma->vm_next;
+ }
+ } while (vma && !done);
+out_unlock:
+ current->flags &= ~PF_SYNCWRITE;
up_read(&current->mm->mmap_sem);
+out:
return error;
}