#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
+#include <linux/syscalls.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
-{
- pte_t pte = *ptep;
- unsigned long pfn = pte_pfn(pte);
- struct page *page;
-
- if (pte_present(pte) && pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (!PageReserved(page) &&
- (ptep_clear_flush_dirty(vma, address, ptep) ||
- page_test_and_clear_dirty(page)))
- set_page_dirty(page);
- }
- return 0;
-}
-
-static int filemap_sync_pte_range(pmd_t * pmd,
- unsigned long address, unsigned long end,
- struct vm_area_struct *vma, unsigned int flags)
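+/*
+ * Scan one pmd's worth of ptes under the pte lock, transferring each
+ * present pte's dirty bit to its struct page so that writeback can
+ * later find the page.
+ */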
+static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end)
{
pte_t *pte;
- int error;
-
- if (pmd_none(*pmd))
- return 0;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return 0;
- }
- pte = pte_offset_map(pmd, address);
- if ((address & PMD_MASK) != (end & PMD_MASK))
- end = (address & PMD_MASK) + PMD_SIZE;
- error = 0;
- do {
- error |= filemap_sync_pte(pte, vma, address, flags);
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
+ spinlock_t *ptl;
+ int progress = 0;
- pte_unmap(pte - 1);
+again:
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ do {
+ struct page *page;
- return error;
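+ /*
+ * Roughly every 64 ptes (dirty ptes count extra, see below),
+ * check whether we should reschedule or break a contended pte
+ * lock: if so, bail out, then retake the lock at "again".
+ */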
+ if (progress >= 64) {
+ progress = 0;
+ if (need_resched() || need_lockbreak(ptl))
+ break;
+ }
+ progress++;
+ if (!pte_present(*pte))
+ continue;
+ if (!pte_maybe_dirty(*pte))
+ continue;
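+ /* No struct page (e.g. a raw PFN mapping): nothing to dirty */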
+ page = vm_normal_page(vma, addr, *pte);
+ if (!page)
+ continue;
+ if (ptep_clear_flush_dirty(vma, addr, pte) ||
+ page_test_and_clear_dirty(page))
+ set_page_dirty(page);
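+ /* Dirty ptes are more work, so they advance "progress" faster */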
+ progress += 3;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+ if (addr != end)
+ goto again;
}
-static inline int filemap_sync_pmd_range(pgd_t * pgd,
- unsigned long address, unsigned long end,
- struct vm_area_struct *vma, unsigned int flags)
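+/*
+ * Walk the pmds covering [addr, end) under one pud, syncing the
+ * ptes of each pmd that is actually populated.
+ */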
+static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr, unsigned long end)
{
- pmd_t * pmd;
- int error;
-
- if (pgd_none(*pgd))
- return 0;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return 0;
- }
- pmd = pmd_offset(pgd, address);
- if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
- end = (address & PGDIR_MASK) + PGDIR_SIZE;
- error = 0;
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
do {
- error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return error;
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ msync_pte_range(vma, pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
}
-static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
- size_t size, unsigned int flags)
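+/*
+ * Likewise one level up: walk the puds covering [addr, end)
+ * under one pgd.
+ */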
+static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
{
- pgd_t * dir;
- unsigned long end = address + size;
- int error = 0;
+ pud_t *pud;
+ unsigned long next;
- /* Aquire the lock early; it may be possible to avoid dropping
- * and reaquiring it repeatedly.
- */
- spin_lock(&vma->vm_mm->page_table_lock);
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ msync_pmd_range(vma, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
- dir = pgd_offset(vma->vm_mm, address);
- flush_cache_range(vma, address, end);
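+/*
+ * Top level of the walk: take one vma's range [addr, end) down
+ * through the pgd, pud and pmd levels to reach the ptes.
+ */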
+static void msync_page_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pgd_t *pgd;
+ unsigned long next;
/* For hugepages we can't go walking the page table normally,
* but that's ok, hugetlbfs is memory based, so we don't need
- * to do anything more on an msync() */
- if (is_vm_hugetlb_page(vma))
- goto out;
-
- if (address >= end)
- BUG();
- do {
- error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- /*
- * Why flush ? filemap_sync_pte already flushed the tlbs with the
- * dirty bits.
+ * to do anything more on an msync().
*/
- flush_tlb_range(vma, end - size, end);
- out:
- spin_unlock(&vma->vm_mm->page_table_lock);
+ if (vma->vm_flags & VM_HUGETLB)
+ return;
- return error;
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(vma->vm_mm, addr);
+ flush_cache_range(vma, addr, end);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ msync_pud_range(vma, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
}
/*
* So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
* applications.
*/
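+/*
+ * Illustrative example (userspace): msync(addr, len, MS_ASYNC) only
+ * propagates pte dirty bits into the pagecache and returns, while
+ * msync(addr, len, MS_SYNC) also starts the writeout and waits for
+ * it to complete.
+ */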
-static int msync_interval(struct vm_area_struct * vma,
- unsigned long start, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end, int flags)
{
int ret = 0;
- struct file * file = vma->vm_file;
+ struct file *file = vma->vm_file;
if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
return -EBUSY;
if (file && (vma->vm_flags & VM_SHARED)) {
- ret = filemap_sync(vma, start, end-start, flags);
+ msync_page_range(vma, addr, end);
- if (!ret && (flags & MS_SYNC)) {
+ if (flags & MS_SYNC) {
struct address_space *mapping = file->f_mapping;
int err;
- down(&mapping->host->i_sem);
ret = filemap_fdatawrite(mapping);
if (file->f_op && file->f_op->fsync) {
+ /*
+ * We don't take i_mutex here because mmap_sem
+ * is already held.
+ */
err = file->f_op->fsync(file,file->f_dentry,1);
if (err && !ret)
ret = err;
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
- up(&mapping->host->i_sem);
}
}
return ret;
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
unsigned long end;
- struct vm_area_struct * vma;
+ struct vm_area_struct *vma;
int unmapped_error, error = -EINVAL;
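+ /*
+ * PF_SYNCWRITE hints to the block layer's I/O schedulers that
+ * this task's writes should be treated as synchronous.
+ */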
+ if (flags & MS_SYNC)
+ current->flags |= PF_SYNCWRITE;
+
down_read(¤t->mm->mmap_sem);
if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
goto out;
}
out:
up_read(¤t->mm->mmap_sem);
+ current->flags &= ~PF_SYNCWRITE;
return error;
}