/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (pte_present(pte) && pfn_valid(pfn)) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page) &&
		    (ptep_clear_flush_dirty(vma, address, ptep) ||
		     page_test_and_clear_dirty(page)))
			set_page_dirty(page);
	}
	return 0;
}
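
/*
 * The net effect above is to migrate the hardware dirty bit from the
 * pte into the struct page via set_page_dirty(), so that the regular
 * writeback paths can find the page.  Stripped of the TLB-flush and
 * per-page dirty-bit details, the core idea is roughly this sketch
 * (illustrative only, not a kernel API):
 *
 *	if (pte_dirty(pte))
 *		set_page_dirty(page);
 *
 * ptep_clear_flush_dirty() does the test-and-clear atomically and
 * flushes the stale TLB entry at the same time, and
 * page_test_and_clear_dirty() covers architectures such as s390 that
 * track dirtiness per physical page rather than per pte.
 */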

static int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pte_t *pte;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset_map(pmd, address);
	if ((address & PMD_MASK) != (end & PMD_MASK))
		end = (address & PMD_MASK) + PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(pte - 1);
	return error;
}
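
/*
 * Note the clamping pattern above, repeated at every level of the
 * walk: if [address, end) crosses a pmd boundary, end is pulled back
 * to the end of the current pmd's reach.  For example, with a 2MB
 * PMD_SIZE, address = 0x1ff000 and end = 0x201000 clamp end to
 * 0x200000; the caller's loop supplies the rest on its next iteration.
 */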

static inline int filemap_sync_pmd_range(pud_t * pud,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	int error;

	if (pud_none(*pud))
		return 0;
	if (pud_bad(*pud)) {
		pud_ERROR(*pud);
		pud_clear(pud);
		return 0;
	}
	pmd = pmd_offset(pud, address);
	if ((address & PUD_MASK) != (end & PUD_MASK))
		end = (address & PUD_MASK) + PUD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}

static inline int filemap_sync_pud_range(pgd_t *pgd,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pud_t *pud;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return 0;
	}
	pud = pud_offset(pgd, address);
	if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
		end = (address & PGDIR_MASK) + PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pmd_range(pud, address, end, vma, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return error;
}
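
/*
 * Taken together, the helpers above implement a full four-level page
 * table walk: __filemap_sync() below iterates over pgd entries,
 * filemap_sync_pud_range() over puds, filemap_sync_pmd_range() over
 * pmds, and filemap_sync_pte_range() visits the individual ptes.
 * Because each level clamps `end', a callee only ever sees a range
 * that fits within a single entry of its caller's table.
 */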

static int __filemap_sync(struct vm_area_struct *vma, unsigned long address,
			size_t size, unsigned int flags)
{
	pgd_t *pgd;
	unsigned long end = address + size;
	unsigned long next;
	int i;
	int error = 0;

	/* Acquire the lock early; it may be possible to avoid dropping
	 * and reacquiring it repeatedly.
	 */
	spin_lock(&vma->vm_mm->page_table_lock);

	pgd = pgd_offset(vma->vm_mm, address);
	flush_cache_range(vma, address, end);

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync() */
	if (is_vm_hugetlb_page(vma))
		goto out;

	if (address >= end)
		BUG();
	for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
		next = (address + PGDIR_SIZE) & PGDIR_MASK;
		if (next <= address || next > end)
			next = end;
		error |= filemap_sync_pud_range(pgd, address, next, vma, flags);
		address = next;
		pgd++;
	}
	/*
	 * Why flush ? filemap_sync_pte already flushed the tlbs with the
	 * dirty bits.
	 */
	flush_tlb_range(vma, end - size, end);
 out:
	spin_unlock(&vma->vm_mm->page_table_lock);
	return error;
}
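
/*
 * Note that flush_tlb_range() above is passed (end - size, end) rather
 * than (address, end): the pgd loop has already advanced `address' all
 * the way to `end', so end - size recovers the original start of the
 * region.
 */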

#ifdef CONFIG_PREEMPT
static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
			size_t size, unsigned int flags)
{
	const size_t chunk = 64 * 1024;	/* bytes */
	int error = 0;

	while (size) {
		size_t sz = min(size, chunk);

		error |= __filemap_sync(vma, address, sz, flags);
		cond_resched();
		address += sz;
		size -= sz;
	}
	return error;
}
#else
static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
			size_t size, unsigned int flags)
{
	return __filemap_sync(vma, address, size, flags);
}
#endif
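
/*
 * The CONFIG_PREEMPT variant above syncs in 64KB chunks (16 pages with
 * 4K pages), dropping page_table_lock and calling cond_resched()
 * between chunks, so the lock is never held for long and scheduling
 * latency stays bounded on large ranges.  The non-preemptible variant
 * simply does the whole range in one pass.
 */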

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
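
/*
 * Illustrative userspace sequence for the protocol described above (a
 * sketch, not part of this file; assumes fd backs the mapping at addr):
 *
 *	msync(addr, len, MS_ASYNC);	// just marks the pages dirty
 *	...				// do other work
 *	if (fsync(fd) == -1)		// write out, wait, check result
 *		perror("fsync");
 *
 * or, to start async writeout immediately instead:
 *
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 */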

static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & MS_SYNC)) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
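
/*
 * So for MS_SYNC, msync_interval() above performs three ordered steps:
 * propagate pte dirty bits into the page cache (filemap_sync()), start
 * writeback and sync metadata (filemap_fdatawrite() plus the
 * filesystem's own ->fsync()), then wait for the writeout to finish
 * (filemap_fdatawait()), returning the first error encountered.
 */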

asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct * vma;
	int unmapped_error, error = -EINVAL;

	current->flags |= PF_SYNCWRITE;

	down_read(&current->mm->mmap_sem);
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round len up to a page multiple */
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	current->flags &= ~PF_SYNCWRITE;
	return error;
}