/*
 *      linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Called with mm->page_table_lock held to prevent other threads and
 * the swapper from ripping PTEs out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (pte_present(pte) && pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                if (!PageReserved(page) &&
                    (ptep_clear_flush_dirty(vma, address, ptep) ||
                     page_test_and_clear_dirty(page)))
                        set_page_dirty(page);
        }
        return 0;
}

static int filemap_sync_pte_range(pmd_t *pmd,
        unsigned long address, unsigned long end,
        struct vm_area_struct *vma, unsigned int flags)
{
        pte_t *pte;
        int error;

        if (pmd_none(*pmd))
                return 0;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return 0;
        }
        pte = pte_offset_map(pmd, address);
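        /* Do not let the walk below run past the page table this pmd covers. */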
        if ((address & PMD_MASK) != (end & PMD_MASK))
                end = (address & PMD_MASK) + PMD_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte(pte, vma, address, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));

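        /*
         * The loop advanced pte one past the last entry it touched, so
         * pte - 1 still points into the page table mapped by
         * pte_offset_map() above; drop that mapping here.
         */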
        pte_unmap(pte - 1);

        return error;
}

static inline int filemap_sync_pmd_range(pgd_t *pgd,
        unsigned long address, unsigned long end,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t *pmd;
        int error;

        if (pgd_none(*pgd))
                return 0;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                return 0;
        }
        pmd = pmd_offset(pgd, address);
        if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
                end = (address & PGDIR_MASK) + PGDIR_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return error;
}

static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t *dir;
        unsigned long end = address + size;
        int error = 0;

        /* Acquire the lock early; it may be possible to avoid dropping
         * and reacquiring it repeatedly.
         */
        spin_lock(&vma->vm_mm->page_table_lock);

        dir = pgd_offset(vma->vm_mm, address);
        flush_cache_range(vma, address, end);
        if (address >= end)
                BUG();
        do {
                error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        /*
         * Why flush?  filemap_sync_pte already flushed the TLBs with the
         * dirty bits.
         */
        flush_tlb_range(vma, end - size, end);

        spin_unlock(&vma->vm_mm->page_table_lock);

        return error;
}

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may then run fsync() to
 * write out the dirty pages, wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
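/*
 * A minimal sketch of the userspace sequence described above (illustrative
 * only; fd, addr and len are hypothetical names, not used in this file):
 *
 *      msync(addr, len, MS_ASYNC);     -- mark the pages dirty, start no I/O
 *      fsync(fd);                      -- write out the dirty pages and wait
 *
 * With MS_SYNC instead, the single msync() call both writes and waits.
 */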
static int msync_interval(struct vm_area_struct *vma,
        unsigned long start, unsigned long end, int flags)
{
        int ret = 0;
        struct file *file = vma->vm_file;

        if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
                return -EBUSY;

        if (file && (vma->vm_flags & VM_SHARED)) {
                ret = filemap_sync(vma, start, end - start, flags);

                if (!ret && (flags & MS_SYNC)) {
                        struct address_space *mapping = file->f_mapping;
                        int err;

                        down(&mapping->host->i_sem);
                        ret = filemap_fdatawrite(mapping);
                        if (file->f_op && file->f_op->fsync) {
                                err = file->f_op->fsync(file, file->f_dentry, 1);
                                if (err && !ret)
                                        ret = err;
                        }
                        err = filemap_fdatawait(mapping);
                        if (!ret)
                                ret = err;
                        up(&mapping->host->i_sem);
                }
        }
        return ret;
}

asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct vm_area_struct *vma;
        int unmapped_error, error = -EINVAL;

        down_read(&current->mm->mmap_sem);
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                goto out;
        if (start & ~PAGE_MASK)
                goto out;
        if ((flags & MS_ASYNC) && (flags & MS_SYNC))
                goto out;
        error = -ENOMEM;
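        /* Round len up to a whole number of pages. */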
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                goto out;
        error = 0;
        if (end == start)
                goto out;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
        vma = find_vma(current->mm, start);
        unmapped_error = 0;
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = msync_interval(vma, start, end, flags);
                                if (error)
                                        goto out;
                        }
                        error = unmapped_error;
                        goto out;
                }
                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = msync_interval(vma, start, vma->vm_end, flags);
                if (error)
                        goto out;
                start = vma->vm_end;
                vma = vma->vm_next;
        }
out:
        up_read(&current->mm->mmap_sem);
        return error;
}
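
/*
 * Illustrative userspace usage of the system call above (a sketch only, not
 * part of this file; "example.dat" and the variable names are hypothetical):
 *
 *      int fd = open("example.dat", O_RDWR);
 *      size_t len = 4096;
 *      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *      p[0] = 'x';                     -- dirty the shared mapping
 *      msync(p, len, MS_SYNC);         -- write the range back and wait
 *
 *      munmap(p, len);
 *      close(fd);
 *
 * start must be page-aligned (else -EINVAL), and unmapped holes inside
 * [start, start + len) cause -ENOMEM to be returned after the mapped
 * parts have been synced.
 */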