VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / mm / msync.c
/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (pte_present(pte) && pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                /*
                 * If the pte (or, on architectures that keep it there,
                 * the storage key) was dirty, clear that state and
                 * propagate it to the struct page so writeback sees it.
                 */
                if (!PageReserved(page) &&
                    (ptep_clear_flush_dirty(vma, address, ptep) ||
                     page_test_and_clear_dirty(page)))
                        set_page_dirty(page);
        }
        return 0;
}

static int filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long end,
        struct vm_area_struct *vma, unsigned int flags)
{
        pte_t *pte;
        int error;

        if (pmd_none(*pmd))
                return 0;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return 0;
        }
        pte = pte_offset_map(pmd, address);
        if ((address & PMD_MASK) != (end & PMD_MASK))
                end = (address & PMD_MASK) + PMD_SIZE;
        error = 0;
        /*
         * The "address &&" test below stops the walk if address wraps
         * to zero at the very top of the address space.
         */
        do {
                error |= filemap_sync_pte(pte, vma, address, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));

        pte_unmap(pte - 1);

        return error;
}

static inline int filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long end,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        int error;

        if (pgd_none(*pgd))
                return 0;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                return 0;
        }
        pmd = pmd_offset(pgd, address);
        if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
                end = (address & PGDIR_MASK) + PGDIR_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return error;
}

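/*
 * filemap_sync - walk the page tables (pgd -> pmd -> pte) covering
 * [address, address + size) of @vma and propagate hardware dirty bits
 * from the ptes into the corresponding struct pages.
 */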
static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int error = 0;

        /* Acquire the lock early; it may be possible to avoid dropping
         * and reacquiring it repeatedly.
         */
        spin_lock(&vma->vm_mm->page_table_lock);

        dir = pgd_offset(vma->vm_mm, address);
        flush_cache_range(vma, address, end);

        /* For hugepages we can't go walking the page table normally,
         * but that's ok, hugetlbfs is memory based, so we don't need
         * to do anything more on an msync() */
        if (is_vm_hugetlb_page(vma))
                goto out;

        if (address >= end)
                BUG();
        do {
                error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        /*
         * Why flush? filemap_sync_pte already flushed the tlbs with the
         * dirty bits.
         */
        flush_tlb_range(vma, end - size, end);
 out:
        spin_unlock(&vma->vm_mm->page_table_lock);

        return error;
}

134 /*
135  * MS_SYNC syncs the entire file - including mappings.
136  *
137  * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
138  * marks the relevant pages dirty.  The application may now run fsync() to
139  * write out the dirty pages and wait on the writeout and check the result.
140  * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
141  * async writeout immediately.
142  * So my _not_ starting I/O in MS_ASYNC we provide complete flexibility to
143  * applications.
144  */
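/*
 * Illustrative userspace pattern (not part of this file): for a shared,
 * file-backed mapping, the scheme described above looks roughly like
 *
 *	addr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	... modify the mapping ...
 *	msync(addr, len, MS_ASYNC);	// mark pages dirty, no I/O started
 *	fsync(fd);			// write out dirty pages and wait
 */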
static int msync_interval(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int flags)
{
        int ret = 0;
        struct file * file = vma->vm_file;

        if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
                return -EBUSY;

        if (file && (vma->vm_flags & VM_SHARED)) {
                ret = filemap_sync(vma, start, end-start, flags);

                if (!ret && (flags & MS_SYNC)) {
                        struct address_space *mapping = file->f_mapping;
                        int err;

                        down(&mapping->host->i_sem);
                        ret = filemap_fdatawrite(mapping);
                        if (file->f_op && file->f_op->fsync) {
                                err = file->f_op->fsync(file, file->f_dentry, 1);
                                if (err && !ret)
                                        ret = err;
                        }
                        err = filemap_fdatawait(mapping);
                        if (!ret)
                                ret = err;
                        up(&mapping->host->i_sem);
                }
        }
        return ret;
}

asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct vm_area_struct * vma;
        int unmapped_error, error = -EINVAL;

        down_read(&current->mm->mmap_sem);
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                goto out;
        if (start & ~PAGE_MASK)
                goto out;
        if ((flags & MS_ASYNC) && (flags & MS_SYNC))
                goto out;
        error = -ENOMEM;
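        /*
         * Round len up to a whole number of pages: with 4K pages, for
         * example, len = 5000 becomes (5000 + 4095) & ~4095 = 8192.
         */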
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                goto out;
        error = 0;
        if (end == start)
                goto out;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
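        /*
         * Concretely: a range spanning a hole between two mappings still
         * gets its mapped parts synced, but the call as a whole returns
         * -ENOMEM via unmapped_error.
         */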
        vma = find_vma(current->mm, start);
        unmapped_error = 0;
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = msync_interval(vma, start, end, flags);
                                if (error)
                                        goto out;
                        }
                        error = unmapped_error;
                        goto out;
                }
                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = msync_interval(vma, start, vma->vm_end, flags);
                if (error)
                        goto out;
                start = vma->vm_end;
                vma = vma->vm_next;
        }
out:
        up_read(&current->mm->mmap_sem);
        return error;
}