Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
[linux-2.6.git] / mm / mprotect.c
1 /*
2  *  mm/mprotect.c
3  *
4  *  (C) Copyright 1994 Linus Torvalds
5  *  (C) Copyright 2002 Christoph Hellwig
6  *
7  *  Address space accounting code       <alan@redhat.com>
8  *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
9  */
10
11 #include <linux/mm.h>
12 #include <linux/hugetlb.h>
13 #include <linux/slab.h>
14 #include <linux/shm.h>
15 #include <linux/mman.h>
16 #include <linux/fs.h>
17 #include <linux/highmem.h>
18 #include <linux/security.h>
19 #include <linux/mempolicy.h>
20 #include <linux/personality.h>
21 #include <linux/syscalls.h>
22
23 #include <asm/uaccess.h>
24 #include <asm/pgtable.h>
25 #include <asm/pgalloc.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28
/*
 * Apply the new protection bits to every present pte in [addr, end)
 * under one pmd.  The caller guarantees addr < end and that the whole
 * range is covered by *pmd.
 */
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;
	spinlock_t *ptl;

	/* Map the pte page and take the page-table lock for this pmd. */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* Swapped-out or not-mapped ptes are left alone. */
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			/* Arch hook: propagate the protection change (no-op on
			 * most architectures). */
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	/* pte was post-incremented past the last entry; unmap from there. */
	pte_unmap_unlock(pte - 1, ptl);
}
51
52 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
53                 unsigned long addr, unsigned long end, pgprot_t newprot)
54 {
55         pmd_t *pmd;
56         unsigned long next;
57
58         pmd = pmd_offset(pud, addr);
59         do {
60                 next = pmd_addr_end(addr, end);
61                 if (pmd_none_or_clear_bad(pmd))
62                         continue;
63                 change_pte_range(mm, pmd, addr, next, newprot);
64         } while (pmd++, addr = next, addr != end);
65 }
66
67 static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
68                 unsigned long addr, unsigned long end, pgprot_t newprot)
69 {
70         pud_t *pud;
71         unsigned long next;
72
73         pud = pud_offset(pgd, addr);
74         do {
75                 next = pud_addr_end(addr, end);
76                 if (pud_none_or_clear_bad(pud))
77                         continue;
78                 change_pmd_range(mm, pud, addr, next, newprot);
79         } while (pud++, addr = next, addr != end);
80 }
81
82 static void change_protection(struct vm_area_struct *vma,
83                 unsigned long addr, unsigned long end, pgprot_t newprot)
84 {
85         struct mm_struct *mm = vma->vm_mm;
86         pgd_t *pgd;
87         unsigned long next;
88         unsigned long start = addr;
89
90         BUG_ON(addr >= end);
91         pgd = pgd_offset(mm, addr);
92         flush_cache_range(vma, addr, end);
93         do {
94                 next = pgd_addr_end(addr, end);
95                 if (pgd_none_or_clear_bad(pgd))
96                         continue;
97                 change_pud_range(mm, pgd, addr, next, newprot);
98         } while (pgd++, addr = next, addr != end);
99         flush_tlb_range(vma, start, end);
100 }
101
/*
 * Change the protection flags of [start, end) within vma to newflags.
 * Handles VM accounting, tries to merge with neighbouring vmas, and
 * splits vma when the range covers only part of it.
 *
 * On success *pprev is set to the vma now covering the range and 0 is
 * returned; on failure a -errno is returned and any charged memory is
 * un-accounted.  Called with mmap_sem held for writing.
 */
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0, old_end = vma->vm_end;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	/* Nothing to change; report the vma itself as the result. */
	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		/* Only charge when it was not already writable, shared,
		 * or accounted. */
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/* Low four flag bits (read/write/exec/shared) index the arch's
	 * protection map. */
	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	/* Range starts mid-vma: split off the head. */
	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	/* Range ends mid-vma: split off the tail. */
	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	/* NOTE(review): exec-shield (Fedora patch) hook — tells the arch
	 * the old executable range may have shrunk; uses the pre-split
	 * old_end on purpose. */
	if (oldflags & VM_EXEC)
		arch_remove_exec_range(current->mm, old_end);
	/* Huge pages have their own page-table layout and walker. */
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, newprot);
	else
		change_protection(vma, start, end, newprot);
	/* Move the page statistics from the old flag class to the new. */
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
184
/*
 * mprotect(2): change the access protections of the pages in
 * [start, start+len).  Validates arguments, walks the vmas covering
 * the range under mmap_sem, and applies the change vma by vma via
 * mprotect_fixup().  Returns 0 or a -errno.
 */
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	/* start must be page aligned. */
	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	/* Catches wraparound of start + len as well as len overflow. */
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	/* Keep the caller's original prot for the security hook below. */
	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		/* The found vma must actually contain/precede the range. */
		if (vma->vm_start >= end)
			goto out;
		/* Extend the affected range down to the stack vma's base. */
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		/* start must lie inside a mapping, not in a hole. */
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			/* Extend the range up to the vma's current top. */
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	/* If the range starts mid-vma, vma itself is the "previous" vma
	 * for merging purposes. */
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		/* Clamp this step to the end of the current vma. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		/* mprotect_fixup may have merged vmas; resume after prev. */
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		/* A hole in the middle of the range is an error. */
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}