/* [linux-2.6.git] arch/um/kernel/tt/tlb.c */
1 /* 
2  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3  * Copyright 2003 PathScale, Inc.
4  * Licensed under the GPL
5  */
6
7 #include "linux/stddef.h"
8 #include "linux/kernel.h"
9 #include "linux/sched.h"
10 #include "linux/mm.h"
11 #include "asm/page.h"
12 #include "asm/pgtable.h"
13 #include "asm/uaccess.h"
14 #include "asm/tlbflush.h"
15 #include "user_util.h"
16 #include "mem_user.h"
17 #include "os.h"
18
/* Make the host process's address space match the page tables of "mm"
 * over [start_addr, end_addr): ranges whose page-table entries are absent
 * are munmapped in the host (when "force" is set, or when the entry is
 * flagged as newly changed), present ptes that changed are remapped, and
 * ptes whose protection alone changed are reprotected.  Entries are marked
 * up to date as they are processed.  Panics on any host munmap failure, or
 * if called in the wrong host process.
 */
static void fix_range(struct mm_struct *mm, unsigned long start_addr, 
                      unsigned long end_addr, int force)
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        unsigned long addr, end;
        int r, w, x, err;

        /* In tt mode the kernel runs inside the process's own host address
         * space, so fixing up any other address space would modify the
         * wrong host process's mappings.
         */
        if((current->thread.mode.tt.extern_pid != -1) && 
           (current->thread.mode.tt.extern_pid != os_getpid()))
                panic("fix_range fixing wrong address space, current = 0x%p",
                      current);
        if(mm == NULL) return;
        for(addr=start_addr;addr<end_addr;){
                if(addr == TASK_SIZE){
                        /* Skip over kernel text, kernel data, and physical
                         * memory, which don't have ptes, plus kernel virtual
                         * memory, which is flushed separately, and remap
                         * the process stack.  The only way to get here is
                         * if (end_addr == STACK_TOP) > TASK_SIZE, which is
                         * only true in the honeypot case.
                         */
                        addr = STACK_TOP - ABOVE_KMEM;
                        continue;
                }

                /* Absent pgd: drop the host mappings under it if forced or
                 * freshly changed (clamped to end_addr), then skip the
                 * whole pgd range.
                 */
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        if(force || pgd_newpage(*npgd)){
                                end = addr + PGDIR_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = os_unmap_memory((void *) addr,
                                                      end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pgd_mkuptodate(*npgd);
                        }
                        addr += PGDIR_SIZE;
                        continue;
                }

                /* Same treatment at the pud level. */
                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        if(force || pud_newpage(*npud)){
                                end = addr + PUD_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = os_unmap_memory((void *) addr, 
                                                      end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pud_mkuptodate(*npud);
                        }
                        addr += PUD_SIZE;
                        continue;
                }

                /* Same treatment at the pmd level. */
                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        if(force || pmd_newpage(*npmd)){
                                end = addr + PMD_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = os_unmap_memory((void *) addr,
                                                      end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pmd_mkuptodate(*npmd);
                        }
                        addr += PMD_SIZE;
                        continue;
                }

                /* Derive host protections from the pte: a clean pte is
                 * mapped read-only and an old (not young) pte inaccessible
                 * (NOTE(review): presumably so the resulting host faults
                 * can update the dirty/accessed state - the fault path is
                 * not visible in this file).
                 */
                npte = pte_offset_kernel(npmd, addr);
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                if(!pte_dirty(*npte))
                        w = 0;
                if(!pte_young(*npte)){
                        r = 0;
                        w = 0;
                }
                /* New or forced page: unmap it in the host and, if the pte
                 * is present, map the new backing in its place.  Changed
                 * protection alone just needs an mprotect.
                 */
                if(force || pte_newpage(*npte)){
                        err = os_unmap_memory((void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n", -err);
                        if(pte_present(*npte))
                                map_memory(addr, pte_val(*npte) & PAGE_MASK,
                                           PAGE_SIZE, r, w, x);
                }
                else if(pte_newprot(*npte))
                        protect_memory(addr, PAGE_SIZE, r, w, x, 1);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }
}
123
/* Bumped whenever the host's kernel vm mappings are changed; each task
 * compares its thread.mode.tt.vm_seq against this (see flush_tlb_mm_tt)
 * to decide whether it needs to resync the kernel vm range.
 */
atomic_t vmchange_seq = ATOMIC_INIT(1);
125
/* Bring the host's view of the kernel vm range [start, end) into line
 * with init_mm's page tables: unmap ranges whose entries are absent and
 * freshly changed (*_newpage), remap ptes that changed (kernel vm pages
 * are mapped rwx), and reprotect ptes whose protection alone changed.
 * If anything was updated and update_seq is nonzero, vmchange_seq is
 * bumped so other tasks notice the change.  Panics on munmap failure.
 */
static void flush_kernel_vm_range(unsigned long start, unsigned long end, 
                                  int update_seq)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                /* Absent pgd: if it just changed, unmap the covered host
                 * range (clamped to end), then skip the whole pgd.
                 */
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                last = addr + PGDIR_SIZE;
                                if(last > end)
                                        last = end;
                                err = os_unmap_memory((void *) addr, 
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr += PGDIR_SIZE;
                        continue;
                }

                /* Same treatment at the pud level. */
                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        if(pud_newpage(*pud)){
                                updated = 1;
                                last = addr + PUD_SIZE;
                                if(last > end)
                                        last = end;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr += PUD_SIZE;
                        continue;
                }

                /* Same treatment at the pmd level. */
                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                last = addr + PMD_SIZE;
                                if(last > end)
                                        last = end;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr += PMD_SIZE;
                        continue;
                }

                /* Absent or changed pte: unmap the host page and, if the
                 * pte is present, remap the new backing read/write/exec.
                 * A pure protection change just gets an mprotect.
                 */
                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        if(updated && update_seq) atomic_inc(&vmchange_seq);
}
211
/* Resync the host's kernel mappings over [start, end), bumping
 * vmchange_seq (update_seq == 1) so other tasks see the change.
 */
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
        flush_kernel_vm_range(start, end, 1);
}
216
217 static void protect_vm_page(unsigned long addr, int w, int must_succeed)
218 {
219         int err;
220
221         err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
222         if(err == 0) return;
223         else if((err == -EFAULT) || (err == -ENOMEM)){
224                 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
225                 protect_vm_page(addr, w, 1);
226         }
227         else panic("protect_vm_page : protect failed, errno = %d\n", err);
228 }
229
230 void mprotect_kernel_vm(int w)
231 {
232         struct mm_struct *mm;
233         pgd_t *pgd;
234         pud_t *pud;
235         pmd_t *pmd;
236         pte_t *pte;
237         unsigned long addr;
238         
239         mm = &init_mm;
240         for(addr = start_vm; addr < end_vm;){
241                 pgd = pgd_offset(mm, addr);
242                 pud = pud_offset(pgd, addr);
243                 pmd = pmd_offset(pud, addr);
244                 if(pmd_present(*pmd)){
245                         pte = pte_offset_kernel(pmd, addr);
246                         if(pte_present(*pte)) protect_vm_page(addr, w, 0);
247                         addr += PAGE_SIZE;
248                 }
249                 else addr += PMD_SIZE;
250         }
251 }
252
/* Resync the host's mappings of the entire kernel vm area. */
void flush_tlb_kernel_vm_tt(void)
{
        flush_tlb_kernel_range(start_vm, end_vm);
}
257
/* Resync the host's mapping of the single kernel page containing addr. */
void __flush_tlb_one_tt(unsigned long addr)
{
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
262   
263 void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start, 
264                      unsigned long end)
265 {
266         if(vma->vm_mm != current->mm) return;
267
268         /* Assumes that the range start ... end is entirely within
269          * either process memory or kernel vm
270          */
271         if((start >= start_vm) && (start < end_vm)) 
272                 flush_kernel_vm_range(start, end, 1);
273         else fix_range(vma->vm_mm, start, end, 0);
274 }
275
276 void flush_tlb_mm_tt(struct mm_struct *mm)
277 {
278         unsigned long seq;
279
280         if(mm != current->mm) return;
281
282         fix_range(mm, 0, STACK_TOP, 0);
283
284         seq = atomic_read(&vmchange_seq);
285         if(current->thread.mode.tt.vm_seq == seq) return;
286         current->thread.mode.tt.vm_seq = seq;
287         flush_kernel_vm_range(start_vm, end_vm, 0);
288 }
289
/* Rebuild the host's entire view of the current address space: force
 * fix_range over all process memory, then resync the kernel vm area
 * (with update_seq == 0, so vmchange_seq is not bumped).
 */
void force_flush_all_tt(void)
{
        fix_range(current->mm, 0, STACK_TOP, 1);
        flush_kernel_vm_range(start_vm, end_vm, 0);
}
295
296 /*
297  * Overrides for Emacs so that we follow Linus's tabbing style.
298  * Emacs will notice this stuff at the end of the file and automatically
299  * adjust the settings for this buffer only.  This must remain at the end
300  * of the file.
301  * ---------------------------------------------------------------------------
302  * Local variables:
303  * c-file-style: "linux"
304  * End:
305  */