arch/um/kernel/tt/tlb.c (linux-2.6.6)
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "mem_user.h"
#include "os.h"

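/* fix_range - bring the host address space into line with the page tables
 * of "mm" over [start_addr, end_addr).  UML has no hardware TLB; a "TLB
 * flush" in tt mode means replaying pending pte changes onto the host
 * with munmap/mmap/mprotect.  A pte for which pte_newpage() is true has
 * its host page unmapped and, if the pte is still present, remapped with
 * its current permissions; one for which only pte_newprot() is true just
 * gets its host protections changed.  "force" unmaps every page
 * regardless of its flags.
 */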
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *npgd;
        pmd_t *npmd;
        pte_t *npte;
        unsigned long addr;
        int r, w, x, err;

        /* The host mappings can only be fixed from inside the host
         * process that owns this address space, so make sure that is
         * where we are running.
         */
        if((current->thread.mode.tt.extern_pid != -1) &&
           (current->thread.mode.tt.extern_pid != os_getpid()))
                panic("fix_range fixing wrong address space, current = 0x%p",
                      current);
        if(mm == NULL) return;
        for(addr=start_addr;addr<end_addr;){
                if(addr == TASK_SIZE){
                        /* Skip over kernel text, kernel data, and physical
                         * memory, which don't have ptes, plus kernel virtual
                         * memory, which is flushed separately, and remap
                         * the process stack.  The only way to get here is
                         * if (end_addr == STACK_TOP) > TASK_SIZE, which is
                         * only true in the honeypot case.
                         */
                        addr = STACK_TOP - ABOVE_KMEM;
                        continue;
                }
                npgd = pgd_offset(mm, addr);
                npmd = pmd_offset(npgd, addr);
                if(pmd_present(*npmd)){
                        npte = pte_offset_kernel(npmd, addr);
                        r = pte_read(*npte);
                        w = pte_write(*npte);
                        x = pte_exec(*npte);
                        /* The host can't set accessed or dirty bits in our
                         * ptes, so emulate them: map unreferenced pages
                         * inaccessible and clean pages read-only, and let
                         * the resulting faults set the bits.
                         */
                        if(!pte_dirty(*npte)) w = 0;
                        if(!pte_young(*npte)){
                                r = 0;
                                w = 0;
                        }
                        if(force || pte_newpage(*npte)){
                                err = os_unmap_memory((void *) addr,
                                                      PAGE_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                if(pte_present(*npte))
                                        map_memory(addr,
                                                   pte_val(*npte) & PAGE_MASK,
                                                   PAGE_SIZE, r, w, x);
                        }
                        else if(pte_newprot(*npte)){
                                protect_memory(addr, PAGE_SIZE, r, w, x, 1);
                        }
                        *npte = pte_mkuptodate(*npte);
                        addr += PAGE_SIZE;
                }
                else {
                        if(force || pmd_newpage(*npmd)){
                                err = os_unmap_memory((void *) addr, PMD_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pmd_mkuptodate(*npmd);
                        }
                        addr += PMD_SIZE;
                }
        }
}

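/* Incremented on every change to the kernel vm area's mappings.  Each
 * process address space records the sequence number it last synced to
 * (thread.mode.tt.vm_seq), so flush_tlb_mm_tt() can skip the kernel vm
 * resync when nothing has changed.
 */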
atomic_t vmchange_seq = ATOMIC_INIT(1);

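/* Like fix_range(), but for the kernel vm area of init_mm: pending pte
 * changes are replayed onto the host, with kernel pages always mapped
 * read-write-execute.  If anything changed and update_seq is set,
 * vmchange_seq is bumped so other address spaces resync lazily.
 */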
static void flush_kernel_vm_range(unsigned long start, unsigned long end,
                                  int update_seq)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                pmd = pmd_offset(pgd, addr);
                if(pmd_present(*pmd)){
                        pte = pte_offset_kernel(pmd, addr);
                        if(!pte_present(*pte) || pte_newpage(*pte)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      PAGE_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                if(pte_present(*pte))
                                        map_memory(addr,
                                                   pte_val(*pte) & PAGE_MASK,
                                                   PAGE_SIZE, 1, 1, 1);
                        }
                        else if(pte_newprot(*pte)){
                                updated = 1;
                                protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
                        }
                        addr += PAGE_SIZE;
                }
                else {
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr, PMD_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr += PMD_SIZE;
                }
        }
        if(updated && update_seq) atomic_inc(&vmchange_seq);
}

void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
        flush_kernel_vm_range(start, end, 1);
}

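/* Change the host write permission on a single kernel vm page.  If the
 * host mprotect fails with EFAULT or ENOMEM, the page presumably is not
 * mapped on the host yet; flushing it replays the pte onto the host, so
 * flush and retry, this time insisting on success.
 */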
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
        int err;

        err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
        if(err == 0) return;
        else if((err == -EFAULT) || (err == -ENOMEM)){
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                protect_vm_page(addr, w, 1);
        }
        else panic("protect_vm_page : protect failed, errno = %d\n", err);
}

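/* Walk the kernel vm area and enable (w != 0) or disable write access on
 * every mapped page.  In tt mode the kernel shares the process's host
 * address space, so this is what write-protects kernel memory while
 * userspace runs.
 */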
void mprotect_kernel_vm(int w)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr;

        mm = &init_mm;
        for(addr = start_vm; addr < end_vm;){
                pgd = pgd_offset(mm, addr);
                pmd = pmd_offset(pgd, addr);
                if(pmd_present(*pmd)){
                        pte = pte_offset_kernel(pmd, addr);
                        if(pte_present(*pte)) protect_vm_page(addr, w, 0);
                        addr += PAGE_SIZE;
                }
                else addr += PMD_SIZE;
        }
}

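/* Resync the host with the entire kernel vm area. */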
void flush_tlb_kernel_vm_tt(void)
{
        flush_tlb_kernel_range(start_vm, end_vm);
}

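/* Resync a single page of kernel vm with the host. */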
void __flush_tlb_one_tt(unsigned long addr)
{
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}

void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        if(vma->vm_mm != current->mm) return;

        /* Assumes that the range start ... end is entirely within
         * either process memory or kernel vm
         */
        if((start >= start_vm) && (start < end_vm))
                flush_kernel_vm_range(start, end, 1);
        else fix_range(vma->vm_mm, start, end, 0);
}

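/* Resync the host with an entire address space.  Only the current mm can
 * be fixed, since in tt mode that is the only host address space we are
 * running in.  Afterwards, resync the kernel vm area too, but only if
 * vmchange_seq says it has changed since this thread last synced it.
 */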
void flush_tlb_mm_tt(struct mm_struct *mm)
{
        unsigned long seq;

        if(mm != current->mm) return;

        fix_range(mm, 0, STACK_TOP, 0);

        seq = atomic_read(&vmchange_seq);
        if(current->thread.mode.tt.vm_seq == seq) return;
        current->thread.mode.tt.vm_seq = seq;
        flush_kernel_vm_range(start_vm, end_vm, 0);
}

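/* Unconditionally rebuild the host mappings for the current process and
 * the kernel vm area, ignoring the ptes' new-page flags.
 */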
void force_flush_all_tt(void)
{
        fix_range(current->mm, 0, STACK_TOP, 1);
        flush_kernel_vm_range(start_vm, end_vm, 0);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */