vserver 1.9.5.x5
[linux-2.6.git] / arch / um / kernel / skas / tlb.c
1 /* 
2  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3  * Copyright 2003 PathScale, Inc.
4  * Licensed under the GPL
5  */
6
7 #include "linux/stddef.h"
8 #include "linux/sched.h"
9 #include "linux/mm.h"
10 #include "asm/page.h"
11 #include "asm/pgtable.h"
12 #include "asm/mmu.h"
13 #include "user_util.h"
14 #include "mem_user.h"
15 #include "skas.h"
16 #include "os.h"
17
/*
 * Synchronize the host address-space file of "mm" with the current
 * state of its page tables, for every page in [start_addr, end_addr).
 *
 * The walk descends pgd -> pud -> pmd -> pte.  At each upper level, a
 * non-present entry causes the whole span covered by that entry to be
 * unmapped from the host (but only when "force" is set or the entry is
 * flagged as a new page), after which the walk skips ahead by that
 * level's size.  At the pte level, present pages are (re)mapped into
 * the host with permissions derived from the pte, and pages whose
 * protection changed are re-protected in place.
 *
 * mm:         address space to synchronize; NULL is a no-op
 * start_addr: first virtual address of the range
 * end_addr:   end of the range (exclusive)
 * force:      when non-zero, unmap non-present spans even if they are
 *             not marked new (used by force_flush_all_skas())
 *
 * Panics if any host unmap fails, since the page tables and the host
 * mapping would then be out of sync with no way to recover.
 */
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        unsigned long addr,  end;
        int r, w, x, err, fd;

        if(mm == NULL) return;
        /* File descriptor of the host process backing this mm. */
        fd = mm->context.skas.mm_fd;
        for(addr = start_addr; addr < end_addr;){
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        if(force || pgd_newpage(*npgd)){
                                /* Unmap the pgd's span, clamped to the range end. */
                                end = addr + PGDIR_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = unmap(fd, (void *) addr, end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pgd_mkuptodate(*npgd);
                        }
                        addr += PGDIR_SIZE;
                        continue;
                }

                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        if(force || pud_newpage(*npud)){
                                end = addr + PUD_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = unmap(fd, (void *) addr, end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pud_mkuptodate(*npud);
                        }
                        addr += PUD_SIZE;
                        continue;
                }

                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        if(force || pmd_newpage(*npmd)){
                                end = addr + PMD_SIZE;
                                if(end > end_addr)
                                        end = end_addr;
                                err = unmap(fd, (void *) addr, end - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                pmd_mkuptodate(*npmd);
                        }
                        addr += PMD_SIZE;
                        continue;
                }

                npte = pte_offset_kernel(npmd, addr);
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                /*
                 * Map clean pages read-only so the first write faults
                 * and can set the dirty bit.
                 */
                if(!pte_dirty(*npte))
                        w = 0;
                /*
                 * Map not-young pages with no access so the first
                 * access faults and can set the accessed bit.
                 */
                if(!pte_young(*npte)){
                        r = 0;
                        w = 0;
                }
                if(force || pte_newpage(*npte)){
                        /* New page: drop any old host mapping, then remap. */
                        err = unmap(fd, (void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n", -err);
                        if(pte_present(*npte))
                                map(fd, addr, pte_val(*npte) & PAGE_MASK,
                                    PAGE_SIZE, r, w, x);
                }
                else if(pte_newprot(*npte))
                        /* Same page, new permissions: mprotect in the host. */
                        protect(fd, addr, PAGE_SIZE, r, w, x, 1);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }
}
104
105 void flush_tlb_kernel_range_skas(unsigned long start, unsigned long end)
106 {
107         struct mm_struct *mm;
108         pgd_t *pgd;
109         pud_t *pud;
110         pmd_t *pmd;
111         pte_t *pte;
112         unsigned long addr, last;
113         int updated = 0, err;
114
115         mm = &init_mm;
116         for(addr = start; addr < end;){
117                 pgd = pgd_offset(mm, addr);
118                 pud = pud_offset(pgd, addr);
119                 pmd = pmd_offset(pud, addr);
120                 if(!pgd_present(*pgd)){
121                         if(pgd_newpage(*pgd)){
122                                 updated = 1;
123                                 last = addr + PGDIR_SIZE;
124                                 if(last > end)
125                                         last = end;
126                                 err = os_unmap_memory((void *) addr, 
127                                                       last - addr);
128                                 if(err < 0)
129                                         panic("munmap failed, errno = %d\n",
130                                               -err);
131                         }
132                         addr += PGDIR_SIZE;
133                         continue;
134                 }
135
136                 pud = pud_offset(pgd, addr);
137                 if(!pud_present(*pud)){
138                         if(pud_newpage(*pud)){
139                                 updated = 1;
140                                 last = addr + PUD_SIZE;
141                                 if(last > end)
142                                         last = end;
143                                 err = os_unmap_memory((void *) addr,
144                                                       last - addr);
145                                 if(err < 0)
146                                         panic("munmap failed, errno = %d\n",
147                                               -err);
148                         }
149                         addr += PUD_SIZE;
150                         continue;
151                 }
152
153                 pmd = pmd_offset(pud, addr);
154                 if(!pmd_present(*pmd)){
155                         if(pmd_newpage(*pmd)){
156                                 updated = 1;
157                                 last = addr + PMD_SIZE;
158                                 if(last > end)
159                                         last = end;
160                                 err = os_unmap_memory((void *) addr,
161                                                       last - addr);
162                                 if(err < 0)
163                                         panic("munmap failed, errno = %d\n",
164                                               -err);
165                         }
166                         addr += PMD_SIZE;
167                         continue;
168                 }
169
170                 pte = pte_offset_kernel(pmd, addr);
171                 if(!pte_present(*pte) || pte_newpage(*pte)){
172                         updated = 1;
173                         err = os_unmap_memory((void *) addr, PAGE_SIZE);
174                         if(err < 0)
175                                 panic("munmap failed, errno = %d\n", -err);
176                         if(pte_present(*pte))
177                                 map_memory(addr, pte_val(*pte) & PAGE_MASK,
178                                            PAGE_SIZE, 1, 1, 1);
179                 }
180                 else if(pte_newprot(*pte)){
181                         updated = 1;
182                         protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
183                 }
184                 addr += PAGE_SIZE;
185         }
186 }
187
188 void flush_tlb_kernel_vm_skas(void)
189 {
190         flush_tlb_kernel_range_skas(start_vm, end_vm);
191 }
192
193 void __flush_tlb_one_skas(unsigned long addr)
194 {
195         flush_tlb_kernel_range_skas(addr, addr + PAGE_SIZE);
196 }
197
198 void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start, 
199                      unsigned long end)
200 {
201         if(vma->vm_mm == NULL)
202                 flush_tlb_kernel_range_skas(start, end);
203         else fix_range(vma->vm_mm, start, end, 0);
204 }
205
206 void flush_tlb_mm_skas(struct mm_struct *mm)
207 {
208         flush_tlb_kernel_vm_skas();
209         fix_range(mm, 0, host_task_size, 0);
210 }
211
212 void force_flush_all_skas(void)
213 {
214         fix_range(current->mm, 0, host_task_size, 1);
215 }
216
217 /*
218  * Overrides for Emacs so that we follow Linus's tabbing style.
219  * Emacs will notice this stuff at the end of the file and automatically
220  * adjust the settings for this buffer only.  This must remain at the end
221  * of the file.
222  * ---------------------------------------------------------------------------
223  * Local variables:
224  * c-file-style: "linux"
225  * End:
226  */