2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
7 #include "linux/stddef.h"
8 #include "linux/kernel.h"
9 #include "linux/sched.h"
12 #include "asm/pgtable.h"
13 #include "asm/uaccess.h"
14 #include "asm/tlbflush.h"
15 #include "user_util.h"
/*
 * Resync the host address space with this process's page tables over
 * [start_addr, end_addr).  The pgd/pud/pmd/pte hierarchy is walked;
 * at each level an absent entry that is marked "newpage" (or any
 * absent entry when "force" is set) gets its host mapping torn down
 * with os_unmap_memory(), while present ptes are remapped or
 * reprotected on the host and then marked up to date.
 *
 * NOTE(review): this extracted view is missing a number of the
 * original lines (local declarations such as npgd/npud/npmd/npte,
 * closing braces, and trailing call arguments).  The visible code
 * tokens are preserved unchanged; only comments were added.
 */
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
	unsigned long addr, end;

	/* In tt mode the kernel runs inside the traced process's own
	 * address space, so fixing any other address space would
	 * corrupt the wrong process - panic instead of continuing. */
	if((current->thread.mode.tt.extern_pid != -1) &&
	   (current->thread.mode.tt.extern_pid != os_getpid()))
		panic("fix_range fixing wrong address space, current = 0x%p",
	if(mm == NULL) return;
	for(addr=start_addr;addr<end_addr;){
		if(addr == TASK_SIZE){
			/* Skip over kernel text, kernel data, and physical
			 * memory, which don't have ptes, plus kernel virtual
			 * memory, which is flushed separately, and remap
			 * the process stack. The only way to get here is
			 * if (end_addr == STACK_TOP) > TASK_SIZE, which is
			 * only true in the honeypot case.
			 */
			addr = STACK_TOP - ABOVE_KMEM;

		/* pgd level: unmap a full PGDIR_SIZE span on the host for
		 * an absent, newly-cleared (or forced) entry. */
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			if(force || pgd_newpage(*npgd)){
				end = addr + PGDIR_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",
				pgd_mkuptodate(*npgd);

		/* pud level: same as above for a PUD_SIZE span. */
		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			if(force || pud_newpage(*npud)){
				end = addr + PUD_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",
				pud_mkuptodate(*npud);

		/* pmd level: same as above for a PMD_SIZE span. */
		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			if(force || pmd_newpage(*npmd)){
				end = addr + PMD_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",
				pmd_mkuptodate(*npmd);

		/* pte level: derive the host protections from the pte.
		 * Presumably !dirty clears w and !young clears r/w so the
		 * next access/write faults and updates the accessed/dirty
		 * bits - TODO confirm against the unabridged source. */
		npte = pte_offset_kernel(npmd, addr);
		w = pte_write(*npte);
		if(!pte_dirty(*npte))
		if(!pte_young(*npte)){
		/* New page: drop the old host mapping, then either remap
		 * the page frame or just fix its protection. */
		if(force || pte_newpage(*npte)){
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
				panic("munmap failed, errno = %d\n", -err);
			if(pte_present(*npte))
				map_memory(addr, pte_val(*npte) & PAGE_MASK,
		else if(pte_newprot(*npte))
			protect_memory(addr, PAGE_SIZE, r, w, x, 1);
		*npte = pte_mkuptodate(*npte);
/* Bumped every time the kernel vm mappings change (see
 * flush_kernel_vm_range).  Each thread caches the last value it saw
 * in thread.mode.tt.vm_seq and compares against this to decide
 * whether it still needs to flush kernel vm (see flush_tlb_mm_tt). */
atomic_t vmchange_seq = ATOMIC_INIT(1);
/*
 * Resync the host mappings for a range of kernel virtual memory -
 * the kernel-vm analogue of fix_range().  Levels that are absent and
 * marked "newpage" get their host span unmapped; stale ptes are
 * unmapped and, if present, remapped; prot-only changes just call
 * protect_memory().  If update_seq is set and anything changed,
 * vmchange_seq is bumped so other threads know their cached view of
 * kernel vm is stale.
 *
 * NOTE(review): missing original lines here include the mm
 * initialization (presumably &init_mm - confirm), the pgd/pud/pmd/pte
 * declarations, closing braces, and trailing call arguments.  The
 * visible code tokens are preserved unchanged.
 */
static void flush_kernel_vm_range(unsigned long start, unsigned long end,
	struct mm_struct *mm;
	unsigned long addr, last;
	int updated = 0, err;

	for(addr = start; addr < end;){
		/* pgd level: unmap a PGDIR_SIZE span for newly-cleared
		 * entries. */
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			if(pgd_newpage(*pgd)){
				last = addr + PGDIR_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",

		/* pud level: same for a PUD_SIZE span. */
		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			if(pud_newpage(*pud)){
				last = addr + PUD_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",

		/* pmd level: same for a PMD_SIZE span. */
		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			if(pmd_newpage(*pmd)){
				last = addr + PMD_SIZE;
				err = os_unmap_memory((void *) addr,
					panic("munmap failed, errno = %d\n",

		/* pte level: unmap stale pages, remap present ones, or
		 * just fix host protections for prot-only changes. */
		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			err = os_unmap_memory((void *) addr,
				panic("munmap failed, errno = %d\n",
			if(pte_present(*pte))
				pte_val(*pte) & PAGE_MASK,
		else if(pte_newprot(*pte)){
			protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);

	/* Publish the change so lazily-flushing threads notice it. */
	if(updated && update_seq) atomic_inc(&vmchange_seq);
/*
 * tt-mode implementation of flush_tlb_kernel_range: resync the host
 * mappings for the given kernel range and (update_seq == 1) bump
 * vmchange_seq so other threads see the change.
 */
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
	flush_kernel_vm_range(start, end, 1);
}
/*
 * Change the host write protection of one kernel vm page (read and
 * execute stay enabled).  If the protect fails with EFAULT/ENOMEM the
 * page is not mapped on the host yet, so flush it in (which maps it)
 * and retry once with must_succeed set - the retry's failure path
 * panics, so the recursion cannot loop.  Any other error panics
 * immediately.
 *
 * NOTE(review): the "int err;" declaration and the success
 * early-return branch are missing from this extracted view; visible
 * code tokens are preserved unchanged.
 */
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
	err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
	else if((err == -EFAULT) || (err == -ENOMEM)){
		/* Page not mapped on the host yet - map it, then retry. */
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		protect_vm_page(addr, w, 1);
	else panic("protect_vm_page : protect failed, errno = %d\n", err);
/*
 * Set (w != 0) or clear the host write permission on every present
 * page of kernel virtual memory [start_vm, end_vm).  An absent pmd
 * lets the walk skip a whole PMD_SIZE stride at once.
 *
 * NOTE(review): missing original lines include the declarations of
 * addr and the page-table locals, the mm initialization, and the
 * per-page addr increment in the present-pmd branch; visible code
 * tokens are preserved unchanged.
 */
void mprotect_kernel_vm(int w)
	struct mm_struct *mm;

	for(addr = start_vm; addr < end_vm;){
		pgd = pgd_offset(mm, addr);
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);
		if(pmd_present(*pmd)){
			pte = pte_offset_kernel(pmd, addr);
			/* must_succeed == 0: first failure retries after a
			 * flush inside protect_vm_page(). */
			if(pte_present(*pte)) protect_vm_page(addr, w, 0);
		else addr += PMD_SIZE;
253 void flush_tlb_kernel_vm_tt(void)
255 flush_tlb_kernel_range(start_vm, end_vm);
258 void __flush_tlb_one_tt(unsigned long addr)
260 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
/*
 * Flush a range belonging to vma.  In tt mode only the current
 * address space can be touched, so foreign mms are ignored; the range
 * is dispatched either to the kernel-vm flusher or to fix_range().
 *
 * NOTE(review): the continuation line of the signature (presumably
 * "unsigned long end)" - confirm) is missing from this extracted
 * view; visible code tokens are preserved unchanged.
 */
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
	if(vma->vm_mm != current->mm) return;

	/* Assumes that the range start ... end is entirely within
	 * either process memory or kernel vm
	 */
	if((start >= start_vm) && (start < end_vm))
		flush_kernel_vm_range(start, end, 1);
	else fix_range(vma->vm_mm, start, end, 0);
/*
 * Flush the whole address space of mm: the process range via
 * fix_range(), then kernel vm - but only when vmchange_seq shows
 * this thread's cached view is stale, making the kernel-vm flush
 * lazy.  Foreign mms are ignored (tt mode can only touch its own
 * address space).
 *
 * NOTE(review): the declaration of "seq" (presumably unsigned) is
 * missing from this extracted view; visible code tokens are
 * preserved unchanged.
 */
void flush_tlb_mm_tt(struct mm_struct *mm)
	if(mm != current->mm) return;

	fix_range(mm, 0, STACK_TOP, 0);

	/* Skip the kernel-vm flush if nothing changed since this
	 * thread last synced; otherwise record the new seq first. */
	seq = atomic_read(&vmchange_seq);
	if(current->thread.mode.tt.vm_seq == seq) return;
	current->thread.mode.tt.vm_seq = seq;
	flush_kernel_vm_range(start_vm, end_vm, 0);
290 void force_flush_all_tt(void)
292 fix_range(current->mm, 0, STACK_TOP, 1);
293 flush_kernel_vm_range(start_vm, end_vm, 0);
297 * Overrides for Emacs so that we follow Linus's tabbing style.
298 * Emacs will notice this stuff at the end of the file and automatically
299 * adjust the settings for this buffer only. This must remain at the end
301 * ---------------------------------------------------------------------------
303 * c-file-style: "linux"