2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
6 #include "linux/stddef.h"
7 #include "linux/kernel.h"
8 #include "linux/sched.h"
11 #include "asm/pgtable.h"
12 #include "asm/uaccess.h"
13 #include "asm/tlbflush.h"
14 #include "user_util.h"
/*
 * fix_range - reconcile the host process' mappings with mm's page tables
 * over [start_addr, end_addr).  For each present pte the host page is
 * unmapped when 'force' is set or the pte is flagged as a new page, then
 * remapped/reprotected from the pte's current r/w/x state; regions with
 * no pte level are unmapped a PMD_SIZE chunk at a time.
 * NOTE(review): this excerpt is missing several lines (local variable
 * declarations, braces, the remap call after the unmap, and the r/w/x
 * initializers); comments below describe only the visible code.
 */
18 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
19 unsigned long end_addr, int force)
/* In TT mode the process address space lives inside a specific host
 * process; a mismatched extern_pid means we would be rewriting someone
 * else's mappings, which is unrecoverable. */
27 if((current->thread.mode.tt.extern_pid != -1) &&
28 (current->thread.mode.tt.extern_pid != os_getpid()))
29 panic("fix_range fixing wrong address space, current = 0x%p",
/* Kernel threads have no user mm - nothing to fix. */
31 if(mm == NULL) return;
32 for(addr=start_addr;addr<end_addr;){
33 if(addr == TASK_SIZE){
34 /* Skip over kernel text, kernel data, and physical
35 * memory, which don't have ptes, plus kernel virtual
36 * memory, which is flushed separately, and remap
37 * the process stack. The only way to get here is
38 * if (end_addr == STACK_TOP) > TASK_SIZE, which is
39 * only true in the honeypot case.
41 addr = STACK_TOP - ABOVE_KMEM;
44 npgd = pgd_offset(mm, addr);
45 npmd = pmd_offset(npgd, addr);
46 if(pmd_present(*npmd)){
47 npte = pte_offset_kernel(npmd, addr);
/* A clean pte is mapped read-only and an old (not young) pte is made
 * inaccessible - presumably so the host fault path can emulate the
 * dirty/accessed bits; TODO(review) confirm against the fault handler. */
51 if(!pte_dirty(*npte)) w = 0;
52 if(!pte_young(*npte)){
/* Page replaced (or caller demands it): drop the host mapping and,
 * if the pte is present, remap it at its current physical page. */
56 if(force || pte_newpage(*npte)){
57 err = os_unmap_memory((void *) addr,
60 panic("munmap failed, errno = %d\n",
62 if(pte_present(*npte))
64 pte_val(*npte) & PAGE_MASK,
/* Only the protection changed - adjust it in place rather than
 * unmapping and remapping the page. */
67 else if(pte_newprot(*npte)){
68 protect_memory(addr, PAGE_SIZE, r, w, x, 1);
/* Record that the host mapping now matches this pte. */
70 *npte = pte_mkuptodate(*npte);
/* No pte level here: unmap the whole PMD_SIZE region if it was
 * replaced, then mark the pmd up to date. */
74 if(force || pmd_newpage(*npmd)){
75 err = os_unmap_memory((void *) addr, PMD_SIZE);
77 panic("munmap failed, errno = %d\n",
79 pmd_mkuptodate(*npmd);
/* Sequence counter bumped whenever the kernel vm area's host mappings
 * change; flush_tlb_mm_tt compares it against the per-thread
 * thread.mode.tt.vm_seq to decide whether the kernel vm range must be
 * reflushed in this thread. */
86 atomic_t vmchange_seq = ATOMIC_INIT(1);
/*
 * flush_kernel_vm_range - bring the host mappings of a kernel virtual
 * range [start, end) back in sync with the kernel page tables: stale or
 * replaced pages are unmapped and, if still present, remapped with full
 * rwx; protection-only changes are applied in place.  When anything was
 * updated and 'update_seq' is nonzero, vmchange_seq is bumped so other
 * threads notice the change (see flush_tlb_mm_tt).
 * NOTE(review): local declarations, braces and the remap call are
 * missing from this excerpt; comments describe only the visible code.
 */
88 static void flush_kernel_vm_range(unsigned long start, unsigned long end,
99 for(addr = start; addr < end;){
100 pgd = pgd_offset(mm, addr);
101 pmd = pmd_offset(pgd, addr);
102 if(pmd_present(*pmd)){
103 pte = pte_offset_kernel(pmd, addr);
/* Page gone or replaced: drop the host mapping and, if the pte is
 * still present, remap it read/write/execute. */
104 if(!pte_present(*pte) || pte_newpage(*pte)){
106 err = os_unmap_memory((void *) addr,
109 panic("munmap failed, errno = %d\n",
111 if(pte_present(*pte))
113 pte_val(*pte) & PAGE_MASK,
/* Only the protection changed; kernel vm pages get full rwx. */
116 else if(pte_newprot(*pte)){
118 protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
/* No pte level - unmap the whole PMD_SIZE chunk if it was replaced. */
123 if(pmd_newpage(*pmd)){
125 err = os_unmap_memory((void *) addr, PMD_SIZE);
127 panic("munmap failed, errno = %d\n",
/* Publish the change so other threads reflush (see flush_tlb_mm_tt). */
133 if(updated && update_seq) atomic_inc(&vmchange_seq);
/* TT-mode implementation of flush_tlb_kernel_range: sync the range and
 * always bump vmchange_seq (update_seq == 1). */
136 void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
138 flush_kernel_vm_range(start, end, 1);
/*
 * protect_vm_page - set the host protection of one kernel vm page to
 * read/execute plus optional write ('w').  -EFAULT/-ENOMEM are treated
 * as "page not mapped on the host yet": the page is flushed into
 * existence and the protect retried once with must_succeed set; any
 * other failure is fatal.
 * NOTE(review): the error-variable declaration and the success-return
 * branch are missing from this excerpt.
 */
141 static void protect_vm_page(unsigned long addr, int w, int must_succeed)
145 err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
/* Likely unmapped on the host - materialize the mapping, then retry
 * and insist on success this time. */
147 else if((err == -EFAULT) || (err == -ENOMEM)){
148 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
149 protect_vm_page(addr, w, 1);
151 else panic("protect_vm_page : protect failed, errno = %d\n", err);
/*
 * mprotect_kernel_vm - walk the kernel vm area [start_vm, end_vm) and
 * set the host write permission of every present page to 'w'
 * (read/execute are left enabled by protect_vm_page).
 * NOTE(review): the mm initialization, other local declarations and the
 * pte-level loop increment are missing from this excerpt.
 */
154 void mprotect_kernel_vm(int w)
156 struct mm_struct *mm;
163 for(addr = start_vm; addr < end_vm;){
164 pgd = pgd_offset(mm, addr);
165 pmd = pmd_offset(pgd, addr);
166 if(pmd_present(*pmd)){
167 pte = pte_offset_kernel(pmd, addr);
/* must_succeed == 0: an unmapped page triggers a flush-and-retry
 * inside protect_vm_page rather than a panic. */
168 if(pte_present(*pte)) protect_vm_page(addr, w, 0);
/* No page table at this level - skip the whole PMD_SIZE chunk. */
171 else addr += PMD_SIZE;
/* Flush the whole kernel vm area in one call. */
175 void flush_tlb_kernel_vm_tt(void)
177 flush_tlb_kernel_range(start_vm, end_vm);
/* Flush a single page's host mapping. */
180 void __flush_tlb_one_tt(unsigned long addr)
182 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
/*
 * flush_tlb_range_tt - flush [start, end) belonging to 'vma'.  Only the
 * current address space can be fixed in TT mode, so other mms are
 * silently ignored; the range is routed to the kernel-vm or the
 * process-memory fixer depending on where it starts.
 */
185 void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
188 if(vma->vm_mm != current->mm) return;
190 /* Assumes that the range start ... end is entirely within
191 * either process memory or kernel vm
193 if((start >= start_vm) && (start < end_vm))
194 flush_kernel_vm_range(start, end, 1);
195 else fix_range(vma->vm_mm, start, end, 0);
/*
 * flush_tlb_mm_tt - flush the current process' entire address space,
 * then reflush the kernel vm area only if vmchange_seq shows that some
 * other thread changed it since this thread last synced.
 * NOTE(review): the declaration of 'seq' is missing from this excerpt.
 */
198 void flush_tlb_mm_tt(struct mm_struct *mm)
202 if(mm != current->mm) return;
204 fix_range(mm, 0, STACK_TOP, 0);
/* Cheap sequence-counter comparison avoids rewalking the kernel vm
 * range when nothing changed; update_seq == 0 below because this
 * thread is consuming, not publishing, a change. */
206 seq = atomic_read(&vmchange_seq);
207 if(current->thread.mode.tt.vm_seq == seq) return;
208 current->thread.mode.tt.vm_seq = seq;
209 flush_kernel_vm_range(start_vm, end_vm, 0);
/* Unconditionally rebuild all host mappings: force-fix the whole
 * process range, then resync the kernel vm area. */
212 void force_flush_all_tt(void)
214 fix_range(current->mm, 0, STACK_TOP, 1);
215 flush_kernel_vm_range(start_vm, end_vm, 0);
219 * Overrides for Emacs so that we follow Linus's tabbing style.
220 * Emacs will notice this stuff at the end of the file and automatically
221 * adjust the settings for this buffer only. This must remain at the end
223 * ---------------------------------------------------------------------------
225 * c-file-style: "linux"