#include "user_util.h"
#include "mem_user.h"
#include "os.h"
+#include "tlb.h"
-static void fix_range(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr, int force)
+static void do_ops(int unused, struct host_vm_op *ops, int last)
{
- pgd_t *npgd;
- pud_t *npud;
- pmd_t *npmd;
- pte_t *npte;
- unsigned long addr, end;
- int r, w, x, err;
-
- if((current->thread.mode.tt.extern_pid != -1) &&
- (current->thread.mode.tt.extern_pid != os_getpid()))
- panic("fix_range fixing wrong address space, current = 0x%p",
- current);
- if(mm == NULL) return;
- for(addr=start_addr;addr<end_addr;){
- if(addr == TASK_SIZE){
- /* Skip over kernel text, kernel data, and physical
- * memory, which don't have ptes, plus kernel virtual
- * memory, which is flushed separately, and remap
- * the process stack. The only way to get here is
- * if (end_addr == STACK_TOP) > TASK_SIZE, which is
- * only true in the honeypot case.
- */
- addr = STACK_TOP - ABOVE_KMEM;
- continue;
- }
-
- npgd = pgd_offset(mm, addr);
- if(!pgd_present(*npgd)){
- if(force || pgd_newpage(*npgd)){
- end = addr + PGDIR_SIZE;
- if(end > end_addr)
- end = end_addr;
- err = os_unmap_memory((void *) addr,
- end - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- pgd_mkuptodate(*npgd);
- }
- addr += PGDIR_SIZE;
- continue;
- }
-
- npud = pud_offset(npgd, addr);
- if(!pud_present(*npud)){
- if(force || pud_newpage(*npud)){
- end = addr + PUD_SIZE;
- if(end > end_addr)
- end = end_addr;
- err = os_unmap_memory((void *) addr,
- end - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- pud_mkuptodate(*npud);
- }
- addr += PUD_SIZE;
- continue;
- }
-
- npmd = pmd_offset(npud, addr);
- if(!pmd_present(*npmd)){
- if(force || pmd_newpage(*npmd)){
- end = addr + PMD_SIZE;
- if(end > end_addr)
- end = end_addr;
- err = os_unmap_memory((void *) addr,
- end - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- pmd_mkuptodate(*npmd);
- }
- addr += PMD_SIZE;
- continue;
- }
-
- npte = pte_offset_kernel(npmd, addr);
- r = pte_read(*npte);
- w = pte_write(*npte);
- x = pte_exec(*npte);
- if(!pte_dirty(*npte))
- w = 0;
- if(!pte_young(*npte)){
- r = 0;
- w = 0;
+ struct host_vm_op *op;
+ int i;
+
+ for(i = 0; i <= last; i++){
+ op = &ops[i];
+ switch(op->type){
+ case MMAP:
+ os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
+ op->u.mmap.offset, op->u.mmap.len,
+ op->u.mmap.r, op->u.mmap.w,
+ op->u.mmap.x);
+ break;
+ case MUNMAP:
+ os_unmap_memory((void *) op->u.munmap.addr,
+ op->u.munmap.len);
+ break;
+		case MPROTECT: /* length must come from the mprotect union member */
+			protect_memory(op->u.mprotect.addr, op->u.mprotect.len,
+				       op->u.mprotect.r, op->u.mprotect.w,
+				       op->u.mprotect.x, 1);
+			break;
+ default:
+ printk("Unknown op type %d in do_ops\n", op->type);
+ break;
}
- if(force || pte_newpage(*npte)){
- err = os_unmap_memory((void *) addr, PAGE_SIZE);
- if(err < 0)
- panic("munmap failed, errno = %d\n", -err);
- if(pte_present(*npte))
- map_memory(addr, pte_val(*npte) & PAGE_MASK,
- PAGE_SIZE, r, w, x);
- }
- else if(pte_newprot(*npte))
- protect_memory(addr, PAGE_SIZE, r, w, x, 1);
-
- *npte = pte_mkuptodate(*npte);
- addr += PAGE_SIZE;
}
}
-atomic_t vmchange_seq = ATOMIC_INIT(1);
-
-static void flush_kernel_vm_range(unsigned long start, unsigned long end,
- int update_seq)
+static void fix_range(struct mm_struct *mm, unsigned long start_addr,
+ unsigned long end_addr, int force)
{
- struct mm_struct *mm;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long addr, last;
- int updated = 0, err;
-
- mm = &init_mm;
- for(addr = start; addr < end;){
- pgd = pgd_offset(mm, addr);
- if(!pgd_present(*pgd)){
- if(pgd_newpage(*pgd)){
- updated = 1;
- last = addr + PGDIR_SIZE;
- if(last > end)
- last = end;
- err = os_unmap_memory((void *) addr,
- last - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr += PGDIR_SIZE;
- continue;
- }
+ if((current->thread.mode.tt.extern_pid != -1) &&
+ (current->thread.mode.tt.extern_pid != os_getpid()))
+ panic("fix_range fixing wrong address space, current = 0x%p",
+ current);
- pud = pud_offset(pgd, addr);
- if(!pud_present(*pud)){
- if(pud_newpage(*pud)){
- updated = 1;
- last = addr + PUD_SIZE;
- if(last > end)
- last = end;
- err = os_unmap_memory((void *) addr,
- last - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, addr);
- if(!pmd_present(*pmd)){
- if(pmd_newpage(*pmd)){
- updated = 1;
- last = addr + PMD_SIZE;
- if(last > end)
- last = end;
- err = os_unmap_memory((void *) addr,
- last - addr);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- }
- addr += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, addr);
- if(!pte_present(*pte) || pte_newpage(*pte)){
- updated = 1;
- err = os_unmap_memory((void *) addr,
- PAGE_SIZE);
- if(err < 0)
- panic("munmap failed, errno = %d\n",
- -err);
- if(pte_present(*pte))
- map_memory(addr,
- pte_val(*pte) & PAGE_MASK,
- PAGE_SIZE, 1, 1, 1);
- }
- else if(pte_newprot(*pte)){
- updated = 1;
- protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
- }
- addr += PAGE_SIZE;
- }
- if(updated && update_seq) atomic_inc(&vmchange_seq);
+ fix_range_common(mm, start_addr, end_addr, force, 0, do_ops);
}
+atomic_t vmchange_seq = ATOMIC_INIT(1);
+
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
- flush_kernel_vm_range(start, end, 1);
+ if(flush_tlb_kernel_range_common(start, end))
+ atomic_inc(&vmchange_seq);
}
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
/* Assumes that the range start ... end is entirely within
* either process memory or kernel vm
*/
- if((start >= start_vm) && (start < end_vm))
- flush_kernel_vm_range(start, end, 1);
+ if((start >= start_vm) && (start < end_vm)){
+ if(flush_tlb_kernel_range_common(start, end))
+ atomic_inc(&vmchange_seq);
+ }
else fix_range(vma->vm_mm, start, end, 0);
}
fix_range(mm, 0, STACK_TOP, 0);
seq = atomic_read(&vmchange_seq);
- if(current->thread.mode.tt.vm_seq == seq) return;
+ if(current->thread.mode.tt.vm_seq == seq)
+ return;
current->thread.mode.tt.vm_seq = seq;
- flush_kernel_vm_range(start_vm, end_vm, 0);
+ flush_tlb_kernel_range_common(start_vm, end_vm);
}
void force_flush_all_tt(void)
{
fix_range(current->mm, 0, STACK_TOP, 1);
- flush_kernel_vm_range(start_vm, end_vm, 0);
+ flush_tlb_kernel_range_common(start_vm, end_vm);
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */