/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */
7 #include "linux/stddef.h"
8 #include "linux/sched.h"
11 #include "asm/pgtable.h"
13 #include "user_util.h"
/*
 * fix_range(): bring the host address space backing @mm into sync with
 * @mm's page tables over [@start_addr, @end_addr).  Ranges whose upper-
 * level entries are not present and are marked "newpage" (or all of them,
 * when @force is non-zero) are unmapped from the host process via unmap()
 * on the per-mm /proc/mm descriptor; out-of-date ptes are re-mapped with
 * map() or re-protected with protect(), then marked up to date.
 *
 * NOTE(review): this listing is elided and carries fused line-number
 * residue -- the declarations of npgd/npud/npmd/npte and r/w/x/err/fd,
 * the end-of-range clamping, most closing braces, and the trailing
 * arguments of map()/protect() are missing from the visible text.
 * Comments below describe only what is shown; confirm against the full
 * file before relying on them.
 */
18 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
19 unsigned long end_addr, int force)
25 unsigned long addr, end;
/* Kernel-only flushes arrive with mm == NULL; nothing to fix then. */
28 if(mm == NULL) return;
/* Host-side /proc/mm file descriptor representing this address space. */
29 fd = mm->context.skas.mm_fd;
/* Walk [start_addr, end_addr); the step per iteration depends on which
 * page-table level turned out to be absent (advance lines are elided). */
30 for(addr = start_addr; addr < end_addr;){
31 npgd = pgd_offset(mm, addr);
/* Absent pgd: if forced, or the pgd was freshly freed ("newpage"),
 * drop the whole PGDIR-sized host range in one unmap() call. */
32 if(!pgd_present(*npgd)){
33 if(force || pgd_newpage(*npgd)){
34 end = addr + PGDIR_SIZE;
/* presumably clamped to end_addr on an elided line -- TODO confirm */
37 err = unmap(fd, (void *) addr, end - addr);
/* A failed host munmap leaves guest and host views inconsistent;
 * that is unrecoverable, so panic. */
39 panic("munmap failed, errno = %d\n",
41 pgd_mkuptodate(*npgd);
/* Same pattern one level down: absent pud covers a PUD_SIZE range. */
47 npud = pud_offset(npgd, addr);
48 if(!pud_present(*npud)){
49 if(force || pud_newpage(*npud)){
50 end = addr + PUD_SIZE;
53 err = unmap(fd, (void *) addr, end - addr);
55 panic("munmap failed, errno = %d\n",
57 pud_mkuptodate(*npud);
/* And again for the pmd level, covering a PMD_SIZE range. */
63 npmd = pmd_offset(npud, addr);
64 if(!pmd_present(*npmd)){
65 if(force || pmd_newpage(*npmd)){
66 end = addr + PMD_SIZE;
69 err = unmap(fd, (void *) addr, end - addr);
71 panic("munmap failed, errno = %d\n",
73 pmd_mkuptodate(*npmd);
/* Leaf level: one pte == one PAGE_SIZE host mapping. */
79 npte = pte_offset_kernel(npmd, addr);
/* NOTE(review): r/w/x are presumably derived from the pte's access
 * bits on elided lines; !pte_young appears to clear readability to
 * emulate accessed-bit tracking -- confirm against the full file. */
85 if(!pte_young(*npte)){
89 if(force || pte_newpage(*npte)){
/* Replace the stale page: unmap it, then re-map it from the
 * physical page now held in the pte (argument tail elided). */
90 err = unmap(fd, (void *) addr, PAGE_SIZE);
92 panic("munmap failed, errno = %d\n", -err);
93 if(pte_present(*npte))
94 map(fd, addr, pte_val(*npte) & PAGE_MASK,
/* Page kept, only its protection changed. */
97 else if(pte_newprot(*npte))
98 protect(fd, addr, PAGE_SIZE, r, w, x, 1);
/* Record that the host now matches this pte. */
100 *npte = pte_mkuptodate(*npte);
/*
 * flush_tlb_kernel_range_skas(): resync the host mappings backing the
 * kernel's own address range [@start, @end).  Unlike fix_range(), this
 * operates on UML's own host process directly (os_unmap_memory(),
 * map_memory(), protect_memory()) rather than through a /proc/mm
 * descriptor, and kernel pages are mapped read/write/execute.
 *
 * NOTE(review): elided listing with fused line-number residue -- the
 * pgd/pud/pmd/pte declarations, the assignment of @mm (used below but
 * never visibly set; presumably &init_mm), the "updated" bookkeeping,
 * loop-advance lines, closing braces, and the argument tails of the
 * os_unmap_memory()/map_memory() calls are all missing.  Comments
 * describe only what is visible; confirm against the full file.
 */
105 void flush_tlb_kernel_range_skas(unsigned long start, unsigned long end)
107 struct mm_struct *mm;
112 unsigned long addr, last;
113 int updated = 0, err;
116 for(addr = start; addr < end;){
/* mm is read here without a visible assignment -- set on an elided
 * line above (TODO confirm it is &init_mm). */
117 pgd = pgd_offset(mm, addr);
118 pud = pud_offset(pgd, addr);
119 pmd = pmd_offset(pud, addr);
/* Absent pgd that was freshly freed: drop the PGDIR-sized host range. */
120 if(!pgd_present(*pgd)){
121 if(pgd_newpage(*pgd)){
123 last = addr + PGDIR_SIZE;
/* presumably clamped to end on an elided line -- TODO confirm */
126 err = os_unmap_memory((void *) addr,
/* Host/guest views now diverge irreparably on failure: panic. */
129 panic("munmap failed, errno = %d\n",
/* Same pattern at the pud level (PUD_SIZE range). */
136 pud = pud_offset(pgd, addr);
137 if(!pud_present(*pud)){
138 if(pud_newpage(*pud)){
140 last = addr + PUD_SIZE;
143 err = os_unmap_memory((void *) addr,
146 panic("munmap failed, errno = %d\n",
/* And at the pmd level (PMD_SIZE range). */
153 pmd = pmd_offset(pud, addr);
154 if(!pmd_present(*pmd)){
155 if(pmd_newpage(*pmd)){
157 last = addr + PMD_SIZE;
160 err = os_unmap_memory((void *) addr,
163 panic("munmap failed, errno = %d\n",
/* Leaf level: one pte == one PAGE_SIZE host mapping. */
170 pte = pte_offset_kernel(pmd, addr);
/* Stale or vanished page: unmap it, then re-map from the physical
 * page in the pte if one is present (argument tail elided). */
171 if(!pte_present(*pte) || pte_newpage(*pte)){
173 err = os_unmap_memory((void *) addr, PAGE_SIZE);
175 panic("munmap failed, errno = %d\n", -err);
176 if(pte_present(*pte))
177 map_memory(addr, pte_val(*pte) & PAGE_MASK,
/* Page kept but protection changed; kernel pages get rwx. */
180 else if(pte_newprot(*pte)){
182 protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
188 void flush_tlb_kernel_vm_skas(void)
190 flush_tlb_kernel_range_skas(start_vm, end_vm);
193 void __flush_tlb_one_skas(unsigned long addr)
195 flush_tlb_kernel_range_skas(addr, addr + PAGE_SIZE);
198 void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
201 if(vma->vm_mm == NULL)
202 flush_tlb_kernel_range_skas(start, end);
203 else fix_range(vma->vm_mm, start, end, 0);
206 void flush_tlb_mm_skas(struct mm_struct *mm)
208 flush_tlb_kernel_vm_skas();
209 fix_range(mm, 0, host_task_size, 0);
212 void force_flush_all_skas(void)
214 fix_range(current->mm, 0, host_task_size, 1);
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */