/*
 * Source: linux-2.6.6 (ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
 * File:   arch/um/kernel/skas/tlb.c
 */
1 /* 
2  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3  * Licensed under the GPL
4  */
5
6 #include "linux/stddef.h"
7 #include "linux/sched.h"
8 #include "linux/mm.h"
9 #include "asm/page.h"
10 #include "asm/pgtable.h"
11 #include "asm/mmu.h"
12 #include "user_util.h"
13 #include "mem_user.h"
14 #include "skas.h"
15 #include "os.h"
16
/*
 * Sync the host address space backing "mm" with its page tables over
 * [start_addr, end_addr).  For each page whose pte carries a new-page
 * or new-prot flag, the corresponding host mapping (through the mm's
 * skas file descriptor) is torn down and, if the pte is still present,
 * re-established with protections derived from the pte.  With "force"
 * non-zero, every page in the range is unmapped and remapped.
 */
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *npgd;
        pmd_t *npmd;
        pte_t *npte;
        unsigned long addr;
        int r, w, x, err, fd;

        if(mm == NULL) return;
        /* File descriptor identifying the host address space of this mm. */
        fd = mm->context.skas.mm_fd;
        for(addr = start_addr; addr < end_addr;){
                npgd = pgd_offset(mm, addr);
                npmd = pmd_offset(npgd, addr);
                if(pmd_present(*npmd)){
                        npte = pte_offset_kernel(npmd, addr);
                        /* Start from the pte's own protections... */
                        r = pte_read(*npte);
                        w = pte_write(*npte);
                        x = pte_exec(*npte);
                        /* ...but map a clean page read-only so the first
                         * write faults (and dirties it), and map a
                         * non-young page inaccessible so the first access
                         * faults (and ages it). */
                        if(!pte_dirty(*npte)) w = 0;
                        if(!pte_young(*npte)){
                                r = 0;
                                w = 0;
                        }
                        if(force || pte_newpage(*npte)){
                                /* Page contents changed (or full flush
                                 * requested): drop the host mapping, then
                                 * reinstall it if the pte is still present. */
                                err = unmap(fd, (void *) addr, PAGE_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                if(pte_present(*npte))
                                        map(fd, addr, 
                                            pte_val(*npte) & PAGE_MASK,
                                            PAGE_SIZE, r, w, x);
                        }
                        else if(pte_newprot(*npte)){
                                /* Only the protections changed. */
                                protect(fd, addr, PAGE_SIZE, r, w, x, 1);
                        }
                        /* Host now matches the pte - clear the update flags. */
                        *npte = pte_mkuptodate(*npte);
                        addr += PAGE_SIZE;
                }
                else {
                        /* No page table at this address - unmap the whole
                         * pmd-sized chunk from the host if it was just
                         * torn down (or on a forced flush). */
                        if(force || pmd_newpage(*npmd)){
                                err = unmap(fd, (void *) addr, PMD_SIZE);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                                /* NOTE(review): the result of pmd_mkuptodate
                                 * is discarded here (unlike the pte case
                                 * above, which assigns it back) - confirm
                                 * whether it mutates in place or the
                                 * new-page flag is left set. */
                                pmd_mkuptodate(*npmd);
                        }
                        addr += PMD_SIZE;
                }
        }
}
69
70 void flush_tlb_kernel_range_skas(unsigned long start, unsigned long end)
71 {
72         struct mm_struct *mm;
73         pgd_t *pgd;
74         pmd_t *pmd;
75         pte_t *pte;
76         unsigned long addr;
77         int updated = 0, err;
78
79         mm = &init_mm;
80         for(addr = start_vm; addr < end_vm;){
81                 pgd = pgd_offset(mm, addr);
82                 pmd = pmd_offset(pgd, addr);
83                 if(pmd_present(*pmd)){
84                         pte = pte_offset_kernel(pmd, addr);
85                         if(!pte_present(*pte) || pte_newpage(*pte)){
86                                 updated = 1;
87                                 err = os_unmap_memory((void *) addr, 
88                                                       PAGE_SIZE);
89                                 if(err < 0)
90                                         panic("munmap failed, errno = %d\n",
91                                               -err);
92                                 if(pte_present(*pte))
93                                         map_memory(addr, 
94                                                    pte_val(*pte) & PAGE_MASK,
95                                                    PAGE_SIZE, 1, 1, 1);
96                         }
97                         else if(pte_newprot(*pte)){
98                                 updated = 1;
99                                 protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
100                         }
101                         addr += PAGE_SIZE;
102                 }
103                 else {
104                         if(pmd_newpage(*pmd)){
105                                 updated = 1;
106                                 err = os_unmap_memory((void *) addr, PMD_SIZE);
107                                 if(err < 0)
108                                         panic("munmap failed, errno = %d\n",
109                                               -err);
110                         }
111                         addr += PMD_SIZE;
112                 }
113         }
114 }
115
116 void flush_tlb_kernel_vm_skas(void)
117 {
118         flush_tlb_kernel_range_skas(start_vm, end_vm);
119 }
120
121 void __flush_tlb_one_skas(unsigned long addr)
122 {
123         flush_tlb_kernel_range_skas(addr, addr + PAGE_SIZE);
124 }
125
126 void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start, 
127                      unsigned long end)
128 {
129         if(vma->vm_mm == NULL)
130                 flush_tlb_kernel_range_skas(start, end);
131         else fix_range(vma->vm_mm, start, end, 0);
132 }
133
134 void flush_tlb_mm_skas(struct mm_struct *mm)
135 {
136         flush_tlb_kernel_vm_skas();
137         fix_range(mm, 0, host_task_size, 0);
138 }
139
140 void force_flush_all_skas(void)
141 {
142         fix_range(current->mm, 0, host_task_size, 1);
143 }
144
145 /*
146  * Overrides for Emacs so that we follow Linus's tabbing style.
147  * Emacs will notice this stuff at the end of the file and automatically
148  * adjust the settings for this buffer only.  This must remain at the end
149  * of the file.
150  * ---------------------------------------------------------------------------
151  * Local variables:
152  * c-file-style: "linux"
153  * End:
154  */