arch/xen/x86_64/mm/pageattr.c (linux-2.6.git, Fedora Core 2 Updates 2.6.10-1.771_FC2)
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

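/*
 * Xen-specific pte_free: a page that has been in use as a page table is
 * pinned (kept read-only) by the hypervisor, so it must be unpinned and
 * made writable again before it can go back to the page allocator.
 */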
void pte_free(struct page *pte)
{
        pte_t *ptep;

        ptep = pfn_to_kaddr(page_to_pfn(pte));

        xen_pte_unpin(__pa(ptep));
        make_page_writable(ptep);
        __free_page(pte);
}

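/*
 * Walk the kernel page tables for a virtual address. Returns the PTE,
 * or the PMD itself (cast to pte_t *) when the address is covered by a
 * large page, or NULL when no mapping is present.
 */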
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

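/*
 * Break a large-page mapping up into PTRS_PER_PTE 4K entries. The entry
 * covering 'address' gets 'prot'; every other entry keeps the reference
 * protection 'ref_prot'. Returns the newly allocated PTE page, which the
 * caller installs into the PMD.
 */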
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

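/*
 * Flush caches and the TLB on the local CPU. The clflush-based path is
 * deliberately compiled out ("if (0 && ...)"), so in practice this always
 * does a full wbinvd; a NULL address means "flush everything".
 */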
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one((unsigned long) address);
        else
                __flush_tlb_all();
}

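/* Run flush_kernel_map on every CPU (including this one) and wait. */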
static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

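/*
 * Old PTE pages cannot be freed until every CPU has flushed its TLB, so
 * save_page() queues them on df_list and global_flush_tlb() frees them
 * after the flush. If the bookkeeping allocation fails, flush and free
 * immediately as a fallback.
 */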
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;
        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}

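/*
 * The count on kpte_page tracks how many PTEs in its large-page region
 * differ from ref_prot: setting a non-default protection takes a
 * reference, restoring the default drops one. When the count falls back
 * to 1 (the allocator's own reference), no special mappings remain and
 * the region can revert to a large page.
 */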
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                __put_page(kpte_page);
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

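        /*
         * count == 1 means only the allocator reference is left: no PTE
         * in this region differs from ref_prot any more, so the split
         * PTE page can be queued for freeing and the large page restored.
         */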
        switch (page_count(kpte_page)) {
        case 1:
                save_page(address, kpte_page);
                revert_page(address, ref_prot);
                break;
        case 0:
                BUG(); /* memleak and failed 2M page regeneration */
        }
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle the kernel text mapping too, which aliases part
                 * of lowmem. The alias must stay executable, so _PAGE_NX
                 * is cleared for it. */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;
                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}
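
/*
 * Typical usage (an illustrative sketch, not part of the original file):
 * a driver that needs an uncached mapping of a RAM page changes the
 * linear-map attribute and then flushes, as the comment above
 * change_page_attr_addr() requires. The function name below is
 * hypothetical; PAGE_KERNEL_NOCACHE is the standard uncached kernel
 * protection.
 *
 *      int map_page_uncached(struct page *page)
 *      {
 *              int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *              if (err)
 *                      return err;
 *              global_flush_tlb();     // mandatory after change_page_attr
 *              return 0;
 *      }
 */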
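
/*
 * Flush the TLB on all CPUs and free the PTE pages deferred by
 * save_page(). With exactly one deferred page only that address is
 * flushed; otherwise the entire TLB is invalidated on every CPU.
 */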
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        if (!df)
                return;
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);