arch/x86_64/mm/pageattr.c (from linux-2.6.6, ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

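/*
 * Look up the kernel pte mapping 'address'.  Returns NULL if the
 * address is not mapped, and the pmd cast to a pte if the address
 * is covered by a 2MB large page.
 */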
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pmd_t *pmd;
        pte_t *pte;

        if (!pgd || !pgd_present(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

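/*
 * Split a 2MB large mapping into 4KB ptes: allocate one pte page
 * that replicates the whole 2MB range, with 'prot' applied only to
 * the 4KB page at 'address' and PAGE_KERNEL everywhere else.
 */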
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : PAGE_KERNEL);
        }
        return base;
}

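/*
 * Per-CPU flush after an attribute change, run on every CPU via
 * flush_map().  Caches must be written back because a mapping may
 * be changing to an uncached type.
 */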
static void flush_kernel_map(void *address)
{
        /*
         * The clflush path is deliberately disabled ("0 &&"); whether
         * flushing single cache lines beats a full wbinvd was left an
         * open question.
         */
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        /* NULL address: more than one page changed; a single-entry
           flush would leave stale TLB entries, so flush everything. */
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
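
/*
 * A pte page freed by a revert must not be reused before every CPU
 * has flushed its TLB, so freeing is deferred: pages are queued here
 * and released in global_flush_tlb().
 */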
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;

        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                /* No memory: flush and free synchronously instead. */
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}

/*
 * No more special protections in this 2MB area - revert to a
 * large page again.
 */
static void revert_page(struct page *kpte_page, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        pmd = pmd_offset(pgd, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, PAGE_KERNEL_LARGE);
        set_pte((pte_t *)pmd, large_pte);
}
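
/*
 * Change the protection of a single kernel page at 'address'.
 *
 * kpte_page->count is (ab)used as a count of ptes carrying
 * non-default protection: raised when a pte is made non-default,
 * dropped when one is restored.  Once it falls back to one (the
 * bare allocation reference) no special protections remain and the
 * 2MB region is reverted to a large page.
 */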
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned long kpte_flags;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        /* Already split: just rewrite the pte. */
                        pte_t old = *kpte;
                        pte_t standard = mk_pte(page, PAGE_KERNEL);

                        set_pte(kpte, mk_pte(page, prot));
                        if (pte_same(old, standard))
                                atomic_inc(&kpte_page->count);
                } else {
                        struct page *split = split_large_page(address, prot);
                        if (!split)
                                return -ENOMEM;
                        atomic_inc(&kpte_page->count);
                        set_pte(kpte, mk_pte(split, PAGE_KERNEL));
                }
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                /* Restoring the default protection drops a reference. */
                set_pte(kpte, mk_pte(page, PAGE_KERNEL));
                atomic_dec(&kpte_page->count);
        }

        if (atomic_read(&kpte_page->count) == 1) {
                save_page(address, kpte_page);
                revert_page(kpte_page, address);
        }
        return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped somewhere with a caching
 * policy other than write-back - some CPUs do not tolerate aliased
 * mappings with conflicting caching policies.  The attributes of the
 * kernel linear mapping are changed accordingly.
 *
 * The caller needs to ensure that there are no conflicting mappings
 * elsewhere.  This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, page++) {
                unsigned long address = (unsigned long)page_address(page);

                err = __change_page_attr(address, page, prot);
                if (err)
                        break;
                /*
                 * The kernel text mapping at __START_KERNEL_map aliases
                 * the first part of lowmem; keep that alias in sync.
                 */
                if (page_to_phys(page) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2 = __START_KERNEL_map + page_to_phys(page);
                        err = __change_page_attr(addr2, page, prot);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}
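
/*
 * Usage sketch (illustrative only, not part of the original file):
 * a driver that needs 'n' pages mapped uncached might do roughly the
 * following; the helper name map_pages_uncached() is made up here.
 *
 *        static int map_pages_uncached(struct page *page, int n)
 *        {
 *                int err = change_page_attr(page, n, PAGE_KERNEL_NOCACHE);
 *                if (!err)
 *                        global_flush_tlb();
 *                return err;
 *        }
 *
 * and symmetrically on teardown:
 *
 *        change_page_attr(page, n, PAGE_KERNEL);
 *        global_flush_tlb();
 */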
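
/*
 * Flush the TLB on all CPUs and free any pte pages queued by
 * save_page().  Must be called after change_page_attr().
 */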
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        /* A single deferred page can be flushed by address; more than
           one needs a global flush (flush_map(0)). */
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);