arch/i386/mm/pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>

static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
static struct list_head df_list = LIST_HEAD_INIT(df_list);
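/*
 * Walk the kernel page tables and return the pte that maps @address,
 * or NULL if there is none.  For a 2/4MB large page the pmd entry
 * itself is returned, cast to a pte.
 */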
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pmd_t *pmd;
        if (pgd_none(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}
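/*
 * Replace one large page mapping by a freshly allocated page table that
 * maps the same 2/4MB physical range with 4K ptes.  Every entry gets
 * PAGE_KERNEL except the one covering @address, which gets @prot.
 * cpa_lock is dropped around the allocation because alloc_pages() may
 * sleep.
 */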
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : PAGE_KERNEL);
        }
        return base;
}
static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                asm volatile("wbinvd":::"memory");
        /*
         * Flush all TLB entries to work around an erratum in early
         * Athlons regarding large page flushing.
         */
        __flush_tlb_all();
}
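/*
 * Sketch only (not part of this kernel): on CPUs with CLFLUSH the
 * whole-cache WBINVD above could in principle be narrowed to flushing
 * just the cache lines of the page whose attributes changed, roughly:
 *
 *      static void clflush_page(void *vaddr)
 *      {
 *              int i;
 *
 *              for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
 *                      asm volatile("clflush (%0)" :: "r" (vaddr + i) : "memory");
 *      }
 *
 * boot_cpu_data.x86_clflush_size is an assumption here, and the address
 * of the affected page would have to be handed to the flush handler,
 * which currently only receives a dummy argument from on_each_cpu().
 */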
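/*
 * Install a pmd-level entry.  With PAE (PTRS_PER_PMD > 1) the kernel
 * pmd tables are shared by all processes, so updating init_mm is
 * enough.  Without PAE each process has its own copy of the kernel
 * pgd entries, so the change must be propagated to every pgd on
 * pgd_list as well.
 */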
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (PTRS_PER_PMD > 1)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pmd_t *pmd;
                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pmd = pmd_offset(pgd, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pte_t *linear = (pte_t *)
                pmd_offset(pgd_offset(&init_mm, address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            PAGE_KERNEL_LARGE));
}
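/*
 * Change the attributes of a single kernel mapping.  The page count of
 * the page table page (kpte_page) tracks how many of its ptes deviate
 * from PAGE_KERNEL: it is raised when a pte is given special
 * protections and dropped when the default is restored.  Once no
 * special mappings remain and the CPU supports PSE, the 2/4MB large
 * page is re-established and the page table page is queued on df_list
 * to be freed after the next global flush.
 */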
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

#ifdef CONFIG_HIGHMEM
        if (page >= highmem_start_page)
                BUG();
#endif
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        pte_t old = *kpte;
                        pte_t standard = mk_pte(page, PAGE_KERNEL);
                        set_pte_atomic(kpte, mk_pte(page, prot));
                        if (pte_same(old, standard))
                                get_page(kpte_page);
                } else {
                        /* Large page: split it into 4K mappings first. */
                        struct page *split = split_large_page(address, prot);
                        if (!split)
                                return -ENOMEM;
                        get_page(kpte_page);
                        set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
                }
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                __put_page(kpte_page);
        }

        /* No special mappings left in this pte page - revert to a large page. */
        if (cpu_has_pse && (page_count(kpte_page) == 1)) {
                list_add(&kpte_page->lru, &df_list);
                revert_page(kpte_page, address);
        }
        return 0;
}

static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
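/*
 * Typical usage (illustrative only): give one page an uncached mapping
 * before handing it to a device, then restore the default afterwards.
 * PAGE_KERNEL_NOCACHE is the usual uncached protection on i386.
 *
 *      struct page *pg = alloc_page(GFP_KERNEL);
 *
 *      if (pg && change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE) == 0)
 *              global_flush_tlb();
 *      ...
 *      change_page_attr(pg, 1, PAGE_KERNEL);
 *      global_flush_tlb();
 */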
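/*
 * Flush caches and TLBs on all CPUs and free any page table pages whose
 * large page mapping was restored.  Must be called with interrupts
 * enabled, after change_page_attr().
 */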
void global_flush_tlb(void)
{
        LIST_HEAD(l);
        struct list_head *n;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        n = l.next;
        while (n != &l) {
                struct page *pg = list_entry(n, struct page, lru);
                n = n->next;
                __free_page(pg);
        }
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        /*
         * The return value is ignored - the calls cannot fail because
         * large pages are disabled at boot time when DEBUG_PAGEALLOC is
         * enabled, so no splitting (and thus no allocation) is needed.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /*
         * We should send an IPI and flush all TLBs, but doing that from
         * here can deadlock - flush only the current CPU's TLB.
         */
        __flush_tlb_all();
}
EXPORT_SYMBOL(kernel_map_pages);
#endif
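/*
 * With CONFIG_DEBUG_PAGEALLOC the page allocator is expected to call
 * kernel_map_pages(page, 1 << order, 0) when pages are freed and
 * kernel_map_pages(page, 1 << order, 1) when they are handed out again,
 * so that stray accesses to freed pages through the linear mapping
 * fault immediately.  (The call sites live in mm/page_alloc.c; this is
 * only an illustration of the intended use.)
 */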

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);