arch/i386/mm/pageattr.c (linux-2.6.git)
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>

static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
static struct list_head df_list = LIST_HEAD_INIT(df_list);

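/*
 * Walk the kernel page tables and return the pte that maps the given
 * virtual address, or the pmd entry itself if the address is covered by
 * a large page.  Returns NULL if the address is not mapped.
 */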
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pmd_t *pmd;
        if (pgd_none(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}

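/*
 * Split a 2/4MB kernel mapping into a page of 4KB ptes.  The pte covering
 * 'address' gets 'prot'; all other ptes in the range keep PAGE_KERNEL.
 * cpa_lock is dropped around the allocation because alloc_pages() may sleep.
 */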
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : PAGE_KERNEL);
        }
        return base;
}

static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                asm volatile("wbinvd":::"memory");
        /* Flush everything to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();
}

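/*
 * Update a kernel pte/pmd entry.  The change is made in init_mm; on non-PAE
 * kernels the kernel page-table entries are not shared between processes,
 * so the same update must also be applied to every mm on init_mm.mmlist.
 */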
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        set_pte_atomic(kpte, pte);      /* change init_mm */
#ifndef CONFIG_X86_PAE
        {
                struct list_head *l;
                if (TASK_SIZE > PAGE_OFFSET)
                        return;
                spin_lock(&mmlist_lock);
                list_for_each(l, &init_mm.mmlist) {
                        struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
                        pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
                spin_unlock(&mmlist_lock);
        }
#endif
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pte_t *linear = (pte_t *)
                pmd_offset(pgd_offset(&init_mm, address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            PAGE_KERNEL_LARGE));
}

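/*
 * Change the attributes of a single page in the kernel linear mapping.
 * The reference count on kpte_page tracks how many ptes in the surrounding
 * large-page range carry non-standard protections; when it falls back to
 * one, the range is queued on df_list and reverted to a single large page.
 */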
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

#ifdef CONFIG_HIGHMEM
        if (page >= highmem_start_page)
                BUG();
#endif
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        pte_t old = *kpte;
                        pte_t standard = mk_pte(page, PAGE_KERNEL);
                        set_pte_atomic(kpte, mk_pte(page, prot));
                        if (pte_same(old, standard))
                                get_page(kpte_page);
                } else {
                        struct page *split = split_large_page(address, prot);
                        if (!split)
                                return -ENOMEM;
                        get_page(kpte_page);
                        set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
                }
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                __put_page(kpte_page);
        }

        if (cpu_has_pse && (page_count(kpte_page) == 1)) {
                list_add(&kpte_page->lru, &df_list);
                revert_page(kpte_page, address);
        }
        return 0;
}

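/* Run flush_kernel_map() on every CPU, including the current one. */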
static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped somewhere with a caching policy
 * other than write-back - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the attributes of the page
 * in the kernel linear mapping as well.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}

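/*
 * Flush the kernel mapping changes on all CPUs and free the pte pages of
 * any ranges that change_page_attr() reverted to large pages.
 */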
void global_flush_tlb(void)
{
        LIST_HEAD(l);
        struct list_head *n;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        n = l.next;
        while (n != &l) {
                struct page *pg = list_entry(n, struct page, lru);
                n = n->next;
                __free_page(pg);
        }
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        /* The return value is ignored - the calls cannot fail,
         * because large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /* We should send an IPI and flush all TLBs,
         * but that can deadlock, so flush only the current CPU.
         */
        __flush_tlb_all();
}
EXPORT_SYMBOL(kernel_map_pages);
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
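
Usage sketch (not part of the original file): a hypothetical helper showing the calling convention that the comment above change_page_attr() prescribes. The function name example_set_uncached() is invented for illustration; PAGE_KERNEL_NOCACHE is the uncached kernel protection from <asm/pgtable.h>.

/*
 * Illustrative only: remap 'numpages' pages uncached in the kernel linear
 * mapping, then perform the mandatory global flush.
 */
static int example_set_uncached(struct page *page, int numpages)
{
        int err;

        /* Change the kernel linear-map ptes for these pages to uncached. */
        err = change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;

        /* Required after change_page_attr(): flush TLBs/caches on all CPUs. */
        global_flush_tlb();
        return 0;
}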