/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
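
/*
 * Look up the kernel pte that maps @address.  For an address backed by a
 * 2/4MB large page this returns the pmd entry itself; callers check
 * _PAGE_PSE to tell the two cases apart.
 */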
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}
static struct page *split_large_page(unsigned long address, pgprot_t prot,
					pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base;
	pte_t *pbase;

	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}
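
/*
 * Runs on every CPU via on_each_cpu(): flush caches and TLBs after a
 * mapping change.  A non-NULL argument is the kernel address of a single
 * page whose cache lines are flushed with clflush; otherwise (or when
 * clflush is unavailable) fall back to wbinvd, and always flush the TLBs.
 */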
static void flush_kernel_map(void *arg)
{
	unsigned long adr = (unsigned long)arg;

	if (adr && cpu_has_clflush) {
		int i;

		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (adr + i));
	} else if (boot_cpu_data.x86_model >= 4)
		wbinvd();

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();
}
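
/*
 * Update a kernel pmd/pte entry.  Without HAVE_SHARED_KERNEL_PMD (PAE with
 * per-process kernel pmds) the change made in init_mm must also be
 * propagated to the pgd of every process on pgd_list.
 */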
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);	/* change init_mm */
	if (HAVE_SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgprot_t ref_prot =
		((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
	pte_t *linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);

	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    ref_prot));
}
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	/* Never make the rodata section writable; strip _PAGE_RW and taint. */
	if (address >= (unsigned long)__start_rodata &&
	    address <= (unsigned long)__end_rodata &&
	    (pgprot_val(prot) & _PAGE_RW)) {
		pgprot_val(prot) &= ~(_PAGE_RW);
		add_taint(TAINT_MACHINE_CHECK);
	}

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}
static inline void flush_map(void *adr)
{
	on_each_cpu(flush_kernel_map, adr, 1, 1);
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This also changes the page attributes
 * in the kernel linear mapping.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);
	return err;
}
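
/*
 * Example (illustrative sketch only, not part of this interface): a caller
 * that temporarily needs an uncached mapping of a page it owns - "my_page"
 * is a hypothetical variable - would pair the two calls roughly like this:
 *
 *	if (!change_page_attr(my_page, 1, PAGE_KERNEL_NOCACHE))
 *		global_flush_tlb();
 *	...use the page...
 *	change_page_attr(my_page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */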
void global_flush_tlb(void)
{
	struct list_head l;
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_replace_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	if (!cpu_has_clflush)
		flush_map(NULL);
	list_for_each_entry_safe(pg, next, &l, lru) {
		if (cpu_has_clflush)
			flush_map(page_address(pg));
		__free_page(pg);
	}
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
						numpages * PAGE_SIZE);

	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu.
	 */
	__flush_tlb_all();
}
#endif
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);