/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd;
	pte_t *pte;
	if (!pgd || !pgd_present(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;	/* caller checks _PAGE_PSE */
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
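
/*
 * Build a full pte page for the 2/4MB slot containing @address: every
 * entry replicates the default PAGE_KERNEL mapping except the one for
 * @address itself, which receives the caller's protection.
 */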
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;
	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}
static void flush_kernel_map(void *address)
{
	if (0 && address && cpu_has_clflush) {
		/* Flush only the affected cache lines with clflush -
		   disabled until it is known to beat a full wbinvd. */
		int i;
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd":::"memory");
	__flush_tlb_one(address);
}
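
/*
 * Caches and TLBs are per-CPU state, so the flush has to run on every
 * processor; on_each_cpu() broadcasts it and waits for completion.
 */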
static inline void flush_map(unsigned long address)
{
	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
struct deferred_page {
	struct deferred_page *next;
	struct page *fpage;
	unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

static inline void save_page(unsigned long address, struct page *fpage)
{
	struct deferred_page *df;
	df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
	if (!df) {
		/* No memory to defer: flush and free immediately. */
		flush_map(address);
		__free_page(fpage);
	} else {
		df->next = df_list;
		df->fpage = fpage;
		df->address = address;
		df_list = df;
	}
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(struct page *kpte_page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	pmd = pmd_offset(pgd, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, PAGE_KERNEL_LARGE);
	set_pte((pte_t *)pmd, large_pte);
}
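
/*
 * kpte_page->count tracks deviations from the standard protection: it
 * gains a reference for every pte in the page that differs from
 * PAGE_KERNEL and drops one when an entry reverts.  Once it falls back
 * to 1 the slot is uniform again and can revert to a large page.
 */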
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	unsigned kpte_flags;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((kpte_flags & _PAGE_PSE) == 0) {
			pte_t old = *kpte;
			pte_t standard = mk_pte(page, PAGE_KERNEL);

			set_pte(kpte, mk_pte(page, prot));
			/* First deviation from the standard pte: take a reference. */
			if (pte_same(old, standard))
				atomic_inc(&kpte_page->count);
		} else {
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			atomic_inc(&kpte_page->count);
			set_pte(kpte, mk_pte(split, PAGE_KERNEL));
		}
	} else if ((kpte_flags & _PAGE_PSE) == 0) {
		/* Back to the standard protection: drop the reference. */
		set_pte(kpte, mk_pte(page, PAGE_KERNEL));
		atomic_dec(&kpte_page->count);
	}

	if (atomic_read(&kpte_page->count) == 1) {
		save_page(address, kpte_page);
		revert_page(kpte_page, address);
	}
	return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, page++) {
		unsigned long address = (unsigned long)page_address(page);
		err = __change_page_attr(address, page, prot);
		if (err)
			break;
		/* Handle kernel mapping too which aliases part of the lowmem */
		if (page_to_phys(page) < KERNEL_TEXT_SIZE) {
			unsigned long addr2 = __START_KERNEL_map + page_to_phys(page);
			err = __change_page_attr(addr2, page, prot);
			if (err)
				break;
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}
void global_flush_tlb(void)
{
	struct deferred_page *df, *next_df;

	down_read(&init_mm.mmap_sem);
	df = xchg(&df_list, NULL);
	up_read(&init_mm.mmap_sem);
	/* A single deferred page can be flushed precisely; for more,
	   fall back to a global flush. */
	flush_map((df && !df->next) ? df->address : 0);
	for (; df; df = next_df) {
		next_df = df->next;
		if (df->fpage)
			__free_page(df->fpage);
		kfree(df);
	}
}
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
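
/*
 * Usage sketch (hypothetical caller, for illustration only): temporarily
 * write-protect one page in the kernel linear map.  Assumes PAGE_KERNEL_RO
 * from <asm/pgtable.h>; the pairing of change_page_attr() with
 * global_flush_tlb() follows the contract documented above.
 */
#if 0
static int example_write_protect(struct page *page)
{
	int err = change_page_attr(page, 1, PAGE_KERNEL_RO);
	if (err)
		return err;
	global_flush_tlb();	/* flush stale TLB/cache entries */

	/* ... the linear-map alias of @page is now read-only ... */

	err = change_page_attr(page, 1, PAGE_KERNEL);	/* restore */
	global_flush_tlb();
	return err;
}
#endif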