/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
/* Every mm not currently pinned sits on this list so mm_pin_all() can find it. */
LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);

/* Remap one pagetable page in the direct mapping with the given protection. */
static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0));
}

static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
        pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte;
        int g, u, m;

        pgd = mm->pgd;
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
         * but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
        for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        mm_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                mm_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                mm_walk_set_prot(pte, flags);
                        }
                }
        }
}

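/*
 * Worked example for the loop bound above (illustrative, assuming the usual
 * x86_64 layout where TASK_SIZE64 = 1UL << 47 and PGDIR_SIZE = 1UL << 39):
 * ((1UL << 47) - 1) / (1UL << 39) = 255, so mm_walk() visits pgd entries
 * 0..255 inclusive - exactly the user half of the 512-entry pgd - whether or
 * not TASK_SIZE64 happens to be PGDIR_SIZE-aligned.
 */
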
/*
 * Make all pagetable pages of @mm read-only and register its top-level
 * pagetables with the hypervisor.
 */
void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        mm_walk(mm, PAGE_KERNEL_RO);
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
                        PAGE_KERNEL_RO),
                UVMF_TLB_FLUSH));
        xen_pgd_pin(__pa(mm->pgd));             /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
        spin_lock(&mm_unpinned_lock);
        list_del(&mm->context.unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

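/*
 * Note the ordering in mm_pin(): every pagetable page is made read-only
 * before xen_pgd_pin() is issued, since Xen validates a pagetable at pin
 * time and refuses to pin one that is still mapped writable anywhere.
 */
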
void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)mm->pgd,
                pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__user_pgd(mm->pgd),
                pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
                        PAGE_KERNEL), 0));
        mm_walk(mm, PAGE_KERNEL);
        xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

void mm_pin_all(void)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        while (!list_empty(&mm_unpinned))
                mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
                                  context.unpinned));
}

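/*
 * The loop above terminates because mm_pin() does list_del() on
 * mm->context.unpinned, so each iteration shortens mm_unpinned by one.
 */
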
void _arch_dup_mmap(struct mm_struct *mm)
{
        if (!mm->context.pinned)
                mm_pin(mm);
}

void _arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);
        /*
         * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
         * *much* faster this way, as no tlb flushes means bigger wrpt batches.
         */
        if ( tsk->active_mm == mm )
        {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);
                switch_mm(mm, &init_mm, tsk);
                atomic_dec(&mm->mm_count);
                BUG_ON(atomic_read(&mm->mm_count) == 0);
        }
        task_unlock(tsk);

        if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
             !mm->context.has_foreign_mappings )
                mm_unpin(mm);
}

void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
        __free_page(pte);
}

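/*
 * A pte page freed from a pinned mm is still mapped read-only in the direct
 * mapping at this point; the update_va_mapping() above makes it writable
 * again before the page is handed back to the allocator.
 */
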
#endif /* CONFIG_XEN */

static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud; pmd_t *pmd; pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

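/*
 * Illustration: with 4k pages and PTRS_PER_PTE = 512, the loop above fills
 * all 512 ptes covering the 2MB region; only the single pte whose physical
 * address equals the target (addr == address) receives @prot, the other 511
 * keep @ref_prot, so the split changes exactly one 4k mapping.
 */
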
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

static struct page *deferred_pages; /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
        fpage->lru.next = (struct list_head *)deferred_pages;
        deferred_pages = fpage;
}

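/*
 * deferred_pages is a singly-linked stack threaded through page->lru.next,
 * which is otherwise unused for these pagetable pages; global_flush_tlb()
 * pops and frees the chain only after the TLBs have been flushed, and uses
 * a NULL lru.next to detect the single-page case worth a targeted flush.
 */
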
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        /*
         * ..., but the XEN guest kernels (currently) do:
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
#ifndef CONFIG_XEN
        BUG_ON(PageReserved(kpte_page));
#else
        if (!PageReserved(kpte_page))
#endif
                if (page_private(kpte_page) == 0) {
                        save_page(kpte_page);
                        revert_page(address, ref_prot);
                }
        return 0;
}

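/*
 * Example of the refcount scheme above (illustrative): two change_page_attr()
 * calls on different 4k pages inside one 2MB region leave the split pte page
 * with page_private == 2; reverting both drops it to 0, at which point the
 * region is merged back into a large page and the pte page is queued via
 * save_page() for freeing after the next global_flush_tlb().
 */
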
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle kernel mapping too which aliases part of the
                 * lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}

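/*
 * Usage sketch (hypothetical caller, not part of this file): a driver that
 * needs one page mapped uncached would do
 *
 *      change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *      global_flush_tlb();
 *
 * and later restore it with change_page_attr(page, 1, PAGE_KERNEL) followed
 * by another global_flush_tlb().
 */
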
void global_flush_tlb(void)
{
        struct page *dpage;

        down_read(&init_mm.mmap_sem);
        dpage = xchg(&deferred_pages, NULL);
        up_read(&init_mm.mmap_sem);

        flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
        while (dpage) {
                struct page *tmp = dpage;
                dpage = (struct page *)dpage->lru.next;
                ClearPagePrivate(tmp);
                __free_page(tmp);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);