/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);
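
/*
 * Remap one page-table page's address in the kernel direct mapping with the
 * given protection, using a Xen update_va_mapping hypercall.
 */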
static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
	struct page *page = virt_to_page(pt);
	unsigned long pfn = page_to_pfn(page);

	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__va(pfn << PAGE_SHIFT),
		pfn_pte(pfn, flags), 0));
}
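
/*
 * Walk all page-table pages reachable from mm->pgd over the user address
 * range and apply 'flags' (e.g. PAGE_KERNEL_RO before pinning, PAGE_KERNEL
 * after unpinning) to their direct-mapping aliases.
 */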
static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
	pgd_t *pgd = mm->pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int g, u, m;

	/*
	 * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
	 * be the 'current' task's pagetables (e.g., current may be 32-bit,
	 * but the pagetables may be for a 64-bit task).
	 * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
	 * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
	 */
	for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		if (PTRS_PER_PUD > 1) /* not folded */
			mm_walk_set_prot(pud, flags);
		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
			if (pud_none(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			if (PTRS_PER_PMD > 1) /* not folded */
				mm_walk_set_prot(pmd, flags);
			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
				if (pmd_none(*pmd))
					continue;
				pte = pte_offset_kernel(pmd, 0);
				mm_walk_set_prot(pte, flags);
			}
		}
	}
}
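
/*
 * Pin an mm's page tables: make every page-table page read-only in the
 * direct mapping, then ask Xen to validate and pin both the kernel and the
 * user PGD so the hypervisor can track them as page tables.
 */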
void mm_pin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;

	spin_lock(&mm->page_table_lock);

	mm_walk(mm, PAGE_KERNEL_RO);
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)mm->pgd,
		pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
		UVMF_TLB_FLUSH));
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__user_pgd(mm->pgd),
		pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
		UVMF_TLB_FLUSH));
	xen_pgd_pin(__pa(mm->pgd)); /* kernel */
	xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
	mm->context.pinned = 1;
	spin_lock(&mm_unpinned_lock);
	list_del(&mm->context.unpinned);
	spin_unlock(&mm_unpinned_lock);

	spin_unlock(&mm->page_table_lock);
}
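
/*
 * Reverse of mm_pin(): unpin both PGDs from Xen and make all page-table
 * pages writable again in the direct mapping.
 */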
void mm_unpin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;

	spin_lock(&mm->page_table_lock);

	xen_pgd_unpin(__pa(mm->pgd));
	xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)mm->pgd,
		pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__user_pgd(mm->pgd),
		pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
	mm_walk(mm, PAGE_KERNEL);
	xen_tlb_flush();
	mm->context.pinned = 0;
	spin_lock(&mm_unpinned_lock);
	list_add(&mm->context.unpinned, &mm_unpinned);
	spin_unlock(&mm_unpinned_lock);

	spin_unlock(&mm->page_table_lock);
}
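
/* Pin every mm that is still on the mm_unpinned list. */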
void mm_pin_all(void)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
	while (!list_empty(&mm_unpinned))
		mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
				  context.unpinned));
}
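
/* Ensure the mm is pinned before it is used; hooked into mmap duplication. */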
void _arch_dup_mmap(struct mm_struct *mm)
{
	if (!mm->context.pinned)
		mm_pin(mm);
}
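
/*
 * mmap teardown hook: drop a defunct mm from cr3 as early as possible and,
 * when nothing else references it and it has no foreign mappings, unpin it
 * so the remaining teardown can write the page tables directly.
 */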
void _arch_exit_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	/*
	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
	 */
	if (tsk->active_mm == mm) {
		tsk->active_mm = &init_mm;
		atomic_inc(&init_mm.mm_count);
		switch_mm(mm, &init_mm, tsk);
		atomic_dec(&mm->mm_count);
		BUG_ON(atomic_read(&mm->mm_count) == 0);
	}
	task_unlock(tsk);

	if (mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
	    !mm->context.has_foreign_mappings)
		mm_unpin(mm);
}
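
/*
 * Xen pte_free(): if the page-table page is still mapped read-only in the
 * direct mapping, make it writable again before freeing it.
 */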
void pte_free(struct page *pte)
{
	unsigned long va = (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT);

	if (!pte_write(*virt_to_ptep(va)))
		BUG_ON(HYPERVISOR_update_va_mapping(
			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
	__free_page(pte);
}
#endif /* CONFIG_XEN */
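
/*
 * Return the kernel PTE mapping 'address' (or the PMD itself for a 2MB
 * mapping), or NULL if the address is not mapped.
 */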
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
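
/*
 * Build a page of 4k PTEs covering one 2MB region: the entry for 'address'
 * gets 'prot', all others get 'ref_prot'. The caller installs it in place
 * of the large mapping.
 */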
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in the
	 * page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
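
/* Flush one page from the CPU caches, one cache line at a time. */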
static void cache_flush_page(void *adr)
{
	int i;
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}
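
/*
 * Runs on every CPU: flush the caches (per-page clflush when available,
 * otherwise wbinvd) for the deferred pages, then flush the TLB.
 */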
static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* When clflush is available always use it because it is
	   much cheaper than WBINVD */
	if (!cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}
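
/*
 * Change the protection of a single 4k page at 'address', splitting a 2MB
 * mapping if needed and reverting to a large page once no entry in the
 * split page carries non-standard attributes any more.
 */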
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	/*
	 * ..., but the XEN guest kernels (currently) do:
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a large page.
	 */
#ifndef CONFIG_XEN
	BUG_ON(PageReserved(kpte_page));
#else
	if (!PageReserved(kpte_page))
#endif
		if (page_private(kpte_page) == 0) {
			save_page(kpte_page);
			revert_page(address, ref_prot);
		}
	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
		if (err)
			break;
		/* Handle the kernel mapping too, which aliases part of
		 * the lowmem. */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
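
/*
 * Flush caches and TLBs on all CPUs for the deferred pages, then free the
 * page-table pages whose large mappings have been restored.
 */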
void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);
	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);