#include <asm/tlbflush.h>
#include <asm/io.h>
+#ifdef CONFIG_XEN
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+LIST_HEAD(mm_unpinned);
+DEFINE_SPINLOCK(mm_unpinned_lock);
+
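+/*
+ * Remap the direct-mapping alias of the page-table page at @pt with the
+ * requested protection, using a single update_va_mapping hypercall.
+ */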
+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
+{
+ struct page *page = virt_to_page(pt);
+ unsigned long pfn = page_to_pfn(page);
+
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pfn_pte(pfn, flags), 0));
+}
+
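+/*
+ * Walk every page-table page reachable from @mm and remap each one in the
+ * direct mapping with @flags: read-only before pinning, writable again
+ * after unpinning.
+ */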
+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int g, u, m;
+
+ pgd = mm->pgd;
+ /*
+ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
+ * be the 'current' task's pagetables (e.g., current may be 32-bit,
+ * but the pagetables may be for a 64-bit task).
+ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
+ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
+ */
+ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
+ if (pgd_none(*pgd))
+ continue;
+ pud = pud_offset(pgd, 0);
+ if (PTRS_PER_PUD > 1) /* not folded */
+ mm_walk_set_prot(pud, flags);
+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
+ if (pud_none(*pud))
+ continue;
+ pmd = pmd_offset(pud, 0);
+ if (PTRS_PER_PMD > 1) /* not folded */
+ mm_walk_set_prot(pmd, flags);
+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
+ if (pmd_none(*pmd))
+ continue;
+ pte = pte_offset_kernel(pmd, 0);
+ mm_walk_set_prot(pte, flags);
+ }
+ }
+ }
+}
+
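+/*
+ * Pin @mm: make all of its page-table pages read-only in the direct
+ * mapping, then ask Xen to pin the kernel and user PGDs so that the
+ * hypervisor validates any further updates to them.
+ */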
+void mm_pin(struct mm_struct *mm)
+{
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+
+ spin_lock(&mm->page_table_lock);
+
+ mm_walk(mm, PAGE_KERNEL_RO);
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)mm->pgd,
+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
+ UVMF_TLB_FLUSH));
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)__user_pgd(mm->pgd),
+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
+ UVMF_TLB_FLUSH));
+ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
+ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
+ mm->context.pinned = 1;
+ spin_lock(&mm_unpinned_lock);
+ list_del(&mm->context.unpinned);
+ spin_unlock(&mm_unpinned_lock);
+
+ spin_unlock(&mm->page_table_lock);
+}
+
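+/*
+ * Reverse of mm_pin(): unpin both PGDs and restore writable direct
+ * mappings for all of @mm's page-table pages.
+ */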
+void mm_unpin(struct mm_struct *mm)
+{
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+
+ spin_lock(&mm->page_table_lock);
+
+ xen_pgd_unpin(__pa(mm->pgd));
+ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)mm->pgd,
+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)__user_pgd(mm->pgd),
+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
+ mm_walk(mm, PAGE_KERNEL);
+ xen_tlb_flush();
+ mm->context.pinned = 0;
+ spin_lock(&mm_unpinned_lock);
+ list_add(&mm->context.unpinned, &mm_unpinned);
+ spin_unlock(&mm_unpinned_lock);
+
+ spin_unlock(&mm->page_table_lock);
+}
+
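+/* Pin every mm that is still on the unpinned list. */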
+void mm_pin_all(void)
+{
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+
+ while (!list_empty(&mm_unpinned))
+ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
+ context.unpinned));
+}
+
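+/* Hook from the generic mm-duplication path: pin the mm if not yet pinned. */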
+void _arch_dup_mmap(struct mm_struct *mm)
+{
+ if (!mm->context.pinned)
+ mm_pin(mm);
+}
+
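+/*
+ * Hook from exit_mmap(): switch away from the dying mm so its pagetables
+ * can be torn down cheaply, and unpin it once nothing else references it.
+ */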
+void _arch_exit_mmap(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+
+ /*
+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
+ */
+ if (tsk->active_mm == mm) {
+ tsk->active_mm = &init_mm;
+ atomic_inc(&init_mm.mm_count);
+
+ switch_mm(mm, &init_mm, tsk);
+
+ atomic_dec(&mm->mm_count);
+ BUG_ON(atomic_read(&mm->mm_count) == 0);
+ }
+
+ task_unlock(tsk);
+
+ if (mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
+ !mm->context.has_foreign_mappings)
+ mm_unpin(mm);
+}
+
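+/*
+ * A pte page that belonged to a pinned mm is still mapped read-only in the
+ * direct mapping; make it writable again before returning it to the page
+ * allocator.
+ */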
+void pte_free(struct page *pte)
+{
+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
+
+ if (!pte_write(*virt_to_ptep(va)))
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
+ __free_page(pte);
+}
+#endif /* CONFIG_XEN */
+
static inline pte_t *lookup_address(unsigned long address)
{
pgd_t *pgd = pgd_offset_k(address);
return base;
}
-
-static void flush_kernel_map(void *address)
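+/* Flush a single page from the CPU caches, one cache line at a time. */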
+static void cache_flush_page(void *adr)
{
- if (0 && address && cpu_has_clflush) {
- /* is this worth it? */
- int i;
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- asm volatile("clflush (%0)" :: "r" (address + i));
- } else
- asm volatile("wbinvd":::"memory");
- if (address)
- __flush_tlb_one(address);
- else
- __flush_tlb_all();
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (adr + i));
}
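+/*
+ * Run on every CPU via on_each_cpu(): flush the caches for all deferred
+ * pages (or the whole cache if CLFLUSH is unavailable), then the TLB.
+ */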
+static void flush_kernel_map(void *arg)
+{
+ struct list_head *l = (struct list_head *)arg;
+ struct page *pg;
+
+ /* When clflush is available, always use it because it is
+ much cheaper than WBINVD. */
+ if (!cpu_has_clflush)
+ asm volatile("wbinvd" ::: "memory");
+ list_for_each_entry(pg, l, lru) {
+ void *adr = page_address(pg);
+ if (cpu_has_clflush)
+ cache_flush_page(adr);
+ }
+ __flush_tlb_all();
+}
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
{
- on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+ on_each_cpu(flush_kernel_map, l, 1, 1);
}
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
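+/* Defer freeing of a page-table page until after the next global TLB flush. */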
static inline void save_page(struct page *fpage)
{
- fpage->lru.next = (struct list_head *)deferred_pages;
- deferred_pages = fpage;
+ list_add(&fpage->lru, &deferred_pages);
}
/*
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
- pgprot_val(ref_prot) |= _PAGE_PSE;
large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+ large_pte = pte_mkhuge(large_pte);
set_pte((pte_t *)pmd, large_pte);
}
{
pte_t *kpte;
struct page *kpte_page;
- unsigned kpte_flags;
pgprot_t ref_prot2;
kpte = lookup_address(address);
if (!kpte) return 0;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
- kpte_flags = pte_val(*kpte);
if (pgprot_val(prot) != pgprot_val(ref_prot)) {
- if ((kpte_flags & _PAGE_PSE) == 0) {
+ if (!pte_huge(*kpte)) {
set_pte(kpte, pfn_pte(pfn, prot));
} else {
/*
* split_large_page will take the reference for this
* change_page_attr on the split page.
*/
-
struct page *split;
- ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-
+ ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
split = split_large_page(address, prot, ref_prot2);
if (!split)
return -ENOMEM;
- set_pte(kpte,mk_pte(split, ref_prot2));
+ set_pte(kpte, mk_pte(split, ref_prot2));
kpte_page = split;
- }
+ }
page_private(kpte_page)++;
- } else if ((kpte_flags & _PAGE_PSE) == 0) {
+ } else if (!pte_huge(*kpte)) {
set_pte(kpte, pfn_pte(pfn, ref_prot));
BUG_ON(page_private(kpte_page) == 0);
page_private(kpte_page)--;
BUG();
/* on x86-64 the direct mapping set at boot is not using 4k pages */
+ /*
+ * ..., but the XEN guest kernels (currently) do:
+ * If the pte was reserved, it means it was created at boot
+ * time (not via split_large_page) and in turn we must not
+ * replace it with a large page.
+ */
+#ifndef CONFIG_XEN
BUG_ON(PageReserved(kpte_page));
+#else
+ if (!PageReserved(kpte_page))
+#endif
if (page_private(kpte_page) == 0) {
save_page(kpte_page);
* lowmem */
if (__pa(address) < KERNEL_TEXT_SIZE) {
unsigned long addr2;
- pgprot_t prot2 = prot;
+ pgprot_t prot2;
addr2 = __START_KERNEL_map + __pa(address);
- pgprot_val(prot2) &= ~_PAGE_NX;
- err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+ /* Make sure the kernel mappings stay executable */
+ prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+ err = __change_page_attr(addr2, pfn, prot2,
+ PAGE_KERNEL_EXEC);
}
}
up_write(&init_mm.mmap_sem);
void global_flush_tlb(void)
{
- struct page *dpage;
+ struct page *pg, *next;
+ struct list_head l;
down_read(&init_mm.mmap_sem);
- dpage = xchg(&deferred_pages, NULL);
+ list_replace_init(&deferred_pages, &l);
up_read(&init_mm.mmap_sem);
- flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
- while (dpage) {
- struct page *tmp = dpage;
- dpage = (struct page *)dpage->lru.next;
- ClearPagePrivate(tmp);
- __free_page(tmp);
+ flush_map(&l);
+
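+ /* The flush is complete; it is now safe to free the deferred pages. */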
+ list_for_each_entry_safe(pg, next, &l, lru) {
+ ClearPagePrivate(pg);
+ __free_page(pg);
}
}