X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=inline;f=arch%2Farm%2Fmm%2Fcopypage-v6.c;h=269ce6913ee95958948b6ce142c8a1773206434f;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=e6a139ee2f52d61f9ec800bf549fd3413934f0e7;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index e6a139ee2..269ce6913 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -15,34 +15,63 @@
 #include
 #include
 #include
+#include

 #if SHMLBA > 16384
 #error FIX ME
 #endif

 #define from_address	(0xffff8000)
-#define from_pgprot	PAGE_KERNEL
 #define to_address	(0xffffc000)
-#define to_pgprot	PAGE_KERNEL

-static pte_t *from_pte;
-static pte_t *to_pte;
-static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

-#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
+static DEFINE_SPINLOCK(v6_lock);
+
+/*
+ * Copy the user page.  No aliasing to deal with so we can just
+ * attack the kernel's existing mapping of these pages.
+ */
+static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	copy_page(kto, kfrom);
+}
+
+/*
+ * Clear the user page.  No aliasing to deal with so we can just
+ * attack the kernel's existing mapping of this page.
+ */
+static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+{
+	clear_page(kaddr);
+}

 /*
  * Copy the page, taking account of the cache colour.
  */
-void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 {
-	unsigned int offset = DCACHE_COLOUR(vaddr);
+	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;

+	/*
+	 * Discard data in the kernel mapping for the new page.
+	 * FIXME: needs this MCRR to be supported.
+	 */
+	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
+	   :
+	   : "r" (kto),
+	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+	   : "cc");
+
+	/*
+	 * Now copy the page using the same cache colour as the
+	 * pages ultimate destination.
+	 */
 	spin_lock(&v6_lock);

-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));

 	from = from_address + (offset << PAGE_SHIFT);
 	to   = to_address + (offset << PAGE_SHIFT);
@@ -55,14 +84,33 @@ void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	spin_unlock(&v6_lock);
 }

-void v6_clear_user_page(void *kaddr, unsigned long vaddr)
+/*
+ * Clear the user page.  We need to deal with the aliasing issues,
+ * so remap the kernel page into the same cache colour as the user
+ * page.
+ */
+static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 {
-	unsigned int offset = DCACHE_COLOUR(vaddr);
+	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);

+	/*
+	 * Discard data in the kernel mapping for the new page
+	 * FIXME: needs this MCRR to be supported.
+	 */
+	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
+	   :
+	   : "r" (kaddr),
+	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
+	   : "cc");
+
+	/*
+	 * Now clear the page using the same cache colour as
+	 * the pages ultimate destination.
+	 */
 	spin_lock(&v6_lock);

-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
@@ -70,29 +118,18 @@ void v6_clear_user_page(void *kaddr, unsigned long vaddr)
 }

 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page,
-	.cpu_copy_user_page	= v6_copy_user_page,
+	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
+	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
 };

 static int __init v6_userpage_init(void)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-
-	pgd = pgd_offset_k(from_address);
-	pmd = pmd_alloc(&init_mm, pgd, from_address);
-	if (!pmd)
-		BUG();
-	from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-	if (!from_pte)
-		BUG();
-
-	to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-	if (!to_pte)
-		BUG();
+	if (cache_is_vipt_aliasing()) {
+		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
+		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+	}

 	return 0;
 }

-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
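
For reference, the sketch below is a hypothetical, standalone user-space illustration (not kernel code) of the cache-colour arithmetic the aliasing paths above rely on. The function and variable names (cache_colour, user_vaddr) are invented for the example, and the constants assume a typical ARMv6 setup with 4 KiB pages and a 16 KiB SHMLBA, giving four colours; the kernel gets the real values from its own headers and from CACHE_COLOUR() (formerly the local DCACHE_COLOUR() macro removed by this patch).

/*
 * Illustration only: reproduces (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT,
 * the colour index used to pick the temporary mapping slot.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)		/* 16 KiB => 4 colours */
#define from_address	0xffff8000UL
#define to_address	0xffffc000UL

/* Same computation as the kernel's CACHE_COLOUR()/DCACHE_COLOUR(). */
static unsigned int cache_colour(unsigned long vaddr)
{
	return (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long user_vaddr = 0x40003000UL;	/* arbitrary example */
	unsigned int offset = cache_colour(user_vaddr);

	/*
	 * The kernel remaps the source and destination pages at fixed
	 * addresses offset by the user page's colour, so the copy/clear
	 * touches the same cache lines the user mapping will later hit.
	 */
	printf("colour %u, from %#lx, to %#lx\n", offset,
	       from_address + (offset << PAGE_SHIFT),
	       to_address + (offset << PAGE_SHIFT));
	return 0;
}

For user_vaddr = 0x40003000 this prints colour 3 with temporary mappings at 0xffffb000 and 0xfffff000. Both fixed windows sit at the very top of the kernel address space, which is why the new TOP_PTE() macro can resolve either of them through the single top_pmd page table, and why the aliasing variants are only installed at init time when cache_is_vipt_aliasing() reports a VIPT-aliasing data cache.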