X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fmm%2Fcopypage-v6.c;h=402dad21a1d4c47765111be5b5b04c0bb294e346;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=e6a139ee2f52d61f9ec800bf549fd3413934f0e7;hpb=a2c21200f1c81b08cb55e417b68150bba439b646;p=linux-2.6.git

diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index e6a139ee2..402dad21a 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -31,14 +31,46 @@ static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
+/*
+ * Copy the user page.  No aliasing to deal with so we can just
+ * attack the kernel's existing mapping of these pages.
+ */
+void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	copy_page(kto, kfrom);
+}
+
+/*
+ * Clear the user page.  No aliasing to deal with so we can just
+ * attack the kernel's existing mapping of this page.
+ */
+void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+{
+	clear_page(kaddr);
+}
+
 /*
  * Copy the page, taking account of the cache colour.
  */
-void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 {
 	unsigned int offset = DCACHE_COLOUR(vaddr);
 	unsigned long from, to;
 
+	/*
+	 * Discard data in the kernel mapping for the new page.
+	 * FIXME: needs this MCRR to be supported.
+	 */
+	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
+	   :
+	   : "r" (kto),
+	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+	   : "cc");
+
+	/*
+	 * Now copy the page using the same cache colour as the
+	 * pages ultimate destination.
+	 */
 	spin_lock(&v6_lock);
 
 	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
@@ -55,11 +87,30 @@ void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	spin_unlock(&v6_lock);
 }
 
-void v6_clear_user_page(void *kaddr, unsigned long vaddr)
+/*
+ * Clear the user page.  We need to deal with the aliasing issues,
+ * so remap the kernel page into the same cache colour as the user
+ * page.
+ */
+void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 {
 	unsigned int offset = DCACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
+	/*
+	 * Discard data in the kernel mapping for the new page
+	 * FIXME: needs this MCRR to be supported.
+	 */
+	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
+	   :
+	   : "r" (kaddr),
+	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
+	   : "cc");
+
+	/*
+	 * Now clear the page using the same cache colour as
+	 * the pages ultimate destination.
+	 */
 	spin_lock(&v6_lock);
 
 	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
@@ -70,26 +121,31 @@ void v6_clear_user_page(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page,
-	.cpu_copy_user_page	= v6_copy_user_page,
+	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
+	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-
-	pgd = pgd_offset_k(from_address);
-	pmd = pmd_alloc(&init_mm, pgd, from_address);
-	if (!pmd)
-		BUG();
-	from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-	if (!from_pte)
-		BUG();
-
-	to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-	if (!to_pte)
-		BUG();
+	if (cache_is_vipt_aliasing()) {
+		pgd_t *pgd;
+		pmd_t *pmd;
+
+		pgd = pgd_offset_k(from_address);
+		pmd = pmd_alloc(&init_mm, pgd, from_address);
+		if (!pmd)
+			BUG();
+		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
+		if (!from_pte)
+			BUG();
+
+		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
+		if (!to_pte)
+			BUG();
+
+		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
+		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+	}
 
 	return 0;
 }
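Note: the aliasing paths above pick a kernel alias of the page whose virtual address has the same D-cache colour as the user mapping, so the copy/clear hits the same cache sets the user will later see. Below is a minimal standalone sketch (not kernel code) of that arithmetic; the 4KB page size, the 16KB SHMLBA aliasing span, the window base address and the user address are illustrative assumptions, while the real from_address/to_address windows are defined earlier in copypage-v6.c.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)		/* assumed aliasing span */
#define DCACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long to_address = 0xffff8000UL;	/* hypothetical alias window base */
	unsigned long user_vaddr = 0x4005a000UL;	/* hypothetical user mapping */

	/* Choose the kernel alias with the same cache colour as the user page. */
	unsigned int colour = DCACHE_COLOUR(user_vaddr);
	unsigned long alias = to_address + ((unsigned long)colour << PAGE_SHIFT);

	printf("colour %u -> alias 0x%08lx\n", colour, alias);
	return 0;
}

This mirrors what v6_clear_user_page_aliasing() does with "to_address + (offset << PAGE_SHIFT)": the set_pte() call then maps the target page at that congruent alias before it is cleared or written.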