X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fmm%2Fproc-xscale.S;fp=arch%2Farm%2Fmm%2Fproc-xscale.S;h=29bcc4dd65178a99cd640e4fc4fd715b97453e00;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=2d977b4eeeabf95937be3ad863a550d62e21e40e;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 2d977b4ee..29bcc4dd6 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -24,8 +24,8 @@ #include #include #include -#include #include +#include #include #include #include "proc-macros.S" @@ -241,7 +241,15 @@ ENTRY(xscale_flush_user_cache_range) * it also trashes the mini I-cache used by JTAG debuggers. */ ENTRY(xscale_coherent_kern_range) - /* FALLTHROUGH */ + bic r0, r0, #CACHELINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHELINESIZE + cmp r0, r1 + blo 1b + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB + mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer + mov pc, lr /* * coherent_user_range(start, end) @@ -252,18 +260,16 @@ ENTRY(xscale_coherent_kern_range) * * - start - virtual start address * - end - virtual end address - * - * Note: single I-cache line invalidation isn't used here since - * it also trashes the mini I-cache used by JTAG debuggers. */ ENTRY(xscale_coherent_user_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry add r0, r0, #CACHELINESIZE cmp r0, r1 blo 1b mov r0, #0 - mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB + mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer mov pc, lr @@ -370,142 +376,6 @@ ENTRY(cpu_xscale_dcache_clean_area) bhi 1b mov pc, lr -/* ================================ CACHE LOCKING============================ - * - * The XScale MicroArchitecture implements support for locking entries into - * the data and instruction cache. The following functions implement the core - * low level instructions needed to accomplish the locking. The developer's - * manual states that the code that performs the locking must be in non-cached - * memory. To accomplish this, the code in xscale-cache-lock.c copies the - * following functions from the cache into a non-cached memory region that - * is allocated through consistent_alloc(). - * - */ - .align 5 -/* - * xscale_icache_lock - * - * r0: starting address to lock - * r1: end address to lock - */ -ENTRY(xscale_icache_lock) - -iLockLoop: - bic r0, r0, #CACHELINESIZE - 1 - mcr p15, 0, r0, c9, c1, 0 @ lock into cache - cmp r0, r1 @ are we done? - add r0, r0, #CACHELINESIZE @ advance to next cache line - bls iLockLoop - mov pc, lr - -/* - * xscale_icache_unlock - */ -ENTRY(xscale_icache_unlock) - mcr p15, 0, r0, c9, c1, 1 @ Unlock icache - mov pc, lr - -/* - * xscale_dcache_lock - * - * r0: starting address to lock - * r1: end address to lock - */ -ENTRY(xscale_dcache_lock) - mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer - mov r2, #1 - mcr p15, 0, r2, c9, c2, 0 @ Put dcache in lock mode - cpwait ip @ Wait for completion - - mrs r2, cpsr - orr r3, r2, #PSR_F_BIT | PSR_I_BIT -dLockLoop: - msr cpsr_c, r3 - mcr p15, 0, r0, c7, c10, 1 @ Write back line if it is dirty - mcr p15, 0, r0, c7, c6, 1 @ Flush/invalidate line - msr cpsr_c, r2 - ldr ip, [r0], #CACHELINESIZE @ Preload 32 bytes into cache from - @ location [r0]. Post-increment - @ r3 to next cache line - cmp r0, r1 @ Are we done? 
- bls dLockLoop - - mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer - mov r2, #0 - mcr p15, 0, r2, c9, c2, 0 @ Get out of lock mode - cpwait_ret lr, ip - -/* - * xscale_dcache_unlock - */ -ENTRY(xscale_dcache_unlock) - mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer - mcr p15, 0, ip, c9, c2, 1 @ Unlock cache - mov pc, lr - -/* - * Needed to determine the length of the code that needs to be copied. - */ - .align 5 -ENTRY(xscale_cache_dummy) - mov pc, lr - -/* ================================ TLB LOCKING============================== - * - * The XScale MicroArchitecture implements support for locking entries into - * the Instruction and Data TLBs. The following functions provide the - * low level support for supporting these under Linux. xscale-lock.c - * implements some higher level management code. Most of the following - * is taken straight out of the Developer's Manual. - */ - -/* - * Lock I-TLB entry - * - * r0: Virtual address to translate and lock - */ - .align 5 -ENTRY(xscale_itlb_lock) - mrs r2, cpsr - orr r3, r2, #PSR_F_BIT | PSR_I_BIT - msr cpsr_c, r3 @ Disable interrupts - mcr p15, 0, r0, c8, c5, 1 @ Invalidate I-TLB entry - mcr p15, 0, r0, c10, c4, 0 @ Translate and lock - msr cpsr_c, r2 @ Restore interrupts - cpwait_ret lr, ip - -/* - * Lock D-TLB entry - * - * r0: Virtual address to translate and lock - */ - .align 5 -ENTRY(xscale_dtlb_lock) - mrs r2, cpsr - orr r3, r2, #PSR_F_BIT | PSR_I_BIT - msr cpsr_c, r3 @ Disable interrupts - mcr p15, 0, r0, c8, c6, 1 @ Invalidate D-TLB entry - mcr p15, 0, r0, c10, c8, 0 @ Translate and lock - msr cpsr_c, r2 @ Restore interrupts - cpwait_ret lr, ip - -/* - * Unlock all I-TLB entries - */ - .align 5 -ENTRY(xscale_itlb_unlock) - mcr p15, 0, ip, c10, c4, 1 @ Unlock I-TLB - mcr p15, 0, ip, c8, c5, 0 @ Invalidate I-TLB - cpwait_ret lr, ip - -/* - * Unlock all D-TLB entries - */ -ENTRY(xscale_dtlb_unlock) - mcr p15, 0, ip, c10, c8, 1 @ Unlock D-TBL - mcr p15, 0, ip, c8, c6, 0 @ Invalidate D-TLB - cpwait_ret lr, ip - /* =============================== PageTable ============================== */ #define PTE_CACHE_WRITE_ALLOCATE 0 @@ -714,7 +584,7 @@ cpu_pxa270_name: .align - .section ".proc.info", #alloc, #execinstr + .section ".proc.info.init", #alloc, #execinstr .type __80200_proc_info,#object __80200_proc_info:
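
The two rewritten cache-maintenance routines in this patch share one pattern: round the start address down to a cache-line boundary, walk [start, end) one line at a time issuing per-line CP15 operations, then finish with a global operation and a write-buffer drain. The difference is that xscale_coherent_kern_range keeps the whole-I-cache-plus-BTB invalidate (single I-cache line invalidation also trashes the mini I-cache used by JTAG debuggers, as the retained note says), while xscale_coherent_user_range now invalidates I-cache lines individually and only needs a BTB invalidate at the end. The C sketch below is illustrative only: CACHELINESIZE (32 bytes on XScale) and the xsc_* helpers are hypothetical stand-ins for the MCR p15 instructions in the hunks, not kernel APIs.

/*
 * Illustrative sketch of the two coherent-range loops above.  The
 * xsc_* helpers are stand-ins for the CP15 operations named in the
 * comments; they are not real kernel functions.
 */
#define CACHELINESIZE	32

static void xsc_clean_dcache_line(unsigned long addr)	{ (void)addr; /* mcr p15, 0, addr, c7, c10, 1 */ }
static void xsc_inval_icache_line(unsigned long addr)	{ (void)addr; /* mcr p15, 0, addr, c7, c5, 1  */ }
static void xsc_inval_icache_and_btb(void)		{ /* mcr p15, 0, 0, c7, c5, 0  */ }
static void xsc_inval_btb(void)				{ /* mcr p15, 0, 0, c7, c5, 6  */ }
static void xsc_drain_write_buffer(void)		{ /* mcr p15, 0, 0, c7, c10, 4 */ }

/* Kernel variant: clean D-cache lines over the range, then invalidate
 * the whole I-cache and BTB rather than individual I-cache lines. */
void coherent_kern_range_sketch(unsigned long start, unsigned long end)
{
	start &= ~(CACHELINESIZE - 1);		/* bic r0, r0, #CACHELINESIZE - 1 */
	do {
		xsc_clean_dcache_line(start);
		start += CACHELINESIZE;
	} while (start < end);			/* cmp r0, r1; blo 1b */
	xsc_inval_icache_and_btb();
	xsc_drain_write_buffer();
}

/* User variant: invalidate each I-cache line as its D-cache line is
 * cleaned, so only the branch target buffer is invalidated globally. */
void coherent_user_range_sketch(unsigned long start, unsigned long end)
{
	start &= ~(CACHELINESIZE - 1);
	do {
		xsc_clean_dcache_line(start);
		xsc_inval_icache_line(start);
		start += CACHELINESIZE;
	} while (start < end);
	xsc_inval_btb();
	xsc_drain_write_buffer();
}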
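
The final hunk renames the processor-information section from ".proc.info" to ".proc.info.init" so that __80200_proc_info and the records that follow it are collected by the kernel linker script between the __proc_info_begin and __proc_info_end markers, alongside the other init-time data. At boot the kernel scans that table, matching the CP15 ID register against each entry's cpu_val/cpu_mask pair. The sketch below illustrates that lookup in C under stated assumptions: struct proc_info_entry is a simplified stand-in for the real struct proc_info_list, and the actual scan is done in assembly (lookup_processor_type) before the MMU is enabled.

/*
 * Hedged sketch of the table lookup that .proc.info.init feeds.
 * struct proc_info_entry is a simplified stand-in; the real records
 * also carry MMU flags, function tables and name strings.
 */
struct proc_info_entry {
	unsigned int	cpu_val;	/* expected ID register value ... */
	unsigned int	cpu_mask;	/* ... under this mask */
	/* function pointers, strings, etc. omitted */
};

/* Provided by the kernel linker script around *(.proc.info.init). */
extern struct proc_info_entry __proc_info_begin[], __proc_info_end[];

struct proc_info_entry *lookup_processor_type_sketch(unsigned int cpuid)
{
	struct proc_info_entry *p;

	for (p = __proc_info_begin; p < __proc_info_end; p++)
		if ((cpuid & p->cpu_mask) == p->cpu_val)
			return p;		/* matching CPU descriptor */
	return 0;				/* unknown processor */
}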