2 * arch/ppc/boot/common/util.S
4 * Useful bootup functions, which are more easily done in asm than C.
6 * NOTE: Be very very careful about the registers you use here.
7 * We don't follow any ABI calling convention among the
8 * assembler functions that call each other, especially early
9 * in the initialization. Please preserve at least r3 and r4
10 * for these early functions, as they often contain information
11 * passed from boot roms into the C decompress function.
15 * Derived from arch/ppc/boot/prep/head.S (Cort Dougan, many others).
17 * 2001-2004 (c) MontaVista, Software, Inc. This file is licensed under
18 * the terms of the GNU General Public License version 2. This program
19 * is licensed "as is" without any warranty of any kind, whether express
23 #include <asm/processor.h>
24 #include <asm/cache.h>
25 #include <asm/ppc_asm.h>
/*
 * disable_6xx_mmu -- NOTE(review): this is a non-contiguous fragment;
 * original source lines are missing between the numbered rows below
 * (e.g. the MSR setup and the segment-register store loop body), so do
 * not read the visible instructions as adjacent.
 */
30 .globl disable_6xx_mmu
32 /* Establish default MSR value, exception prefix 0xFFF.
33 * If necessary, this function must fix up the LR if we
34 * return to a different address space once the MMU is
44 cmpwi 0,r10,1 /* 601 ? */ /* presumably r10 holds the PVR version field -- the mfspr is on a missing line; TODO confirm */
70 /* Set segment registers */
71 li r8,16 /* load up segment register values */ /* 32-bit PPC has 16 segment registers */
72 mtctr r8 /* for context 0 */ /* CTR = 16 iterations for the (missing) mtsr loop */
73 lis r8,0x2000 /* Ku = 1, VSID = 0 */
76 addi r8,r8,0x111 /* increment VSID */
77 addis r10,r10,0x1000 /* address of next segment */ /* 0x1000 << 16 = 0x10000000: segments are 256MB apart */
/*
 * disable_6xx_l1cache -- NOTE(review): non-contiguous fragment; mtspr
 * writes, branches and labels referenced below live on missing lines.
 * Visible logic: L1 enable/invalidate/disable, then L2 (and L3 on
 * CPUs that have one) invalidate/disable, with PVR-specific polling.
 */
81 .globl disable_6xx_l1cache
83 /* Enable, invalidate and then disable the L1 icache/dcache. */
85 ori r8,r8,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
101 * We should be skipping this section on CPUs where this results in an
102 * illegal instruction. If not, please send trini@kernel.crashing.org
103 * the PVR of your CPU.
105 /* Invalidate/disable L2 cache */
110 oris r8,r8,L2CR_L2I@h /* set the L2 global-invalidate bit */
117 /* Wait for the invalidation to complete */
/* r8 is compared against PVR version numbers: which bit to poll differs per core */
120 cmplwi cr0,r8,0x8000 /* 7450 */
121 cmplwi cr1,r8,0x8001 /* 7455 */
122 cmplwi cr2,r8,0x8002 /* 7457 */
123 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq /* Now test if any are true. */
124 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq /* cr0.eq = (7450 || 7455 || 7457) */
127 1: mfspr r8,L2CR /* On 745x, poll L2I bit (bit 10) */
128 rlwinm. r9,r8,0,10,10 /* isolate bit 10 (IBM numbering); sets CR0 for the (missing) loop branch */
132 2: mfspr r8,L2CR /* On 75x & 74[01]0, poll L2IP bit (bit 31) */
133 rlwinm. r9,r8,0,31,31 /* isolate bit 31; loop until invalidation-in-progress clears */
136 3: rlwinm r8,r8,0,11,9 /* Turn off L2I bit */ /* mask 11..9 wraps around, clearing only bit 10 */
146 /* Invalidate/disable L3 cache */
158 /* Wait for the invalidation to complete */
160 rlwinm. r9,r8,0,21,21 /* poll bit 21 -- presumably L3I in L3CR; TODO confirm against missing mfspr */
163 rlwinm r8,r8,0,22,20 /* Turn off L3I bit */ /* wrap-around mask clears only bit 21 */
172 /* udelay (on non-601 processors) needs to know the period of the
173 * timebase in nanoseconds. This used to be hardcoded to be 60ns
174 * (period of 66MHz/4). Now a variable is used that is initialized to
175 * 60 for backward compatibility, but it can be overridden as necessary
176 * with code something like this:
177 * extern unsigned long timebase_period_ns;
178 * timebase_period_ns = 1000000000 / bd->bi_tbfreq;
181 .globl timebase_period_ns
/* NOTE(review): the storage directive initializing this variable
 * (presumably a .long with value 60, per the comment above) is on a
 * line missing from this fragment. */
/*
 * udelay -- NOTE(review): non-contiguous fragment. In: r3 = number of
 * microseconds to delay (inferred from the mulli below and the header
 * comment -- confirm against missing lines). Two paths: a calibrated
 * instruction-count loop for the 601, and a timebase-tick loop for
 * everything else.
 */
187 * Delay for a number of microseconds
193 cmpwi 0,r4,1 /* 601 ? */ /* presumably r4 holds the PVR version; the mfspr is on a missing line */
195 00: li r0,86 /* Instructions / microsecond? */ /* 601 path: busy-loop calibration constant */
197 10: addi r0,r0,0 /* NOP */
204 mulli r4,r3,1000 /* nanoseconds */ /* r4 = microseconds * 1000 */
205 /* Change r4 to be the number of ticks using:
206 * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
207 * timebase_period_ns defaults to 60 (16.6MHz) */
208 lis r5,timebase_period_ns@ha /* standard @ha/@l pair to load the global */
209 lwz r5,timebase_period_ns@l(r5)
212 divw r4,r4,r5 /* BUS ticks */ /* the round-up add of (r5 - 1) is on a missing line */
217 bne 1b /* Get [synced] base time */ /* re-read timebase until upper word is stable */
218 addc r9,r6,r4 /* Compute end time */ /* presumably r6 = timebase low word; carry feeds the high-word compare */
229 .section ".relocate_code","xa"
231 * Flush and enable instruction cache
232 * First, flush the data cache in case it was enabled and may be
233 * holding instructions for copy back.
235 _GLOBAL(flush_instruction_cache)
/* NOTE(review): non-contiguous fragment; the 4xx/6xx dispatch and the
 * mtspr/isync sequences sit on missing lines. */
244 lis r3, IDC_DISABLE@h
247 lis r3,start@h # r3 = &_start (stale comment said r9; code clearly uses r3)
249 addi r4,r4,_etext@l # r4 = &_etext (stale comment said r8; code clearly uses r4)
250 1: dcbf r0,r3 # Flush the data cache
251 icbi r0,r3 # Invalidate the instruction cache
252 addi r3,r3,0x10 # Increment by one cache line
# NOTE(review): hard-coded 16-byte line size here, while flush_data_cache
# uses L1_CACHE_BYTES -- presumably intentional for the 4xx path; confirm.
253 cmplwi cr0,r3,r4 # Are we at the end yet?
254 blt 1b # No, keep flushing and invalidating
256 /* Enable, invalidate and then disable the L1 icache/dcache. */
258 ori r3,r3,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
265 ori r5,r4,HID0_ICE /* Enable cache */ /* leave only the I-cache enabled on exit */
273 #define NUM_CACHE_LINES 128*8 /* lines to touch; sized to exceed the L1 D-cache */
274 #define cache_flush_buffer 0x1000 /* scratch region read to displace dirty lines */
/*
 * flush_data_cache -- flush the D-cache by reading enough memory to
 * displace every dirty line (no dcbf loop on this path).
 * NOTE(review): non-contiguous fragment; the mtctr/lwz loop body and
 * the blr are on missing lines.
 */
278 * Do this by just reading lots of stuff into the cache.
280 _GLOBAL(flush_data_cache)
281 lis r3,cache_flush_buffer@h
282 ori r3,r3,cache_flush_buffer@l /* r3 = &cache_flush_buffer */
283 li r4,NUM_CACHE_LINES /* loop count, presumably moved to CTR on a missing line */
286 addi r3,r3,L1_CACHE_BYTES /* Next line, please */