2 * Low-level PXA250/210 sleep/wakeup support
5 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
7 * Adapted for PXA by Nicolas Pitre:
8 * Copyright (c) 2002 Monta Vista Software, Inc.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License.
14 #include <linux/config.h>
15 #include <linux/linkage.h>
16 #include <asm/assembler.h>
17 #include <asm/hardware.h>
24 * Forces CPU into sleep state
27 ENTRY(pxa_cpu_suspend)
@
@ pxa_cpu_suspend: force the PXA250/210 into sleep mode.
@ Saves callee context plus the CP15 registers needed after wakeup on
@ the stack (the stack's physical address is kept via sleep_save_sp so
@ pxa_cpu_resume can reach it with the MMU off), flushes the caches,
@ puts SDRAM into self-refresh and issues the sleep request via CP14.
@ Does not return here; execution resumes at pxa_cpu_resume.
@ NOTE(review): several lines (stores to the save area, local label
@ 20:, the self-refresh store sequence) are elided from this view.
@
30 stmfd sp!, {r2 - r12, lr} @ save registers on stack
32 @ get coprocessor registers
33 mrc p15, 0, r4, c15, c1, 0 @ CP access reg
34 mrc p15, 0, r5, c13, c0, 0 @ PID
35 mrc p15, 0, r6, c3, c0, 0 @ domain ID
36 mrc p15, 0, r7, c2, c0, 0 @ translation table base addr
37 mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg
38 mrc p15, 0, r9, c1, c0, 0 @ control reg
40 @ store them plus current virtual stack ptr on stack
44 @ preserve phys address of stack
47 ldr r1, =sleep_save_sp
51 bl xscale_flush_kern_cache_all
53 @ Put the processor to sleep
54 @ (also workaround for sighting 28071)
56 @ prepare value for sleep mode
57 mov r1, #3 @ sleep mode
59 @ prepare to put SDRAM into self-refresh manually
62 orr r5, r5, #MDREFR_SLFRSH
64 @ prepare pointer to physical address 0 (virtual mapping in generic.c)
65 mov r2, #UNCACHED_PHYS_0
67 @ align execution to a cache line
74 @ All needed values are now in registers.
75 @ These last instructions should be in cache
77 @ put SDRAM into self-refresh
80 @ force address lines low by reading at physical address 0
84 mcr p14, 0, r1, c7, c0, 0 @ enter sleep mode via CP14 (r1 = 3)
87 b 20b @ loop waiting for sleep
92 * entry point from bootloader into kernel during resume
94 * Note: Yes, part of the following code is located in the .data section.
95 * This is to allow sleep_save_sp to be accessed with a relative load
96 * while we can't rely on any MMU translation. We could have put
97 * sleep_save_sp in the .text section as well, but some setups might
98 * insist on it being truly read-only.
103 ENTRY(pxa_cpu_resume)
@
@ pxa_cpu_resume: entry point from the bootloader after wakeup.
@ Runs with the MMU off in SVC mode, IRQ/FIQ masked. Reloads the CP15
@ state saved by pxa_cpu_suspend from the physical stack address held
@ in sleep_save_sp, invalidates TLBs/caches, turns the MMU back on and
@ jumps to the virtual-address continuation resume_after_mmu.
@ NOTE(review): some lines (msr to CPSR, mov r1, #0, #endif, the
@ resume_turn_on_mmu label) are elided from this view.
@
104 mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC @ set SVC, irqs off
107 ldr r0, sleep_save_sp @ stack phys addr
108 ldr r2, =resume_after_mmu @ its absolute virtual address
109 ldmfd r0, {r4 - r9, sp} @ CP regs + virt stack ptr
112 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
113 mcr p15, 0, r1, c7, c7, 0 @ invalidate I & D caches, BTB
115 #ifdef CONFIG_XSCALE_CACHE_ERRATA
116 bic r9, r9, #0x0004 @ see cpu_xscale_proc_init
119 mcr p15, 0, r4, c15, c1, 0 @ CP access reg
120 mcr p15, 0, r5, c13, c0, 0 @ PID
121 mcr p15, 0, r6, c3, c0, 0 @ domain ID
122 mcr p15, 0, r7, c2, c0, 0 @ translation table base addr
123 mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg
124 b resume_turn_on_mmu @ cache align execution
128 mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, caches, etc.
130 @ Let us ensure we jump to resume_after_mmu only when the mcr above
131 @ actually took effect. They call it the "cpwait" operation.
132 mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15
@ r1, lsr #32 evaluates to 0 (ARM LSR #32 semantics), so pc = r2; the
@ sub merely serializes on the mrc result before the jump
133 sub pc, r2, r1, lsr #32 @ jump to virtual addr
@ NOTE(review): presumably the sleep_save_sp storage word referenced by
@ both entry points above — its label line is not shown here; confirm.
139 .word 0 @ preserve stack phys ptr here
@ NOTE(review): tail of resume_after_mmu (label line not shown here) —
@ runs at its virtual address with the MMU back on; restores the
@ remaining registers saved by pxa_cpu_suspend and returns to caller.
143 #ifdef CONFIG_XSCALE_CACHE_ERRATA
144 bl cpu_xscale_proc_init @ re-apply cache-errata setup (see bic above)
148 ldmfd sp!, {r4 - r12, pc} @ return to caller