/*
 * Low-level PXA250/210 sleep/wakeup support
 *
 * Initial SA1110 code:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Adapted for PXA by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware.h>

#include <asm/arch/pxa-regs.h>

#define MDREFR_KDIV	0x200a4000	// all banks
#define CCCR_SLEEP	0x00000107	// L=7 2N=2 A=0 PPDIS=0 CPDIS=0
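
@ (CCCR_SLEEP = 0x107 sets L=7 and 2N=2, i.e. a run clock of
@  7 x 13 MHz = 91 MHz with the turbo multiplier at x1; this is the
@  91 MHz ceiling referred to in the Errata 50 workaround below.)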

		.text

pxa_cpu_save_cp:
	@ get coprocessor registers
	mrc	p14, 0, r3, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r4, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c3, c0, 0		@ domain ID
	mrc	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0		@ control reg

	bic	r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov	r10, sp
	stmfd	sp!, {r3 - r10}

	mov	pc, lr
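
	@ Note: the order of {r3 - r10} above must stay in step with the
	@ "ldmfd r0, {r3 - r9, sp}" in pxa_cpu_resume below: stmfd puts the
	@ lowest register at the lowest address, so r3..r9 are reloaded
	@ directly and the saved r10 (the virtual sp) lands in sp.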

pxa_cpu_save_sp:
	@ preserve phys address of stack
	mov	r0, sp
	mov	r2, lr
	bl	sleep_phys_sp
	ldr	r1, =sleep_save_sp
	str	r0, [r1]
	mov	pc, r2
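
	@ sleep_phys_sp is presumably a small C helper that returns the
	@ physical address of the stack pointer passed in r0; the result is
	@ stored in sleep_save_sp (in .data, below) so that pxa_cpu_resume
	@ can find the saved register frame before the MMU is re-enabled.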

/*
 * pxa27x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */
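@ (The PWRMODE M field in r0 selects the low-power state to enter; on the
@  PXA27x, M=3 is believed to select sleep and M=7 deep sleep, but the
@  exact value is left entirely to the caller.)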

ENTRY(pxa27x_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra	r2, r3, acc0
#endif
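	@ (acc0 is the XScale DSP accumulator, read here into r2/r3 with mra;
	@  it is saved only when CONFIG_IWMMXT is not set, presumably because
	@  the iWMMXt context-switch code preserves it otherwise.)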
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	bl	pxa_cpu_save_cp

	mov	r5, r0				@ save sleep mode
	bl	pxa_cpu_save_sp

	@ clean data cache
	bl	xscale_flush_kern_cache_all

	@ Put the processor to sleep
	@ (also workaround for sighting 28071)

	@ prepare value for sleep mode
	mov	r1, r5				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
	ldr	r6, =MDREFR_KDIV
	orr	r5, r5, r6

	@ Intel PXA270 Specification Update notes problems sleeping
	@ with the core operating above 91 MHz
	@ (see Errata 50, ...processor does not exit from sleep...)

	ldr	r6, =CCCR
	ldr	r8, [r6]		@ keep original value for resume

	ldr	r7, =CCCR_SLEEP		@ prepare CCCR sleep value
	mov	r0, #0x2		@ prepare value for CLKCFG

	@ align execution to a cache line
	b	pxa_cpu_do_suspend

/*
 * pxa25x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */

ENTRY(pxa25x_cpu_suspend)
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	bl	pxa_cpu_save_cp

	mov	r5, r0				@ save sleep mode
	bl	pxa_cpu_save_sp

	@ clean data cache
	bl	xscale_flush_kern_cache_all

	@ prepare value for sleep mode
	mov	r1, r5				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ Intel PXA255 Specification Update notes problems
	@ suspending while the PXBus is operating above 133 MHz
	@ (see Errata 31, GPIO output signals, ... unpredictable in sleep)
	@
	@ We keep the clock change-down as close to the actual SDRAM
	@ suspend as possible, to avoid messing about with the refresh
	@ clock, since the system will restore the original speed settings
	@ on resume.
	@
	@ Ben Dooks, 13-Sep-2004

	ldr	r6, =CCCR
	ldr	r8, [r6]		@ keep original value for resume

	@ ensure x1 multipliers, so run and turbo mode match the memory clock
	bic	r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr	r7, r7, #(1<<5) | (2<<7)

	@ check that the memory frequency is within limits
	and	r14, r7, #CCCR_L_MASK
	teq	r14, #1
	bicne	r7, r7, #CCCR_L_MASK
	orrne	r7, r7, #1			@ 99.53 MHz
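
	@ (With the M and N fields forced to x1 above, the run and turbo
	@  clocks equal the memory clock; L=1 appears to select the lowest
	@  memory-clock setting, about 99.53 MHz, keeping things safely
	@  below the 133 MHz PXBus limit from Errata 31.)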

	@ get ready for the change

	@ Note: turbo mode is not preserved over sleep, so there is no
	@ point in preserving it here; we save it on the stack with the
	@ other CP registers instead.
	mov	r0, #0
	mcr	p14, 0, r0, c6, c0, 0
	orr	r0, r0, #2			@ initiate change bit
	b	pxa_cpu_do_suspend

	.ltorg
	.align	5
pxa_cpu_do_suspend:

	@ All needed values are now in registers.
	@ These last instructions should be in cache.
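	@ (The .align 5 above places this code on a 32-byte boundary, the
	@  XScale cache line size, so the remaining instructions can run
	@  from the instruction cache while SDRAM is put into self-refresh.)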

	@ initiate the frequency change...
	str	r7, [r6]
	mcr	p14, 0, r0, c6, c0, 0

	@ restore the original cpu speed value for resume
	str	r8, [r6]

	@ need 6 13-MHz cycles before changing PWRMODE
	@ just set the frequency to 91 MHz... 6*91/13 = 42
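	@ (Worked out: 6 cycles of the 13 MHz clock take roughly 462 ns; at
	@  a 91 MHz core clock that is 6 * (91/13) = 6 * 7 = 42 core cycles,
	@  and each subs/bne iteration below takes at least one cycle, so a
	@  count of 42 is sufficient.)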

	mov	r0, #42
10:	subs	r0, r0, #1
	bne	10b

	@ Do not reorder...
	@ Intel PXA270 Specification Update notes problems performing
	@ external accesses after SDRAM is put in self-refresh mode
	@ (see Errata 39 ...hangs when entering self-refresh mode)

	@ force address lines low by reading at physical address 0
	ldr	r3, [r2]

	@ put SDRAM into self-refresh
	str	r5, [r4]

	@ enter sleep mode
	mcr	p14, 0, r1, c7, c0, 0		@ PWRMODE

20:	b	20b				@ loop waiting for sleep
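
	@ (Execution does not return here: once the sleep request takes
	@  effect the core stops executing, and wakeup re-enters the kernel
	@  through pxa_cpu_resume below, via the bootloader.)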

/*
 * pxa_cpu_resume()
 *
 * entry point from bootloader into kernel during resume
 *
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we can't rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on .text being truly read-only.
 */

	.data
	.align 5
ENTRY(pxa_cpu_resume)
	mov	r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
	msr	cpsr_c, r0

	ldr	r0, sleep_save_sp		@ stack phys addr
	ldr	r2, =resume_after_mmu		@ its absolute virtual address
	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov	r1, #0
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB

#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic	r9, r9, #0x0004			@ see cpu_xscale_proc_init
#endif

	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	b	resume_turn_on_mmu		@ cache align execution

	.align 5
resume_turn_on_mmu:
	mcr	p15, 0, r9, c1, c0, 0		@ turn on MMU, caches, etc.

	@ Let us ensure we jump to resume_after_mmu only when the mcr above
	@ has actually taken effect.  This is known as the "cpwait" operation.
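	@ (How this works: the mrc below queues a CP15 read into r1, and
	@  "sub pc, r2, r1, lsr #32" both consumes that result and branches.
	@  An immediate shift of lsr #32 always yields zero, so pc simply
	@  gets r2, but the dependency on r1 stalls the branch until the
	@  preceding coprocessor writes, including the MMU enable, complete.)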
	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
	nop
	nop
	nop

sleep_save_sp:
	.word	0				@ preserve stack phys ptr here

	.text
resume_after_mmu:
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bl	cpu_xscale_proc_init
#endif
	ldmfd	sp!, {r2, r3}
#ifndef CONFIG_IWMMXT
	mar	acc0, r2, r3
#endif
	ldmfd	sp!, {r4 - r12, pc}		@ return to caller