/* arch/powerpc/kernel/swsusp_32.S (OpenBMC Linux, revision 293d5b43) */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE

	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

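	/* r11 points at the save area throughout this function */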
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

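	/* Save the non-volatile state: LR, CR, r1, r2 and r12-r31 */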
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
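	/* (TBU is re-read after TBL; if it changed, TBL rolled over
	 * between the two reads and the pair must be sampled again) */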
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU configuration state */
	bl	__save_cpu_setup
#endif
	/* Call the low-level suspend code (we should probably have set
	 * up a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr

/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 machines
	 * at this point; G5 will need a better approach, possibly
	 * a small temporary hash table filled with large mappings,
	 * since disabling the MMU completely isn't a good option
	 * for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using movable
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
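	/* (the rlwinm below uses a wrapping 28..26 mask: every bit
	 * is kept except bit 27, which is MSR_DR) */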
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load a pointer to the list of pages to copy into r10 */
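	/* (only the @h half needs the physical adjustment: KERNELBASE
	 * is 64K-aligned, so the @l half is the same either way) */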
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
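	/* (each pbe describes one 4K page: 256 iterations of 16 bytes) */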
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
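	/* (0x20000 lines of 32 bytes = 4MB, larger than the L1 caches
	 * on these CPUs, so every previously cached line is displaced) */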
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are executing through those
	 * very BATs, but our code is probably in the icache, and
	 * we are writing the same values back to them, so that
	 * should be fine, though a better solution will have to be
	 * found long-term.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

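	/* Clear the high BATs (4-7) on CPUs that implement them */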
BEGIN_MMU_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

	/* Flush all TLBs */
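	/* (a tlbie for each page EA in the first 256MB cycles through
	 * every TLB congruence class, invalidating the whole TLB) */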
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
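	/* translation is on again; switch r11 back to its virtual address */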
	tovirt(r11,r11)

	/* Restore TB */
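	/* (write TBL as zero first so no carry into TBU can occur
	 * between the two writes) */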
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
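	/* (a value of 1 makes the decrementer fire as soon as
	 * interrupts are re-enabled, resynchronizing the timer code) */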
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	/* XXX Note: we don't really need to call swsusp_resume */

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
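	/* rfi loads the PC from SRR0 and the MSR from SRR1, so this
	 * returns to our caller (in LR) with the saved MSR in effect */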
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi