/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
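
/*
 * For reference, the offsets above describe a layout roughly like the
 * following C struct (illustrative sketch only; the kernel defines the
 * layout purely through the offsets above):
 *
 *	struct swsusp_save_area {
 *		u32 sp;			(SL_SP)
 *		u32 pc;			(SL_PC, defined but unused here)
 *		u32 msr;		(SL_MSR)
 *		u32 sdr1;		(SL_SDR1)
 *		u32 sprg[4];		(SL_SPRG0)
 *		u32 dbat0[2], ibat0[2];	(upper word, then lower word)
 *		u32 dbat1[2], ibat1[2];
 *		u32 dbat2[2], ibat2[2];
 *		u32 dbat3[2], ibat3[2];
 *		u32 tb[2];		(SL_TB)
 *		u32 r2;			(SL_R2)
 *		u32 cr;			(SL_CR)
 *		u32 lr;			(SL_LR)
 *		u32 gpr[20];		(SL_R12: r12..r31)
 *	};				(sizeof == SL_SIZE == 0xc4)
 */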

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)
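	/* stmw saves the 20 consecutive registers r12..r31 in one go:
	 * 20 words, matching the 80 bytes reserved after SL_R12 in
	 * SL_SIZE above.
	 */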

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b
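	/* The loop above is the classic 32-bit way to read the 64-bit
	 * timebase atomically: read TBU, then TBL, then TBU again, and
	 * retry if TBU changed in between (i.e. TBL carried over).
	 */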

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU configuration registers */
	bl	__save_cpu_setup
#endif
	/* Call the low-level suspend code (we should probably have set
	 * up a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
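
/* Note that swsusp_arch_suspend effectively returns twice: once here,
 * after swsusp_save has snapshotted memory, and once more when
 * swsusp_arch_resume below restores the LR saved above and returns
 * through it into the resumed kernel.
 */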


/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 CPUs at
	 * this point; the G5 will need a better approach, possibly
	 * using a small temporary hash table filled with large
	 * mappings, since disabling the MMU completely isn't a
	 * good option for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate moving BATs
	 * for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
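	/* The rlwinm above uses a wrap-around mask (MB=28, ME=26) that
	 * clears only bit 27, i.e. MSR_DR (0x10), while leaving MSR_IR
	 * set. From here on, data accesses use physical addresses,
	 * hence the tophys() translations below.
	 */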

	/* Load the pointer to the list of pages to copy into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b
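	/* In C terms, the double loop above is roughly (illustrative
	 * sketch; struct pbe comes from <linux/suspend.h>):
	 *
	 *	struct pbe *p;
	 *	for (p = restore_pblist; p; p = p->next)
	 *		memcpy(p->orig_address, p->address, PAGE_SIZE);
	 *
	 * The inner loop moves 256 * 16 = 4096 bytes, i.e. one 4K page,
	 * 16 bytes per iteration.
	 */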

	/* Do a very simple cache flush/invalidate of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
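	/* Each pass walks 0x20000 lines * 32 bytes = 4MB of low memory
	 * (assuming 32-byte cache lines), far larger than any L1, so
	 * the load pass evicts whatever the copy left in the data cache
	 * and the dcbf pass flushes/invalidates the lines the walk
	 * itself brought in.
	 */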

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes that
	 * the loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running from those same BAT
	 * mappings, but our code is probably in the icache, and we
	 * are writing the same values back to the BATs, so that
	 * should be fine, though a better solution will have to be
	 * found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

BEGIN_MMU_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
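	/* On CPUs that implement the high BATs (pairs 4-7, e.g. 745x),
	 * clear them so no stale mapping survives the resume; the save
	 * area above only covers BATs 0-3.
	 */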

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
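	/* tlbie invalidates the TLB entries indexed by the effective
	 * address, so stepping r4 down from 0x10000000 in 0x1000-byte
	 * (page-sized) strides is intended to hit every TLB set
	 * regardless of the actual TLB geometry.
	 */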

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4
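	/* Zero TBL first so it cannot carry into TBU between the two
	 * writes, then set TBU followed by TBL: the standard safe order
	 * for writing a 64-bit timebase with 32-bit operations.
	 */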

	/* Kick decrementer */
	li	r0,1
	mtdec	r0
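	/* DEC is set to 1 so a decrementer exception is raised almost
	 * as soon as interrupts are re-enabled, letting the timer code
	 * reprogram it with a real value.
	 */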

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	/* XXX Note: we don't really need to call swsusp_resume */

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
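/* turn_on_mmu expects the target MSR in r3: it loads SRR0 with the
 * return address (LR) and SRR1 with r3, and rfi then installs that
 * MSR (re-enabling translation) and jumps back through SRR0 in one
 * atomic operation.
 */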
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi