/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code and the
 *  exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1
 * fit into one page, so that no TLB miss can occur between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
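/*
 * Called (via the prepare_transfer_to_handler macro in head_32.h) when an
 * interrupt was taken from kernel context: if the interrupted code was the
 * idle loop with TLF_NAPPING or TLF_SLEEPING set, clear the flag and return
 * via the power-save restore path (nap) or to the saved LR with MSR_EE
 * cleared (sleep) instead of running the normal handler.
 */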
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
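/*
 * Book3S/32 KUEP (Kernel Userspace Execution Prevention) helpers: both
 * reload the user segment registers from the thread's THSR0 image.
 * __kuep_lock applies the image as-is (presumably with SR_NX set, so user
 * pages cannot be executed while in the kernel); __kuep_unlock clears
 * SR_NX again before the segments are applied on return to userspace.
 */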
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm  r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

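/*
 * System call entry. On arrival from the syscall prologue in head_32.h,
 * r9 holds the caller's MSR, r10 points at the thread_struct, r11 holds the
 * caller's stack pointer and r0 the syscall number. Finish building the
 * pt_regs frame on the kernel stack, lock KUEP, and call the C entry point
 * system_call_exception(regs, r0).
 */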
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

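/*
 * syscall_exit_prepare() returns non-zero when the full register set,
 * including the non-volatile GPRs, has to be restored from the frame
 * (e.g. after ptrace or signal handling may have modified it); otherwise
 * the fast path below restores only the volatile state.
 */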
ret_from_syscall:
	addi    r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

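/*
 * Return paths for newly created tasks, reached through the return address
 * presumably installed by copy_thread(): run schedule_tail() to finish the
 * context switch, then either return 0 to the child of fork/clone, or call
 * the thread function held in r14 with the argument held in r15.
 */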
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what the kernel thread
	 * should have done, and which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

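/*
 * Fast return from an exception, typically one handled entirely at the
 * low level without calling into C: r9 holds the MSR and r12 the NIP to
 * return to, and r11 points at the saved register frame left by the
 * exception prologue. Restore the remaining registers from that frame
 * and rfi.
 */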
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/*
 * A nonrecoverable interrupt (MSR_RI was clear): we don't know which trap
 * this is, so mark the trap number as unknown and panic.
 */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

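/*
 * Common interrupt/exception exit after the C handler has run. Dispatch on
 * the saved MSR_PR: returns to userspace go through
 * interrupt_exit_user_prepare() (signals, reschedule, etc.; a non-zero
 * return means the non-volatile GPRs must be reloaded), returns to kernel
 * context go through interrupt_exit_kernel_prepare().
 */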
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

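/*
 * Return to kernel context. A non-zero return from
 * interrupt_exit_kernel_prepare() (tested via cr1 below) means the frame's
 * r1 was updated by an emulated stwu to the stack and the store component
 * still has to be performed; see the stack-store emulation at label 1 below.
 */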
.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

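/*
 * RET_FROM_EXC_LEVEL restores the complete register state from the
 * exception frame and returns with the exception-level return instruction
 * (rfci/rfdi/rfmci), loading NIP/MSR into the given exception-level SRR
 * pair. A critical-class interrupt taken from user mode is instead handed
 * back to the normal interrupt_return path.
 */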
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

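/*
 * RESTORE_MMU_REGS puts back the MMU SPRs (the MAS registers on e500,
 * MMUCR on 44x) that the interrupted code, e.g. a TLB miss handler, may
 * have been using when the critical interrupt was taken.
 */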
#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

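/*
 * On 40x the interrupted SRR0/SRR1 may themselves have been live (set up
 * for an rfi) when the critical interrupt hit and can be clobbered while
 * it is handled, so the critical prologue presumably saves them to
 * crit_srr0/crit_srr1; restore them here before the critical-level return.
 */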
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */