/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
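/*
 * Helper used by the interrupt entry code in head_32.h when an interrupt
 * was taken from kernel mode: check whether it interrupted the power-save
 * idle loop (doze/nap/sleep) and, if so, divert to the wakeup code.
 */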
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

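	/* Interrupt woke us out of NAP: clear the flag and let the power-save
	 * wakeup code finish the job */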
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
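/*
 * Kernel Userspace Execution Prevention (book3s/32): on kernel entry
 * __kuep_lock re-applies the thread's saved user segment register value
 * (which carries SR_NX while KUEP is active), so the kernel cannot execute
 * from user mappings; __kuep_unlock applies the same value with SR_NX
 * cleared before we go back to userspace.
 */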
	.globl	__kuep_lock
__kuep_lock:
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr

__kuep_unlock:
	lwz	r9, THREAD+THSR0(r2)
	rlwinm  r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

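/*
 * System call entry, reached from the syscall entry code in head_32.h.
 * At this point r1 is the kernel stack with a partially built exception
 * frame, r9 holds the MSR at the time of the syscall, r10 points to the
 * current task's THREAD and r11 holds the user stack pointer.  Finish
 * building the pt_regs frame, then call system_call_exception(regs, r0).
 */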
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,8(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	bl	system_call_exception

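/*
 * Back from system_call_exception: r3 carries its return value.  Let
 * syscall_exit_prepare(r3, regs, 0) do the exit work (the last argument
 * says this is not an scv syscall); a non-zero result from it means the
 * non-volatile GPRs must be restored too (the 3: path below).
 */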
ret_from_syscall:
	addi    r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
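	/*
	 * The TLB handling code flagged the icache as needing a flush before
	 * we return to userspace: invalidate it and clear the flag.
	 */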
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

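/*
 * A new child lands here out of _switch(): copy_thread() pointed the
 * switch frame's saved NIP at ret_from_fork.  Return 0 to the child
 * through the common syscall exit path.
 */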
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

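/*
 * Kernel threads also come out of _switch() here.  copy_thread() stashed
 * the thread function and its argument in the frame's r14 and r15 slots
 * (reloaded by REST_NVGPRS below); call the function and, if it ever
 * returns, leave through the syscall exit path with a 0 result.
 */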
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path.  If you change this code, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
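/* On exit, r3 holds the previously running task ('last'), i.e. the old r2. */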
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

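/*
 * Fast return taken straight from an exception prologue: r11 points at
 * the exception frame, r9 holds the saved MSR and r12 the saved NIP;
 * they go back into SRR1/SRR0 before the final rfi.
 */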
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

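/*
 * Common interrupt/exception exit.  The saved MSR tells us whether we are
 * going back to user or kernel mode; the C helpers
 * interrupt_exit_user_prepare() / interrupt_exit_kernel_prepare() do the
 * bookkeeping and return a flag in r3 (restore the non-volatile GPRs,
 * resp. emulate an interrupted stack store).
 */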
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

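	/*
	 * Kill any lwarx reservation the interrupted context may still hold
	 * so a later stwcx. in userspace cannot succeed by accident.  On CPUs
	 * where stwcx. checks the reservation address, a lwarx to the kernel
	 * stack is used to displace the reservation instead.
	 */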
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* Avoid any possible TLB misses here by turning off MSR.DR; we	    \
	 * assume the instructions here are mapped by a pinned TLB entry. */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

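/*
 * Restore the full register state saved by a critical/debug/machine check
 * prologue and return using the exception level specific rfi variant.
 * Returns to user mode go through the normal interrupt_return path instead.
 */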
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

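/*
 * The critical/debug/machine check exception may have hit while a lower
 * level exception was between setting up its save/restore SPRs and its
 * rfi; the entry code saved copies of those SRR pairs (and of the MMU
 * registers below), so put them back before we return.
 */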
#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

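/*
 * A TLB miss taken at this exception level would have clobbered the MAS
 * registers (e500) or MMUCR (44x) that an interrupted TLB handler was
 * using; restore the values saved on entry.
 */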
#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

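/*
 * The 40x critical entry code stashed the interrupted SRR0/SRR1 in the
 * crit_srr0/crit_srr1 variables; put them back before the final rfci.
 */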
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */