/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
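/*
 * Called (via the prepare_transfer_to_handler macro in head_32.h) when an
 * interrupt was taken from kernel mode.  On entry r2 holds the current
 * task and r11 points at the saved register frame.  If the interrupt
 * caught the idle loop napping or sleeping, clear the flag and divert the
 * return path; otherwise fall straight through to the regular handler.
 */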
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
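/*
 * Kernel Userspace Execution Prevention on book3s/32: the per-thread
 * segment register image kept at THREAD+THSR0 has SR_NX set.  __kuep_lock
 * applies that image on kernel entry so user pages cannot be executed
 * from kernel mode; __kuep_unlock clears SR_NX again before returning to
 * user space.
 */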
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm  r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

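/*
 * System call entry.  The low-level syscall entry code branches here with
 * r1 pointing at a freshly carved kernel stack frame, r11 = the old stack
 * pointer, r9 = the MSR at the time of the system call, r10 = the current
 * task's thread struct, r0 = the syscall number and r3-r8 = the syscall
 * arguments.  Everything is saved into the pt_regs frame before calling
 * the C entry point.
 */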
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

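/*
 * system_call_exception() leaves the syscall return value in r3; hand it
 * to syscall_exit_prepare() along with the regs pointer (the third
 * argument is always 0 on 32-bit).  A non-zero result from
 * syscall_exit_prepare() means the full register set, including the
 * non-volatile GPRs, has to be restored via the slow path at 3: below.
 */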
ret_from_syscall:
	addi    r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

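/* Slow path: restore CTR, XER, r0, r3-r12 and the non-volatile GPRs too */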
3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

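/*
 * First return of a child created by fork/clone (set up by copy_thread()).
 * schedule_tail() finishes the context switch bookkeeping for the task we
 * switched away from, then the child returns 0 from the syscall.
 */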
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

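/*
 * For a new kernel thread, copy_thread() stashes the function to call in
 * r14 and its argument in r15; both are non-volatile, so they survive the
 * first _switch into the new task.  ret_from_kernel_user_thread is used
 * when that function is expected to exec a user program and drop to user
 * space; start_kernel_thread is for kernel threads proper, whose function
 * must never return.  PPC440EP_ERR42 expands to an erratum workaround on
 * 440EP parts and to nothing elsewhere.
 */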
	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0


/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
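/*
 * Note: on 32-bit, r2 holds the current task_struct pointer and
 * SPRN_SPRG_THREAD caches the physical address of the current thread
 * struct; both are switched to the new task below.  The old task is
 * returned in r3 as the 'last' value for the switch_to() caller.
 */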
_GLOBAL(_switch)
	stwu	r1,-SWITCH_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,SWITCH_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

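/*
 * Lightweight exception return used when the saved state can be restored
 * directly from the frame without going through interrupt_return (for
 * example the TLF_SLEEPING path above).  Expects r11 = exception frame,
 * r9 = MSR to restore (written to SRR1) and r12 = return address
 * (written to SRR0).
 */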
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

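/*
 * Common exit path for interrupts whose handler ran in C; r1 points at
 * the exception frame.  For returns to user space, a non-zero result
 * from interrupt_exit_user_prepare() means the non-volatile GPRs must be
 * restored as well.  For returns to kernel space, a non-zero result from
 * interrupt_exit_kernel_prepare() means an interrupted stwu stack update
 * still has to be completed (the "emulate stack store" path below).
 */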
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

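	/*
	 * Either alternative cancels a reservation left over from an
	 * interrupted larx/stcx. sequence, so a stcx. executed after the
	 * rfi cannot succeed against a stale reservation; which form is
	 * used depends on whether this CPU's stcx. checks the reservation
	 * address.
	 */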
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

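/*
 * RET_FROM_EXC_LEVEL restores the full register set saved at the given
 * exception level and returns with the matching return-from-interrupt
 * instruction (rfci, rfdi or rfmci), loading the saved NIP/MSR into that
 * level's own SRR0/SRR1 pair.  User-mode returns are simply redirected to
 * the normal interrupt_return path.
 */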
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

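/*
 * SPRs that may have been live in the interrupted context (the e500 MAS
 * registers, or MMUCR on 44x) were saved in the exception frame at
 * critical/debug/mcheck entry and are put back here before returning.
 */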
#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
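/*
 * On 40x the critical exception prolog (in head_40x.S) stashes the
 * interrupted context's SRR0/SRR1 in the crit_srr0/crit_srr1 variables so
 * they are not lost while the critical interrupt is handled; reload them
 * into SRR0/SRR1 before returning.
 */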
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */
570