/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
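/*
 * Reached from the interrupt prologues (see head_32.h) when an interrupt
 * was taken from kernel mode, with r2 holding the current task and r11
 * pointing at the saved register frame.  If the interrupted context was
 * napping or sleeping in the idle loop (TLF_NAPPING/TLF_SLEEPING), clear
 * the flag and resume at the idle wakeup point instead of running the
 * handler.
 */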
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	lwz	r2, GPR2(r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */

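/*
 * System call entry, reached from the syscall prologue in head_32.h with
 * r9 holding the caller's MSR, r10 pointing at the thread_struct, r11
 * holding the caller's stack pointer and r1 already set to the kernel
 * stack frame.  The remaining registers are saved into the pt_regs frame
 * and the C entry point (arch/powerpc/kernel/interrupt.c) is called,
 * roughly:
 *
 *	ret = system_call_exception(r3, r4, r5, r6, r7, r8, r0, regs);
 *
 * where r0 is the original syscall number and regs the frame just built.
 */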
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,8(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)

	/* Calling convention has r9 = orig r0, r10 = regs */
	addi	r10,r1,STACK_FRAME_OVERHEAD
	mr	r9,r0
	bl	system_call_exception

ret_from_syscall:
	addi    r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0
	bl	syscall_exit_prepare
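	/*
	 * syscall_exit_prepare() returns non-zero when the full register
	 * set, including the non-volatile GPRs, must be restored (e.g.
	 * after tracing or signal handling); that case is handled at the
	 * '3:' path below.
	 */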
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	lwz	r3,GPR3(r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	REST_8GPRS(4,r1)
	lwz	r12,GPR12(r1)
	b	1b

#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

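/*
 * Kernel threads are set up by copy_thread() with the function to call in
 * r14 and its argument in r15 (non-volatile registers, so they survive the
 * context switch).  Call the function, then exit through the syscall
 * return path with r3 = 0.
 */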
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this code, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
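/*
 * _switch() is called from __switch_to() in arch/powerpc/kernel/process.c,
 * roughly as:
 *
 *	last = _switch(&prev->thread, &new->thread);
 *
 * The value returned in r3 is the old 'current' (taken from r2 below),
 * which becomes the 'last' task seen by the code resuming in the new task.
 */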
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

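/*
 * Quick exit path used when an exception has been handled without
 * transferring to a C handler (and by the TLF_SLEEPING wakeup above).
 * Entered with r11 pointing at the exception frame, r9 holding the saved
 * MSR and r12 the NIP to return to (moved into SRR1/SRR0 below).  Only the
 * registers the exception prologue saved are restored; the non-volatile
 * GPRs are untouched.
 */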
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_2GPRS(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

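/*
 * Common exit path for interrupts that went through a C handler.  The
 * saved MSR_PR selects the user or kernel return path; the C helpers
 * interrupt_exit_user_prepare() and interrupt_exit_kernel_prepare()
 * (arch/powerpc/kernel/interrupt.c) do the bookkeeping and return non-zero
 * when extra work is needed here: restoring the non-volatile GPRs for the
 * user case, or emulating a stack store for the kernel case.
 */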
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
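	/*
	 * Make sure no reservation from the interrupted context survives
	 * the return, so that a stale lwarx/stwcx. pair cannot appear
	 * atomic across the interrupt.  Where stwcx. checks the reservation
	 * address, a lwarx on the stack (moving the reservation there) is
	 * the cheaper way to achieve this.
	 */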

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
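/*
 * RET_FROM_EXC_LEVEL below is parameterised by the SRR pair and the return
 * instruction of the exception level being returned from: CSRR0/CSRR1 with
 * rfci for critical interrupts, DSRR0/DSRR1 with rfdi for debug interrupts
 * and MCSRR0/MCSRR1 with rfmci for machine checks (see the users further
 * down).
 */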
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
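/*
 * enter_rtas() is called from rtas_call() with r3 holding the physical
 * address of the RTAS argument buffer, roughly enter_rtas(__pa(rtas_args)).
 * Translation is turned off around the call by taking an rfi into real
 * mode; RTAS returns to the physical address placed in LR, and a second
 * rfi re-enables translation before returning to the caller.
 */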
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r1, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi
1:
	lis	r8, 1f@h
	ori	r8, r8, 1f@l
	LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi			/* Reactivate MMU translation */
1:
	lwz	r8,INT_FRAME_SIZE+4(r1)	/* get return address */
	lwz	r9,8(r1)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	stw	r0, THREAD + RTAS_SP(r2)
	mtlr	r8
	mtmsr	r9
	blr			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
583