/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1
 * fit into a single page, so that no TLB miss can occur between
 * the modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
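	/*
	 * mtcrf 0x01 copies the low four bits of the thread_info local
	 * flags into CR7, so the bt- instructions below can test the
	 * TLF_NAPPING/TLF_SLEEPING bits directly without extra compares.
	 */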
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	lwz	r2, GPR2(r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
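/*
 * Book3S/32 KUEP (Kernel Userspace Execution Prevention) helpers.
 * The segment registers are shared between user and kernel, so
 * __kuep_lock re-applies the thread's saved SR0 template to the user
 * segments on kernel entry (setting SR_NX so the kernel cannot execute
 * user-mapped code), and __kuep_unlock clears SR_NX again before the
 * return to userspace so user code stays executable.
 */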
	.globl	__kuep_lock
__kuep_lock:
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr

__kuep_unlock:
	lwz	r9, THREAD+THSR0(r2)
	rlwinm  r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

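/*
 * System call entry.  We arrive here from the low-level prolog in
 * head_32.h with a partial pt_regs frame already started on the kernel
 * stack: r1 = kernel stack pointer, r9 = MSR of the interrupted context,
 * r10 = pointer to current's thread_struct, r11 = the user stack pointer,
 * r0 = syscall number and r3-r8 = syscall arguments.  LR still holds the
 * user's link register and is saved to _LINK below.  The rest of the
 * frame is filled in here before calling the C handler,
 * system_call_exception().
 */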
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,8(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r9 = orig r0, r10 = regs */
	addi	r10,r1,STACK_FRAME_OVERHEAD
	mr	r9,r0
	bl	system_call_exception

ret_from_syscall:
	addi    r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0
	bl	syscall_exit_prepare
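	/*
	 * r3 now holds syscall_exit_prepare()'s return value: non-zero
	 * means the full register image (non-volatile GPRs, CTR, XER)
	 * must be restored, which is handled at label 3: below.
	 */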
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	lwz	r3,GPR3(r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

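	/*
	 * Slow path: syscall_exit_prepare() requested a full restore, so
	 * bring back CTR, XER and the non-volatile GPRs as well before
	 * rejoining the fast exit path at 1: above.
	 */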
3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	REST_GPRS(4, 11, r1)
	lwz	r12,GPR12(r1)
	b	1b

#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

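/*
 * First return of a newly forked child task: finish the scheduler
 * hand-off with schedule_tail(), then return 0 (the child's view of
 * the fork()/clone() return value) via the common syscall exit path.
 */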
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

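/*
 * First return of a new kernel thread: r14 holds the thread function
 * and r15 its argument (as arranged by copy_thread()), so call the
 * function via CTR and, should it return, exit through the syscall
 * return path with a result of 0.
 */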
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this code, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c.
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

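	/*
	 * Switch to the new task: SPRN_SPRG_THREAD caches the physical
	 * address of the incoming task's thread_struct so the low-level
	 * exception prologs can locate it without address translation,
	 * then the new kernel stack pointer and r2 (current) are loaded.
	 */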
	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

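/*
 * fast_exception_return is entered from the low-level exception code
 * with r11 pointing at the saved register area, r9 holding the saved
 * MSR (SRR1) and r12 holding the return NIP.  It restores GPRs, CR and
 * LR directly from that frame and returns with rfi, bypassing the full
 * interrupt exit path.
 */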
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	REST_GPRS(3, 6, r11)
	lwz	r10,_CCR(r11)
	REST_GPRS(1, 2, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/*
 * Aargh, a nonrecoverable interrupt: panic.
 * We don't know which trap this is, so record -1 in _TRAP.
 */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

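/*
 * Common interrupt/exception return path.  MSR_PR in the saved MSR
 * selects between the user and kernel exits; the C helpers
 * interrupt_exit_user_prepare() and interrupt_exit_kernel_prepare()
 * handle signal delivery, rescheduling and the rest of the exit work.
 * A non-zero return from the user-side helper means the non-volatile
 * GPRs must be reloaded before the final rfi.
 */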
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

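	/*
	 * Kill any reservation left over from the interrupted context so
	 * that a stale stwcx. cannot succeed after we return.  On CPUs
	 * that do not check the stwcx. address, the dummy stwcx. below
	 * clears it; on CPUs that do, the lwarx replaces it with a
	 * harmless reservation on the kernel stack.
	 */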
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

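/*
 * Common return path for the extra exception levels (critical, debug,
 * machine check).  exc_lvl_srr0/exc_lvl_srr1 name the SPRs holding the
 * return NIP/MSR for that level and exc_lvl_rfi is the matching return
 * instruction (rfci, rfdi or rfmci).  User-mode returns are redirected
 * to the normal interrupt_return path above.
 */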
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_GPRS(3, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
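/*
 * enter_rtas() is called with r3 holding the physical address of the
 * RTAS argument buffer (see the caller in rtas.c); r3 is passed through
 * to the RTAS entry point untouched.  We save the current MSR, jump to
 * RTAS with the MMU (and interrupts) off via rfi, and RTAS returns to
 * the physical address loaded into LR below, where translation is
 * switched back on before returning to the caller.
 */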
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r1, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi
1:
	lis	r8, 1f@h
	ori	r8, r8, 1f@l
	LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi			/* Reactivate MMU translation */
1:
	lwz	r8,INT_FRAME_SIZE+4(r1)	/* get return address */
	lwz	r9,8(r1)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	stw	r0, THREAD + RTAS_SP(r2)
	mtlr	r8
	mtmsr	r9
	blr			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
