xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision 79f382b9)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 *  PowerPC version
4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 *  This file contains the system call entry code, context switch
14 *  code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/feature-fixups.h>
32#include <asm/barrier.h>
33#include <asm/kup.h>
34#include <asm/bug.h>
35
36#include "head_32.h"
37
38/*
39 * powerpc relies on return from interrupt/syscall being context synchronising
40 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
41 * synchronisation instructions.
42 */
43
44/*
45 * Align to 4k to ensure that all functions modifying srr0/srr1
46 * fit into one page, so that no TLB miss can occur between the
47 * modification of srr0/srr1 and the associated rfi.
48 */
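/* .align takes a power-of-two exponent: 12 means 2^12 = 4096 bytes, one 4k page */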
49	.align	12
50
51#ifdef CONFIG_BOOKE
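/*
 * BookE machine check, debug and critical interrupts each use their own
 * save/restore SRR pair (MCSRRx, DSRRx, CSRRx).  A higher exception level
 * may have interrupted a lower one, so each handler below saves the lower
 * levels' SRR pairs and falls through, ending in crit_transfer_to_handler
 * which also saves the MMU context and SRR0/SRR1.
 */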
52	.globl	mcheck_transfer_to_handler
53mcheck_transfer_to_handler:
54	mfspr	r0,SPRN_DSRR0
55	stw	r0,_DSRR0(r11)
56	mfspr	r0,SPRN_DSRR1
57	stw	r0,_DSRR1(r11)
58	/* fall through */
59_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
60
61	.globl	debug_transfer_to_handler
62debug_transfer_to_handler:
63	mfspr	r0,SPRN_CSRR0
64	stw	r0,_CSRR0(r11)
65	mfspr	r0,SPRN_CSRR1
66	stw	r0,_CSRR1(r11)
67	/* fall through */
68_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
69
70	.globl	crit_transfer_to_handler
71crit_transfer_to_handler:
72#ifdef CONFIG_PPC_BOOK3E_MMU
73	mfspr	r0,SPRN_MAS0
74	stw	r0,MAS0(r11)
75	mfspr	r0,SPRN_MAS1
76	stw	r0,MAS1(r11)
77	mfspr	r0,SPRN_MAS2
78	stw	r0,MAS2(r11)
79	mfspr	r0,SPRN_MAS3
80	stw	r0,MAS3(r11)
81	mfspr	r0,SPRN_MAS6
82	stw	r0,MAS6(r11)
83#ifdef CONFIG_PHYS_64BIT
84	mfspr	r0,SPRN_MAS7
85	stw	r0,MAS7(r11)
86#endif /* CONFIG_PHYS_64BIT */
87#endif /* CONFIG_PPC_BOOK3E_MMU */
88#ifdef CONFIG_44x
89	mfspr	r0,SPRN_MMUCR
90	stw	r0,MMUCR(r11)
91#endif
92	mfspr	r0,SPRN_SRR0
93	stw	r0,_SRR0(r11)
94	mfspr	r0,SPRN_SRR1
95	stw	r0,_SRR1(r11)
96
97	/* set the stack limit to the current stack */
98	mfspr	r8,SPRN_SPRG_THREAD
99	lwz	r0,KSP_LIMIT(r8)
100	stw	r0,SAVED_KSP_LIMIT(r11)
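	/* round r1 down to the base of the current THREAD_SIZE-aligned stack */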
101	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
102	stw	r0,KSP_LIMIT(r8)
103	/* fall through */
104_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
105#endif
106
107#ifdef CONFIG_40x
108	.globl	crit_transfer_to_handler
109crit_transfer_to_handler:
110	lwz	r0,crit_r10@l(0)
111	stw	r0,GPR10(r11)
112	lwz	r0,crit_r11@l(0)
113	stw	r0,GPR11(r11)
114	mfspr	r0,SPRN_SRR0
115	stw	r0,crit_srr0@l(0)
116	mfspr	r0,SPRN_SRR1
117	stw	r0,crit_srr1@l(0)
118
119	/* set the stack limit to the current stack */
120	mfspr	r8,SPRN_SPRG_THREAD
121	lwz	r0,KSP_LIMIT(r8)
122	stw	r0,saved_ksp_limit@l(0)
123	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
124	stw	r0,KSP_LIMIT(r8)
125	/* fall through */
126_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
127#endif
128
129/*
130 * This code finishes saving the registers to the exception frame
131 * and jumps to the appropriate handler for the exception, turning
132 * on address translation.
133 * Note that we rely on the caller having set cr0.eq iff the exception
134 * occurred in kernel mode (i.e. MSR:PR = 0).
135 */
136	.globl	transfer_to_handler_full
137transfer_to_handler_full:
138	SAVE_NVGPRS(r11)
139_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
140	/* fall through */
141
142	.globl	transfer_to_handler
143transfer_to_handler:
144	stw	r2,GPR2(r11)
145	stw	r12,_NIP(r11)
146	stw	r9,_MSR(r11)
147	andi.	r2,r9,MSR_PR
148	mfctr	r12
149	mfspr	r2,SPRN_XER
150	stw	r12,_CTR(r11)
151	stw	r2,_XER(r11)
152	mfspr	r12,SPRN_SPRG_THREAD
153	tovirt_vmstack r12, r12
154	beq	2f			/* if from user, fix up THREAD.regs */
155	addi	r2, r12, -THREAD
156	addi	r11,r1,STACK_FRAME_OVERHEAD
157	stw	r11,PT_REGS(r12)
158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
159	/* Check to see if the dbcr0 register is set up to debug.  Use the
160	   internal debug mode bit to do this. */
161	lwz	r12,THREAD_DBCR0(r12)
162	andis.	r12,r12,DBCR0_IDM@h
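	/* cr0 from this test is consumed by the beq+ 3f below */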
163#endif
164	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
165#ifdef CONFIG_PPC_BOOK3S_32
166	kuep_lock r11, r12
167#endif
168#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
169	beq+	3f
170	/* From user and task is ptraced - load up global dbcr0 */
171	li	r12,-1			/* clear all pending debug events */
172	mtspr	SPRN_DBSR,r12
173	lis	r11,global_dbcr0@ha
174	tophys(r11,r11)
175	addi	r11,r11,global_dbcr0@l
176#ifdef CONFIG_SMP
177	lwz	r9,TASK_CPU(r2)
178	slwi	r9,r9,2
179	add	r11,r11,r9
180#endif
181	lwz	r12,0(r11)
182	mtspr	SPRN_DBCR0,r12
183#endif
184
185	b	3f
186
1872:	/* if from kernel, check interrupted DOZE/NAP mode and
188	 * check for stack overflow
189	 */
190	kuap_save_and_lock r11, r12, r9, r2, r6
191	addi	r2, r12, -THREAD
192#ifndef CONFIG_VMAP_STACK
193	lwz	r9,KSP_LIMIT(r12)
194	cmplw	r1,r9			/* if r1 <= ksp_limit */
195	ble-	stack_ovf		/* then the kernel stack overflowed */
196#endif
1975:
198#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
199	lwz	r12,TI_LOCAL_FLAGS(r2)
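	/* mtcrf 0x01 copies the low 4 bits of r12 into cr7 so that the
	 * bt instructions below can test TLF_NAPPING/TLF_SLEEPING directly */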
200	mtcrf	0x01,r12
201	bt-	31-TLF_NAPPING,4f
202	bt-	31-TLF_SLEEPING,7f
203#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
204	.globl transfer_to_handler_cont
205transfer_to_handler_cont:
2063:
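	/*
	 * We got here via 'bl transfer_to_handler' from the exception prolog;
	 * the EXC_XFER_* macros in head_32.h emit two words right after that
	 * bl: the handler address and the address to return to when done.
	 * LR therefore points at that pair.
	 */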
207	mflr	r9
208	tovirt_novmstack r2, r2 	/* set r2 to current */
209	tovirt_vmstack r9, r9
210	lwz	r11,0(r9)		/* virtual address of handler */
211	lwz	r9,4(r9)		/* where to go when done */
212#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
213	mtspr	SPRN_NRI, r0
214#endif
215#ifdef CONFIG_TRACE_IRQFLAGS
216	/*
217	 * When tracing IRQ state (lockdep) we enable the MMU before we call
218	 * the IRQ tracing functions as they might access vmalloc space or
219	 * perform IOs for console output.
220	 *
221	 * To speed up the syscall path where interrupts stay on, let's check
222	 * first if we are changing the MSR value at all.
223	 */
224	tophys_novmstack r12, r1
225	lwz	r12,_MSR(r12)
226	andi.	r12,r12,MSR_EE
227	bne	1f
228
229	/* MSR isn't changing, just transition directly */
230#endif
231	mtspr	SPRN_SRR0,r11
232	mtspr	SPRN_SRR1,r10
233	mtlr	r9
234	rfi				/* jump to handler, enable MMU */
235#ifdef CONFIG_40x
236	b .	/* Prevent prefetch past rfi */
237#endif
238
239#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2404:	rlwinm	r12,r12,0,~_TLF_NAPPING
241	stw	r12,TI_LOCAL_FLAGS(r2)
242	b	power_save_ppc32_restore
243
2447:	rlwinm	r12,r12,0,~_TLF_SLEEPING
245	stw	r12,TI_LOCAL_FLAGS(r2)
246	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
247	rlwinm	r9,r9,0,~MSR_EE
248	lwz	r12,_LINK(r11)		/* and return to address in LR */
249	kuap_restore r11, r2, r3, r4, r5
250	lwz	r2, GPR2(r11)
251	b	fast_exception_return
252#endif
253_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
254_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
255
256#ifdef CONFIG_TRACE_IRQFLAGS
2571:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
258	 * keep interrupts disabled at this point, otherwise we risk
259	 * taking an interrupt before we tell lockdep they are enabled.
260	 */
261	lis	r12,reenable_mmu@h
262	ori	r12,r12,reenable_mmu@l
263	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
264	mtspr	SPRN_SRR0,r12
265	mtspr	SPRN_SRR1,r0
266	rfi
267#ifdef CONFIG_40x
268	b .	/* Prevent prefetch past rfi */
269#endif
270
271reenable_mmu:
272	/*
273	 * We save a handful of GPRs here:
274	 * r3 can be different from GPR3(r1) at this point, and r9 and r11
275	 * contain the old MSR and the handler address respectively;
276	 * r0, r4-r8, r12, CCR, CTR, XER etc. are left
277	 * clobbered as they aren't useful past this point.
278	 */
279
280	stwu	r1,-32(r1)
281	stw	r9,8(r1)
282	stw	r11,12(r1)
283	stw	r3,16(r1)
284
285	/* If we are disabling interrupts (normal case), simply log it with
286	 * lockdep
287	 */
2881:	bl	trace_hardirqs_off
289	lwz	r3,16(r1)
290	lwz	r11,12(r1)
291	lwz	r9,8(r1)
292	addi	r1,r1,32
293	mtctr	r11
294	mtlr	r9
295	bctr				/* jump to handler */
296#endif /* CONFIG_TRACE_IRQFLAGS */
297
298#ifndef CONFIG_VMAP_STACK
299/*
300 * On kernel stack overflow, load up an initial stack pointer
301 * and call StackOverflow(regs), which should not return.
302 */
303stack_ovf:
304	/* sometimes we use a statically-allocated stack, which is OK. */
305	lis	r12,_end@h
306	ori	r12,r12,_end@l
307	cmplw	r1,r12
308	ble	5b			/* r1 <= &_end is OK */
309	SAVE_NVGPRS(r11)
310	addi	r3,r1,STACK_FRAME_OVERHEAD
311	lis	r1,init_thread_union@ha
312	addi	r1,r1,init_thread_union@l
313	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
314	lis	r9,StackOverflow@ha
315	addi	r9,r9,StackOverflow@l
316	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
317#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
318	mtspr	SPRN_NRI, r0
319#endif
320	mtspr	SPRN_SRR0,r9
321	mtspr	SPRN_SRR1,r10
322	rfi
323#ifdef CONFIG_40x
324	b .	/* Prevent prefetch past rfi */
325#endif
326_ASM_NOKPROBE_SYMBOL(stack_ovf)
327#endif
328
329	.globl	transfer_to_syscall
330transfer_to_syscall:
331	SAVE_NVGPRS(r1)
332#ifdef CONFIG_PPC_BOOK3S_32
333	kuep_lock r11, r12
334#endif
335
336	/* Calling convention has r9 = orig r0, r10 = regs */
337	addi	r10,r1,STACK_FRAME_OVERHEAD
338	mr	r9,r0
339	stw	r10,THREAD+PT_REGS(r2)
340	bl	system_call_exception
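	/*
	 * system_call_exception() returns the syscall's return value in r3,
	 * and we fall straight through into ret_from_syscall.
	 */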
341
342ret_from_syscall:
343	addi    r4,r1,STACK_FRAME_OVERHEAD
344	li	r5,0
345	bl	syscall_exit_prepare
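	/*
	 * A non-zero result from syscall_exit_prepare() means the non-volatile
	 * GPRs must be restored too; the cmpwi/bne below then routes us to the
	 * full restore at 3:.
	 */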
346#ifdef CONFIG_PPC_47x
347	lis	r4,icache_44x_need_flush@ha
348	lwz	r5,icache_44x_need_flush@l(r4)
349	cmplwi	cr0,r5,0
350	bne-	2f
351#endif /* CONFIG_PPC_47x */
352#ifdef CONFIG_PPC_BOOK3S_32
353	kuep_unlock r5, r7
354#endif
355	kuap_check r2, r4
356	lwz	r4,_LINK(r1)
357	lwz	r5,_CCR(r1)
358	mtlr	r4
359	lwz	r7,_NIP(r1)
360	lwz	r8,_MSR(r1)
361	cmpwi	r3,0
362	lwz	r3,GPR3(r1)
363syscall_exit_finish:
364	mtspr	SPRN_SRR0,r7
365	mtspr	SPRN_SRR1,r8
366
367	bne	3f
368	mtcr	r5
369
3701:	lwz	r2,GPR2(r1)
371	lwz	r1,GPR1(r1)
372	rfi
373#ifdef CONFIG_40x
374	b .	/* Prevent prefetch past rfi */
375#endif
376
3773:	mtcr	r5
378	lwz	r4,_CTR(r1)
379	lwz	r5,_XER(r1)
380	REST_NVGPRS(r1)
381	mtctr	r4
382	mtxer	r5
383	lwz	r0,GPR0(r1)
384	lwz	r3,GPR3(r1)
385	REST_8GPRS(4,r1)
386	lwz	r12,GPR12(r1)
387	b	1b
388
389#ifdef CONFIG_44x
3902:	li	r7,0
391	iccci	r0,r0
392	stw	r7,icache_44x_need_flush@l(r4)
393	b	1b
394#endif  /* CONFIG_44x */
395
396	.globl	ret_from_fork
397ret_from_fork:
398	REST_NVGPRS(r1)
399	bl	schedule_tail
400	li	r3,0
401	b	ret_from_syscall
402
403	.globl	ret_from_kernel_thread
404ret_from_kernel_thread:
405	REST_NVGPRS(r1)
406	bl	schedule_tail
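	/*
	 * r14 holds the thread function and r15 its argument, both set up by
	 * copy_thread(); PPC440EP_ERR42 is a 440EP erratum workaround macro.
	 */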
407	mtlr	r14
408	mr	r3,r15
409	PPC440EP_ERR42
410	blrl
411	li	r3,0
412	b	ret_from_syscall
413
414/*
415 * Top-level page fault handling.
416 * This is in assembler because if do_page_fault tells us that
417 * it is a bad kernel page fault, we want to save the non-volatile
418 * registers before calling bad_page_fault.
419 */
420	.globl	handle_page_fault
421handle_page_fault:
422	addi	r3,r1,STACK_FRAME_OVERHEAD
423	bl	do_page_fault
424	cmpwi	r3,0
425	beq+	ret_from_except
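	/*
	 * Bad kernel page fault: save the non-volatile GPRs and clear bit 0
	 * of _TRAP (the "NVGPRs not saved" flag) so the frame is marked as
	 * holding the full register set.
	 */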
426	SAVE_NVGPRS(r1)
427	lwz	r0,_TRAP(r1)
428	clrrwi	r0,r0,1
429	stw	r0,_TRAP(r1)
430	mr	r4,r3		/* err arg for bad_page_fault */
431	addi	r3,r1,STACK_FRAME_OVERHEAD
432	bl	__bad_page_fault
433	b	ret_from_except_full
434
435/*
436 * This routine switches between two different tasks.  The process
437 * state of one is saved on its kernel stack.  Then the state
438 * of the other is restored from its kernel stack.  The memory
439 * management hardware is updated to the second process's state.
440 * Finally, we can return to the second process.
441 * On entry, r3 points to the THREAD for the current task, r4
442 * points to the THREAD for the new task.
443 *
444 * This routine is always called with interrupts disabled.
445 *
446 * Note: there are two ways to get to the "going out" portion
447 * of this code; either by coming in via the entry (_switch)
448 * or via "fork" which must set up an environment equivalent
449 * to the "_switch" path.  If you change this, you'll have to
450 * change the fork code also.
451 *
452 * The code which creates the new task context is in 'copy_thread'
453 * in arch/powerpc/kernel/process.c
454 */
455_GLOBAL(_switch)
456	stwu	r1,-INT_FRAME_SIZE(r1)
457	mflr	r0
458	stw	r0,INT_FRAME_SIZE+4(r1)
459	/* r3-r12 are caller saved -- Cort */
460	SAVE_NVGPRS(r1)
461	stw	r0,_NIP(r1)	/* Return to switch caller */
462	mfmsr	r11
463	li	r0,MSR_FP	/* Disable floating-point */
464#ifdef CONFIG_ALTIVEC
465BEGIN_FTR_SECTION
466	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
467	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
468	stw	r12,THREAD+THREAD_VRSAVE(r2)
469END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
470#endif /* CONFIG_ALTIVEC */
471#ifdef CONFIG_SPE
472BEGIN_FTR_SECTION
473	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
474	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
475	stw	r12,THREAD+THREAD_SPEFSCR(r2)
476END_FTR_SECTION_IFSET(CPU_FTR_SPE)
477#endif /* CONFIG_SPE */
478	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
479	beq+	1f
480	andc	r11,r11,r0
481	mtmsr	r11
482	isync
4831:	stw	r11,_MSR(r1)
484	mfcr	r10
485	stw	r10,_CCR(r1)
486	stw	r1,KSP(r3)	/* Set old stack pointer */
487
488	kuap_check r2, r0
489#ifdef CONFIG_SMP
490	/* We need a sync somewhere here to make sure that if the
491	 * previous task gets rescheduled on another CPU, it sees all
492	 * stores it has performed on this one.
493	 */
494	sync
495#endif /* CONFIG_SMP */
496
497	tophys(r0,r4)
498	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
499	lwz	r1,KSP(r4)	/* Load new stack pointer */
500
501	/* save the old 'current' in r3: it becomes _switch's return value ('last') */
502	mr	r3,r2
503	addi	r2,r4,-THREAD	/* Update current */
504
505#ifdef CONFIG_ALTIVEC
506BEGIN_FTR_SECTION
507	lwz	r0,THREAD+THREAD_VRSAVE(r2)
508	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
509END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
510#endif /* CONFIG_ALTIVEC */
511#ifdef CONFIG_SPE
512BEGIN_FTR_SECTION
513	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
514	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
515END_FTR_SECTION_IFSET(CPU_FTR_SPE)
516#endif /* CONFIG_SPE */
517
518	lwz	r0,_CCR(r1)
519	mtcrf	0xFF,r0
520	/* r3-r12 are destroyed -- Cort */
521	REST_NVGPRS(r1)
522
523	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
524	mtlr	r4
525	addi	r1,r1,INT_FRAME_SIZE
526	blr
527
528	.globl	fast_exception_return
529fast_exception_return:
530#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
531	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
532	beq	1f			/* if not, we've got problems */
533#endif
534
5352:	REST_4GPRS(3, r11)
536	lwz	r10,_CCR(r11)
537	REST_GPR(1, r11)
538	mtcr	r10
539	lwz	r10,_LINK(r11)
540	mtlr	r10
541	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
542	li	r10, 0
543	stw	r10, 8(r11)
544	REST_GPR(10, r11)
545#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
546	mtspr	SPRN_NRI, r0
547#endif
548	mtspr	SPRN_SRR1,r9
549	mtspr	SPRN_SRR0,r12
550	REST_GPR(9, r11)
551	REST_GPR(12, r11)
552	lwz	r11,GPR11(r11)
553	rfi
554#ifdef CONFIG_40x
555	b .	/* Prevent prefetch past rfi */
556#endif
557_ASM_NOKPROBE_SYMBOL(fast_exception_return)
558
559#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
560/* check if the exception happened in a restartable section */
5611:	lis	r3,exc_exit_restart_end@ha
562	addi	r3,r3,exc_exit_restart_end@l
563	cmplw	r12,r3
564	bge	3f
565	lis	r4,exc_exit_restart@ha
566	addi	r4,r4,exc_exit_restart@l
567	cmplw	r12,r4
568	blt	3f
569	lis	r3,fee_restarts@ha
570	tophys(r3,r3)
571	lwz	r5,fee_restarts@l(r3)
572	addi	r5,r5,1
573	stw	r5,fee_restarts@l(r3)
574	mr	r12,r4		/* restart at exc_exit_restart */
575	b	2b
576
577	.section .bss
578	.align	2
579fee_restarts:
580	.space	4
581	.previous
582
583/* aargh, a nonrecoverable interrupt, panic */
584/* aargh, we don't know which trap this is */
5853:
586	li	r10,-1
587	stw	r10,_TRAP(r11)
588	addi	r3,r1,STACK_FRAME_OVERHEAD
589	lis	r10,MSR_KERNEL@h
590	ori	r10,r10,MSR_KERNEL@l
591	bl	transfer_to_handler_full
592	.long	unrecoverable_exception
593	.long	ret_from_except
594#endif
595
596	.globl	ret_from_except_full
597ret_from_except_full:
598	REST_NVGPRS(r1)
599	/* fall through */
600
601	.globl	ret_from_except
602ret_from_except:
603	/* Hard-disable interrupts so that current_thread_info()->flags
604	 * can't change between when we test it and when we return
605	 * from the interrupt. */
606	/* Note: We don't bother telling lockdep about it */
607	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
608	mtmsr	r10		/* disable interrupts */
609
610	lwz	r3,_MSR(r1)	/* Returning to user mode? */
611	andi.	r0,r3,MSR_PR
612	beq	resume_kernel
613
614user_exc_return:		/* r10 contains MSR_KERNEL here */
615	/* Check current_thread_info()->flags */
616	lwz	r9,TI_FLAGS(r2)
617	andi.	r0,r9,_TIF_USER_WORK_MASK
618	bne	do_work
619
620restore_user:
621#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
622	/* Check whether this process has its own DBCR0 value.  The internal
623	   debug mode bit tells us that dbcr0 should be loaded. */
624	lwz	r0,THREAD+THREAD_DBCR0(r2)
625	andis.	r10,r0,DBCR0_IDM@h
626	bnel-	load_dbcr0
627#endif
628	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
629#ifdef CONFIG_PPC_BOOK3S_32
630	kuep_unlock	r10, r11
631#endif
632
633	b	restore
634
635/* N.B. the only way to get here is from the beq following ret_from_except. */
636resume_kernel:
637	/* check current_thread_info()->flags for _TIF_EMULATE_STACK_STORE */
638	lwz	r8,TI_FLAGS(r2)
639	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
640	beq+	1f
641
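	/*
	 * An emulated stwu updated the kernel stack pointer but its store was
	 * deferred (_TIF_EMULATE_STACK_STORE).  Move this exception frame down
	 * to the new stack location, switch r1 to it, and then complete the
	 * deferred store of the old r1 (the stack back chain) below.
	 */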
642	addi	r8,r1,INT_FRAME_SIZE	/* old r1: the value the emulated stwu must store */
643
644	lwz	r3,GPR1(r1)
645	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
646	mr	r4,r1			/* src:  current exception frame */
647	mr	r1,r3			/* Reroute the trampoline frame to r1 */
648
649	/* Copy from the original to the trampoline. */
650	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
651	li	r6,0			/* start offset: 0 */
652	mtctr	r5
6532:	lwzx	r0,r6,r4
654	stwx	r0,r6,r3
655	addi	r6,r6,4
656	bdnz	2b
657
658	/* Do real store operation to complete stwu */
659	lwz	r5,GPR1(r1)
660	stw	r8,0(r5)
661
662	/* Clear _TIF_EMULATE_STACK_STORE flag */
663	lis	r11,_TIF_EMULATE_STACK_STORE@h
664	addi	r5,r2,TI_FLAGS
6650:	lwarx	r8,0,r5
666	andc	r8,r8,r11
667	stwcx.	r8,0,r5
668	bne-	0b
6691:
670
671#ifdef CONFIG_PREEMPTION
672	/* check current_thread_info->preempt_count */
673	lwz	r0,TI_PREEMPT(r2)
674	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
675	bne	restore_kuap
676	andi.	r8,r8,_TIF_NEED_RESCHED
677	beq+	restore_kuap
678	lwz	r3,_MSR(r1)
679	andi.	r0,r3,MSR_EE	/* interrupts off? */
680	beq	restore_kuap	/* don't schedule if so */
681#ifdef CONFIG_TRACE_IRQFLAGS
682	/* Lockdep thinks irqs are enabled, but we need to call
683	 * preempt_schedule_irq with IRQs off, so we inform lockdep
684	 * now that we did in fact turn them off already.
685	 */
686	bl	trace_hardirqs_off
687#endif
688	bl	preempt_schedule_irq
689#ifdef CONFIG_TRACE_IRQFLAGS
690	/* And now, to properly rebalance the above, we tell lockdep they
691	 * are being turned back on, which will happen when we return
692	 */
693	bl	trace_hardirqs_on
694#endif
695#endif /* CONFIG_PREEMPTION */
696restore_kuap:
697	kuap_restore r1, r2, r9, r10, r0
698
699	/* interrupts are hard-disabled at this point */
700restore:
701#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
702	lis	r4,icache_44x_need_flush@ha
703	lwz	r5,icache_44x_need_flush@l(r4)
704	cmplwi	cr0,r5,0
705	beq+	1f
706	li	r6,0
707	iccci	r0,r0
708	stw	r6,icache_44x_need_flush@l(r4)
7091:
710#endif  /* CONFIG_44x */
711
712	lwz	r9,_MSR(r1)
713#ifdef CONFIG_TRACE_IRQFLAGS
714	/* Lockdep doesn't know that IRQs are temporarily turned off in this
715	 * assembly code while peeking at TI_FLAGS and such. However, we need
716	 * to inform it if the exception turned interrupts off and we are
717	 * about to turn them back on.
718	 */
719	andi.	r10,r9,MSR_EE
720	beq	1f
721	stwu	r1,-32(r1)
722	mflr	r0
723	stw	r0,4(r1)
724	bl	trace_hardirqs_on
725	addi	r1, r1, 32
726	lwz	r9,_MSR(r1)
7271:
728#endif /* CONFIG_TRACE_IRQFLAGS */
729
730	lwz	r0,GPR0(r1)
731	lwz	r2,GPR2(r1)
732	REST_4GPRS(3, r1)
733	REST_2GPRS(7, r1)
734
735	lwz	r10,_XER(r1)
736	lwz	r11,_CTR(r1)
737	mtspr	SPRN_XER,r10
738	mtctr	r11
739
740BEGIN_FTR_SECTION
741	lwarx	r11,0,r1
742END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
743	stwcx.	r0,0,r1			/* to clear the reservation */
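	/*
	 * The stwcx. clears any outstanding reservation; CPUs flagged with
	 * CPU_FTR_NEED_PAIRED_STWCX also require it to be paired with the
	 * lwarx above.
	 */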
744
745#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
746	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
747	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
748
749	lwz	r10,_CCR(r1)
750	lwz	r11,_LINK(r1)
751	mtcrf	0xFF,r10
752	mtlr	r11
753
754	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
755	li	r10, 0
756	stw	r10, 8(r1)
757	/*
758	 * Once we put values in SRR0 and SRR1, we are in a state
759	 * where exceptions are not recoverable, since taking an
760	 * exception will trash SRR0 and SRR1.  Therefore we clear the
761	 * MSR:RI bit to indicate this.  If we do take an exception,
762	 * we can't return to the point of the exception but we
763	 * can restart the exception exit path at the label
764	 * exc_exit_restart below.  -- paulus
765	 */
766	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
767	mtmsr	r10		/* clear the RI bit */
768	.globl exc_exit_restart
769exc_exit_restart:
770	lwz	r12,_NIP(r1)
771	mtspr	SPRN_SRR0,r12
772	mtspr	SPRN_SRR1,r9
773	REST_4GPRS(9, r1)
774	lwz	r1,GPR1(r1)
775	.globl exc_exit_restart_end
776exc_exit_restart_end:
777	rfi
778_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
779_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
780
781#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
782	/*
783	 * This is a bit different on 4xx/Book-E because it doesn't have
784	 * the RI bit in the MSR.
785	 * The TLB miss handler checks if we have interrupted
786	 * the exception exit path and restarts it if so
787	 * (well maybe one day it will... :).
788	 */
789	lwz	r11,_LINK(r1)
790	mtlr	r11
791	lwz	r10,_CCR(r1)
792	mtcrf	0xff,r10
793	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
794	li	r10, 0
795	stw	r10, 8(r1)
796	REST_2GPRS(9, r1)
797	.globl exc_exit_restart
798exc_exit_restart:
799	lwz	r11,_NIP(r1)
800	lwz	r12,_MSR(r1)
801	mtspr	SPRN_SRR0,r11
802	mtspr	SPRN_SRR1,r12
803	REST_2GPRS(11, r1)
804	lwz	r1,GPR1(r1)
805	.globl exc_exit_restart_end
806exc_exit_restart_end:
807	rfi
808	b	.			/* prevent prefetch past rfi */
809_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
810
811/*
812 * Returning from a critical interrupt in user mode doesn't need
813 * to be any different from a normal exception.  For a critical
814 * interrupt in the kernel, we just return (without checking for
815 * preemption) since the interrupt may have happened at some crucial
816 * place (e.g. inside the TLB miss handler), and because we will be
817 * running with r1 pointing into critical_stack, not the current
818 * process's kernel stack (and therefore current_thread_info() will
819 * give the wrong answer).
820 * We have to restore various SPRs that may have been in use at the
821 * time of the critical interrupt.
822 *
823 */
824#ifdef CONFIG_40x
825#define PPC_40x_TURN_OFF_MSR_DR						    \
826	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
827	 * assume the instructions here are mapped by a pinned TLB entry */ \
828	li	r10,MSR_IR;						    \
829	mtmsr	r10;							    \
830	isync;								    \
831	tophys(r1, r1);
832#else
833#define PPC_40x_TURN_OFF_MSR_DR
834#endif
835
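/*
 * RET_FROM_EXC_LEVEL restores the register state saved at entry to a
 * critical/debug/machine-check exception and returns with the matching
 * exception-level return instruction (rfci/rfdi/rfmci), loading the given
 * SRR pair from the saved NIP and MSR.  If the exception hit user mode it
 * takes the normal user_exc_return path instead.
 */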
836#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
837	REST_NVGPRS(r1);						\
838	lwz	r3,_MSR(r1);						\
839	andi.	r3,r3,MSR_PR;						\
840	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
841	bne	user_exc_return;					\
842	lwz	r0,GPR0(r1);						\
843	lwz	r2,GPR2(r1);						\
844	REST_4GPRS(3, r1);						\
845	REST_2GPRS(7, r1);						\
846	lwz	r10,_XER(r1);						\
847	lwz	r11,_CTR(r1);						\
848	mtspr	SPRN_XER,r10;						\
849	mtctr	r11;							\
850	stwcx.	r0,0,r1;		/* to clear the reservation */	\
851	lwz	r11,_LINK(r1);						\
852	mtlr	r11;							\
853	lwz	r10,_CCR(r1);						\
854	mtcrf	0xff,r10;						\
855	PPC_40x_TURN_OFF_MSR_DR;					\
856	lwz	r9,_DEAR(r1);						\
857	lwz	r10,_ESR(r1);						\
858	mtspr	SPRN_DEAR,r9;						\
859	mtspr	SPRN_ESR,r10;						\
860	lwz	r11,_NIP(r1);						\
861	lwz	r12,_MSR(r1);						\
862	mtspr	exc_lvl_srr0,r11;					\
863	mtspr	exc_lvl_srr1,r12;					\
864	lwz	r9,GPR9(r1);						\
865	lwz	r12,GPR12(r1);						\
866	lwz	r10,GPR10(r1);						\
867	lwz	r11,GPR11(r1);						\
868	lwz	r1,GPR1(r1);						\
869	exc_lvl_rfi;							\
870	b	.;		/* prevent prefetch past exc_lvl_rfi */
871
872#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
873	lwz	r9,_##exc_lvl_srr0(r1);					\
874	lwz	r10,_##exc_lvl_srr1(r1);				\
875	mtspr	SPRN_##exc_lvl_srr0,r9;					\
876	mtspr	SPRN_##exc_lvl_srr1,r10;
877
878#if defined(CONFIG_PPC_BOOK3E_MMU)
879#ifdef CONFIG_PHYS_64BIT
880#define	RESTORE_MAS7							\
881	lwz	r11,MAS7(r1);						\
882	mtspr	SPRN_MAS7,r11;
883#else
884#define	RESTORE_MAS7
885#endif /* CONFIG_PHYS_64BIT */
886#define RESTORE_MMU_REGS						\
887	lwz	r9,MAS0(r1);						\
888	lwz	r10,MAS1(r1);						\
889	lwz	r11,MAS2(r1);						\
890	mtspr	SPRN_MAS0,r9;						\
891	lwz	r9,MAS3(r1);						\
892	mtspr	SPRN_MAS1,r10;						\
893	lwz	r10,MAS6(r1);						\
894	mtspr	SPRN_MAS2,r11;						\
895	mtspr	SPRN_MAS3,r9;						\
896	mtspr	SPRN_MAS6,r10;						\
897	RESTORE_MAS7;
898#elif defined(CONFIG_44x)
899#define RESTORE_MMU_REGS						\
900	lwz	r9,MMUCR(r1);						\
901	mtspr	SPRN_MMUCR,r9;
902#else
903#define RESTORE_MMU_REGS
904#endif
905
906#ifdef CONFIG_40x
907	.globl	ret_from_crit_exc
908ret_from_crit_exc:
909	mfspr	r9,SPRN_SPRG_THREAD
910	lis	r10,saved_ksp_limit@ha;
911	lwz	r10,saved_ksp_limit@l(r10);
912	tovirt(r9,r9);
913	stw	r10,KSP_LIMIT(r9)
914	lis	r9,crit_srr0@ha;
915	lwz	r9,crit_srr0@l(r9);
916	lis	r10,crit_srr1@ha;
917	lwz	r10,crit_srr1@l(r10);
918	mtspr	SPRN_SRR0,r9;
919	mtspr	SPRN_SRR1,r10;
920	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
921_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
922#endif /* CONFIG_40x */
923
924#ifdef CONFIG_BOOKE
925	.globl	ret_from_crit_exc
926ret_from_crit_exc:
927	mfspr	r9,SPRN_SPRG_THREAD
928	lwz	r10,SAVED_KSP_LIMIT(r1)
929	stw	r10,KSP_LIMIT(r9)
930	RESTORE_xSRR(SRR0,SRR1);
931	RESTORE_MMU_REGS;
932	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
933_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
934
935	.globl	ret_from_debug_exc
936ret_from_debug_exc:
937	mfspr	r9,SPRN_SPRG_THREAD
938	lwz	r10,SAVED_KSP_LIMIT(r1)
939	stw	r10,KSP_LIMIT(r9)
940	RESTORE_xSRR(SRR0,SRR1);
941	RESTORE_xSRR(CSRR0,CSRR1);
942	RESTORE_MMU_REGS;
943	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
944_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
945
946	.globl	ret_from_mcheck_exc
947ret_from_mcheck_exc:
948	mfspr	r9,SPRN_SPRG_THREAD
949	lwz	r10,SAVED_KSP_LIMIT(r1)
950	stw	r10,KSP_LIMIT(r9)
951	RESTORE_xSRR(SRR0,SRR1);
952	RESTORE_xSRR(CSRR0,CSRR1);
953	RESTORE_xSRR(DSRR0,DSRR1);
954	RESTORE_MMU_REGS;
955	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
956_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
957#endif /* CONFIG_BOOKE */
958
959/*
960 * Load the DBCR0 value for a task that is being ptraced,
961 * having first saved away the global DBCR0.  Note that r0
962 * has the dbcr0 value to set on entry to this routine.
963 */
964load_dbcr0:
965	mfmsr	r10		/* first disable debug exceptions */
966	rlwinm	r10,r10,0,~MSR_DE
967	mtmsr	r10
968	isync
969	mfspr	r10,SPRN_DBCR0
970	lis	r11,global_dbcr0@ha
971	addi	r11,r11,global_dbcr0@l
972#ifdef CONFIG_SMP
973	lwz	r9,TASK_CPU(r2)
974	slwi	r9,r9,2
975	add	r11,r11,r9
976#endif
977	stw	r10,0(r11)
978	mtspr	SPRN_DBCR0,r0
979	li	r11,-1
980	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
981	blr
982
983	.section .bss
984	.align	4
985	.global global_dbcr0
986global_dbcr0:
987	.space	4*NR_CPUS
988	.previous
989#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
990
991do_work:			/* r10 contains MSR_KERNEL here */
992	andi.	r0,r9,_TIF_NEED_RESCHED
993	beq	do_user_signal
994
995do_resched:			/* r10 contains MSR_KERNEL here */
996#ifdef CONFIG_TRACE_IRQFLAGS
997	bl	trace_hardirqs_on
998	mfmsr	r10
999#endif
1000	ori	r10,r10,MSR_EE
1001	mtmsr	r10		/* hard-enable interrupts */
1002	bl	schedule
1003recheck:
1004	/* Note: we don't tell lockdep that we are disabling interrupts
1005	 * again either. These disable/enable cycles used to peek at
1006	 * TI_FLAGS aren't advertised.
1007	 */
1008	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1009	mtmsr	r10		/* disable interrupts */
1010	lwz	r9,TI_FLAGS(r2)
1011	andi.	r0,r9,_TIF_NEED_RESCHED
1012	bne-	do_resched
1013	andi.	r0,r9,_TIF_USER_WORK_MASK
1014	beq	restore_user
1015do_user_signal:			/* r10 contains MSR_KERNEL here */
1016	ori	r10,r10,MSR_EE
1017	mtmsr	r10		/* hard-enable interrupts */
1018	/* save r13-r31 in the exception frame, if not already done */
1019	lwz	r3,_TRAP(r1)
1020	andi.	r0,r3,1
1021	beq	2f
1022	SAVE_NVGPRS(r1)
1023	rlwinm	r3,r3,0,0,30
1024	stw	r3,_TRAP(r1)
10252:	addi	r3,r1,STACK_FRAME_OVERHEAD
1026	mr	r4,r9
1027	bl	do_notify_resume
1028	REST_NVGPRS(r1)
1029	b	recheck
1030
1031/*
1032 * We come here when we are at the end of handling an exception
1033 * that occurred at a place where taking an exception will lose
1034 * state information, such as the contents of SRR0 and SRR1.
1035 */
1036nonrecoverable:
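	/*
	 * If r12 lies inside the exc_exit_restart window, count the event in
	 * ee_restarts, point r12 back at exc_exit_restart and return to the
	 * caller; otherwise the state really is unrecoverable and the task is
	 * killed below.
	 */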
1037	lis	r10,exc_exit_restart_end@ha
1038	addi	r10,r10,exc_exit_restart_end@l
1039	cmplw	r12,r10
1040	bge	3f
1041	lis	r11,exc_exit_restart@ha
1042	addi	r11,r11,exc_exit_restart@l
1043	cmplw	r12,r11
1044	blt	3f
1045	lis	r10,ee_restarts@ha
1046	lwz	r12,ee_restarts@l(r10)
1047	addi	r12,r12,1
1048	stw	r12,ee_restarts@l(r10)
1049	mr	r12,r11		/* restart at exc_exit_restart */
1050	blr
10513:	/* OK, we can't recover, kill this process */
1052	lwz	r3,_TRAP(r1)
1053	andi.	r0,r3,1
1054	beq	5f
1055	SAVE_NVGPRS(r1)
1056	rlwinm	r3,r3,0,0,30
1057	stw	r3,_TRAP(r1)
10585:	mfspr	r2,SPRN_SPRG_THREAD
1059	addi	r2,r2,-THREAD
1060	tovirt(r2,r2)			/* set back r2 to current */
10614:	addi	r3,r1,STACK_FRAME_OVERHEAD
1062	bl	unrecoverable_exception
1063	/* shouldn't return */
1064	b	4b
1065_ASM_NOKPROBE_SYMBOL(nonrecoverable)
1066
1067	.section .bss
1068	.align	2
1069ee_restarts:
1070	.space	4
1071	.previous
1072
1073/*
1074 * PROM code for specific machines follows.  Put it
1075 * here so it's easy to add arch-specific sections later.
1076 * -- Cort
1077 */
1078#ifdef CONFIG_PPC_RTAS
1079/*
1080 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1081 * called with the MMU off.
1082 */
1083_GLOBAL(enter_rtas)
1084	stwu	r1,-INT_FRAME_SIZE(r1)
1085	mflr	r0
1086	stw	r0,INT_FRAME_SIZE+4(r1)
1087	LOAD_REG_ADDR(r4, rtas)
1088	lis	r6,1f@ha	/* physical return address for rtas */
1089	addi	r6,r6,1f@l
1090	tophys(r6,r6)
1091	tophys_novmstack r7, r1
1092	lwz	r8,RTASENTRY(r4)
1093	lwz	r4,RTASBASE(r4)
1094	mfmsr	r9
1095	stw	r9,8(r1)
1096	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1097	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
1098	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1099	mtlr	r6
1100	stw	r7, THREAD + RTAS_SP(r2)
1101	mtspr	SPRN_SRR0,r8
1102	mtspr	SPRN_SRR1,r9
1103	rfi
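	/*
	 * RTAS returns to the physical address loaded into r6 above, i.e. to
	 * the label below, with the MMU still off.
	 */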
11041:	tophys_novmstack r9, r1
1105#ifdef CONFIG_VMAP_STACK
1106	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
1107	mtmsr	r0
1108	isync
1109#endif
1110	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1111	lwz	r9,8(r9)	/* original msr value */
1112	addi	r1,r1,INT_FRAME_SIZE
1113	li	r0,0
1114	tophys_novmstack r7, r2
1115	stw	r0, THREAD + RTAS_SP(r7)
1116	mtspr	SPRN_SRR0,r8
1117	mtspr	SPRN_SRR1,r9
1118	rfi			/* return to caller */
1119_ASM_NOKPROBE_SYMBOL(enter_rtas)
1120#endif /* CONFIG_PPC_RTAS */
1121