xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision 7fe8e483)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 *  PowerPC version
4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 *  This file contains the system call entry code, context switch
14 *  code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/asm-405.h>
32#include <asm/feature-fixups.h>
33#include <asm/barrier.h>
34#include <asm/kup.h>
35#include <asm/bug.h>
36
37#include "head_32.h"
38
39/*
40 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
41 * one page, so that no TLB miss can occur between the modification of
42 * srr0/srr1 and the associated rfi.
43 */
44	.align	12
45
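/*
 * On BookE, the machine check, debug and critical interrupt levels each
 * have their own save/restore SPR pair.  A higher-level interrupt can
 * arrive while a lower level is still using its pair, so the helpers
 * below save the lower levels' SRR pairs (plus, at critical level, the
 * MMU assist registers and the kernel stack limit) into the exception
 * frame before falling through to the common transfer_to_handler code.
 */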
46#ifdef CONFIG_BOOKE
47	.globl	mcheck_transfer_to_handler
48mcheck_transfer_to_handler:
49	mfspr	r0,SPRN_DSRR0
50	stw	r0,_DSRR0(r11)
51	mfspr	r0,SPRN_DSRR1
52	stw	r0,_DSRR1(r11)
53	/* fall through */
54
55	.globl	debug_transfer_to_handler
56debug_transfer_to_handler:
57	mfspr	r0,SPRN_CSRR0
58	stw	r0,_CSRR0(r11)
59	mfspr	r0,SPRN_CSRR1
60	stw	r0,_CSRR1(r11)
61	/* fall through */
62
63	.globl	crit_transfer_to_handler
64crit_transfer_to_handler:
65#ifdef CONFIG_PPC_BOOK3E_MMU
66	mfspr	r0,SPRN_MAS0
67	stw	r0,MAS0(r11)
68	mfspr	r0,SPRN_MAS1
69	stw	r0,MAS1(r11)
70	mfspr	r0,SPRN_MAS2
71	stw	r0,MAS2(r11)
72	mfspr	r0,SPRN_MAS3
73	stw	r0,MAS3(r11)
74	mfspr	r0,SPRN_MAS6
75	stw	r0,MAS6(r11)
76#ifdef CONFIG_PHYS_64BIT
77	mfspr	r0,SPRN_MAS7
78	stw	r0,MAS7(r11)
79#endif /* CONFIG_PHYS_64BIT */
80#endif /* CONFIG_PPC_BOOK3E_MMU */
81#ifdef CONFIG_44x
82	mfspr	r0,SPRN_MMUCR
83	stw	r0,MMUCR(r11)
84#endif
85	mfspr	r0,SPRN_SRR0
86	stw	r0,_SRR0(r11)
87	mfspr	r0,SPRN_SRR1
88	stw	r0,_SRR1(r11)
89
90	/* set the stack limit to the current stack */
91	mfspr	r8,SPRN_SPRG_THREAD
92	lwz	r0,KSP_LIMIT(r8)
93	stw	r0,SAVED_KSP_LIMIT(r11)
94	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
95	stw	r0,KSP_LIMIT(r8)
96	/* fall through */
97#endif
98
99#ifdef CONFIG_40x
100	.globl	crit_transfer_to_handler
101crit_transfer_to_handler:
102	lwz	r0,crit_r10@l(0)
103	stw	r0,GPR10(r11)
104	lwz	r0,crit_r11@l(0)
105	stw	r0,GPR11(r11)
106	mfspr	r0,SPRN_SRR0
107	stw	r0,crit_srr0@l(0)
108	mfspr	r0,SPRN_SRR1
109	stw	r0,crit_srr1@l(0)
110
111	/* set the stack limit to the current stack */
112	mfspr	r8,SPRN_SPRG_THREAD
113	lwz	r0,KSP_LIMIT(r8)
114	stw	r0,saved_ksp_limit@l(0)
115	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
116	stw	r0,KSP_LIMIT(r8)
117	/* fall through */
118#endif
119
120/*
121 * This code finishes saving the registers to the exception frame
122 * and jumps to the appropriate handler for the exception, turning
123 * on address translation.
124 * Note that we rely on the caller having set cr0.eq iff the exception
125 * occurred in kernel mode (i.e. MSR:PR = 0).
126 */
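/*
 * Register conventions on entry (set up by the exception prologs in
 * head_32.h and the head_*.S files): r11 points to the exception frame,
 * r12 holds the interrupted NIP (SRR0), r9 the interrupted MSR (SRR1),
 * r10 the MSR value to run the handler with, and LR points to a pair of
 * words giving the handler address and the address to return to once
 * the handler is done.
 */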
127	.globl	transfer_to_handler_full
128transfer_to_handler_full:
129	SAVE_NVGPRS(r11)
130	/* fall through */
131
132	.globl	transfer_to_handler
133transfer_to_handler:
134	stw	r2,GPR2(r11)
135	stw	r12,_NIP(r11)
136	stw	r9,_MSR(r11)
137	andi.	r2,r9,MSR_PR
138	mfctr	r12
139	mfspr	r2,SPRN_XER
140	stw	r12,_CTR(r11)
141	stw	r2,_XER(r11)
142	mfspr	r12,SPRN_SPRG_THREAD
143	tovirt_vmstack r12, r12
144	beq	2f			/* if from user, fix up THREAD.regs */
145	addi	r2, r12, -THREAD
146	addi	r11,r1,STACK_FRAME_OVERHEAD
147	stw	r11,PT_REGS(r12)
148#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
149	/* Check to see if the dbcr0 register is set up to debug.  Use the
150	   internal debug mode bit to do this. */
151	lwz	r12,THREAD_DBCR0(r12)
152	andis.	r12,r12,DBCR0_IDM@h
153#endif
154	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
155#ifdef CONFIG_PPC_BOOK3S_32
156	kuep_lock r11, r12
157#endif
158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
159	beq+	3f
160	/* From user and task is ptraced - load up global dbcr0 */
161	li	r12,-1			/* clear all pending debug events */
162	mtspr	SPRN_DBSR,r12
163	lis	r11,global_dbcr0@ha
164	tophys(r11,r11)
165	addi	r11,r11,global_dbcr0@l
166#ifdef CONFIG_SMP
167	lwz	r9,TASK_CPU(r2)
168	slwi	r9,r9,3
169	add	r11,r11,r9
170#endif
171	lwz	r12,0(r11)
172	mtspr	SPRN_DBCR0,r12
173	lwz	r12,4(r11)
174	addi	r12,r12,-1
175	stw	r12,4(r11)
176#endif
177
178	b	3f
179
1802:	/* if from kernel, check interrupted DOZE/NAP mode and
181         * check for stack overflow
182         */
183	kuap_save_and_lock r11, r12, r9, r2, r6
184	addi	r2, r12, -THREAD
185#ifndef CONFIG_VMAP_STACK
186	lwz	r9,KSP_LIMIT(r12)
187	cmplw	r1,r9			/* if r1 <= ksp_limit */
188	ble-	stack_ovf		/* then the kernel stack overflowed */
189#endif
1905:
191#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
192	lwz	r12,TI_LOCAL_FLAGS(r2)
193	mtcrf	0x01,r12
194	bt-	31-TLF_NAPPING,4f
195	bt-	31-TLF_SLEEPING,7f
196#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
197	.globl transfer_to_handler_cont
198transfer_to_handler_cont:
1993:
200	mflr	r9
201	tovirt_novmstack r2, r2 	/* set r2 to current */
202	tovirt_vmstack r9, r9
203	lwz	r11,0(r9)		/* virtual address of handler */
204	lwz	r9,4(r9)		/* where to go when done */
205#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
206	mtspr	SPRN_NRI, r0
207#endif
208#ifdef CONFIG_TRACE_IRQFLAGS
209	/*
210	 * When tracing IRQ state (lockdep) we enable the MMU before we call
211	 * the IRQ tracing functions as they might access vmalloc space or
212	 * perform IOs for console output.
213	 *
214	 * To speed up the syscall path where interrupts stay on, let's check
215	 * first if we are changing the MSR value at all.
216	 */
217	tophys_novmstack r12, r1
218	lwz	r12,_MSR(r12)
219	andi.	r12,r12,MSR_EE
220	bne	1f
221
222	/* MSR isn't changing, just transition directly */
223#endif
224	mtspr	SPRN_SRR0,r11
225	mtspr	SPRN_SRR1,r10
226	mtlr	r9
227	SYNC
228	RFI				/* jump to handler, enable MMU */
229
230#ifdef CONFIG_TRACE_IRQFLAGS
2311:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
232	 * keep interrupts disabled at this point otherwise we might risk
233	 * taking an interrupt before we tell lockdep they are enabled.
234	 */
235	lis	r12,reenable_mmu@h
236	ori	r12,r12,reenable_mmu@l
237	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
238	mtspr	SPRN_SRR0,r12
239	mtspr	SPRN_SRR1,r0
240	SYNC
241	RFI
242
243reenable_mmu:
244	/*
245	 * We save a bunch of GPRs here:
246	 * r3 can be different from GPR3(r1) at this point, r9 and r11
247	 * contain the old MSR and handler address respectively, and
248	 * r4 & r5 can contain page fault arguments that need to be passed
249	 * along as well.  r12, CCR, CTR, XER etc. are left clobbered as
250	 * they aren't useful past this point (they aren't syscall arguments);
251	 * the rest is restored from the exception frame.
252	 */
253
254	stwu	r1,-32(r1)
255	stw	r9,8(r1)
256	stw	r11,12(r1)
257	stw	r3,16(r1)
258	stw	r4,20(r1)
259	stw	r5,24(r1)
260
261	/* If we are disabling interrupts (normal case), simply log it with
262	 * lockdep
263	 */
2641:	bl	trace_hardirqs_off
2652:	lwz	r5,24(r1)
266	lwz	r4,20(r1)
267	lwz	r3,16(r1)
268	lwz	r11,12(r1)
269	lwz	r9,8(r1)
270	addi	r1,r1,32
271	lwz	r0,GPR0(r1)
272	lwz	r6,GPR6(r1)
273	lwz	r7,GPR7(r1)
274	lwz	r8,GPR8(r1)
275	mtctr	r11
276	mtlr	r9
277	bctr				/* jump to handler */
278#endif /* CONFIG_TRACE_IRQFLAGS */
279
280#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2814:	rlwinm	r12,r12,0,~_TLF_NAPPING
282	stw	r12,TI_LOCAL_FLAGS(r2)
283	b	power_save_ppc32_restore
284
2857:	rlwinm	r12,r12,0,~_TLF_SLEEPING
286	stw	r12,TI_LOCAL_FLAGS(r2)
287	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
288	rlwinm	r9,r9,0,~MSR_EE
289	lwz	r12,_LINK(r11)		/* and return to address in LR */
290	kuap_restore r11, r2, r3, r4, r5
291	lwz	r2, GPR2(r11)
292	b	fast_exception_return
293#endif
294
295#ifndef CONFIG_VMAP_STACK
296/*
297 * On kernel stack overflow, load up an initial stack pointer
298 * and call StackOverflow(regs), which should not return.
299 */
300stack_ovf:
301	/* sometimes we use a statically-allocated stack, which is OK. */
302	lis	r12,_end@h
303	ori	r12,r12,_end@l
304	cmplw	r1,r12
305	ble	5b			/* r1 <= &_end is OK */
306	SAVE_NVGPRS(r11)
307	addi	r3,r1,STACK_FRAME_OVERHEAD
308	lis	r1,init_thread_union@ha
309	addi	r1,r1,init_thread_union@l
310	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
311	lis	r9,StackOverflow@ha
312	addi	r9,r9,StackOverflow@l
313	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
314#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
315	mtspr	SPRN_NRI, r0
316#endif
317	mtspr	SPRN_SRR0,r9
318	mtspr	SPRN_SRR1,r10
319	SYNC
320	RFI
321#endif
322
323#ifdef CONFIG_TRACE_IRQFLAGS
324trace_syscall_entry_irq_off:
325	/*
326	 * A syscall shouldn't happen while interrupts are disabled,
327	 * so emit a warning here.
328	 */
3290:	trap
330	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
331	bl	trace_hardirqs_on
332
333	/* Now enable for real */
334	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
335	mtmsr	r10
336
337	REST_GPR(0, r1)
338	REST_4GPRS(3, r1)
339	REST_2GPRS(7, r1)
340	b	DoSyscall
341#endif /* CONFIG_TRACE_IRQFLAGS */
342
343	.globl	transfer_to_syscall
344transfer_to_syscall:
345#ifdef CONFIG_TRACE_IRQFLAGS
346	andi.	r12,r9,MSR_EE
347	beq-	trace_syscall_entry_irq_off
348#endif /* CONFIG_TRACE_IRQFLAGS */
349
350/*
351 * Handle a system call.
352 */
353	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
354	.stabs	"entry_32.S",N_SO,0,0,0f
3550:
356
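/*
 * On entry the syscall number is in r0 and up to six arguments are in
 * r3-r8, per the 32-bit PowerPC syscall convention; the result goes
 * back to userspace in r3, with errors signalled via CR0.SO.
 */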
357_GLOBAL(DoSyscall)
358	stw	r3,ORIG_GPR3(r1)
359	li	r12,0
360	stw	r12,RESULT(r1)
361#ifdef CONFIG_TRACE_IRQFLAGS
362	/* Make sure interrupts are enabled */
363	mfmsr	r11
364	andi.	r12,r11,MSR_EE
365	/* If we came in with interrupts disabled, WARN and treat them as
366	 * enabled for lockdep from now on */
3670:	tweqi	r12, 0
368	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
369#endif /* CONFIG_TRACE_IRQFLAGS */
370	lwz	r11,TI_FLAGS(r2)
371	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
372	bne-	syscall_dotrace
373syscall_dotrace_cont:
374	cmplwi	0,r0,NR_syscalls
375	lis	r10,sys_call_table@h
376	ori	r10,r10,sys_call_table@l
377	slwi	r0,r0,2
378	bge-	66f
379
380	barrier_nospec_asm
381	/*
382	 * Prevent the load of the handler below (based on the user-passed
383	 * system call number) from being speculatively executed until the
384	 * test against NR_syscalls and the branch to 66f above have
385	 * committed.
386	 */
387
388	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
389	mtlr	r10
390	addi	r9,r1,STACK_FRAME_OVERHEAD
391	PPC440EP_ERR42
392	blrl			/* Call handler */
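	/*
	 * The handler's return value comes back in r3.  Values in the
	 * range [-MAX_ERRNO, -1] are treated as errors below: r3 is
	 * negated and CR0.SO is set in the saved CR so that userspace
	 * sees the usual errno convention.
	 */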
393	.globl	ret_from_syscall
394ret_from_syscall:
395#ifdef CONFIG_DEBUG_RSEQ
396	/* Check whether the syscall is issued inside a restartable sequence */
397	stw	r3,GPR3(r1)
398	addi    r3,r1,STACK_FRAME_OVERHEAD
399	bl      rseq_syscall
400	lwz	r3,GPR3(r1)
401#endif
402	mr	r6,r3
403	/* disable interrupts so current_thread_info()->flags can't change */
404	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
405	/* Note: We don't bother telling lockdep about it */
406	SYNC
407	mtmsr	r10
408	lwz	r9,TI_FLAGS(r2)
409	li	r8,-MAX_ERRNO
410	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
411	bne-	syscall_exit_work
412	cmplw	0,r3,r8
413	blt+	syscall_exit_cont
414	lwz	r11,_CCR(r1)			/* Load CR */
415	neg	r3,r3
416	oris	r11,r11,0x1000	/* Set SO bit in CR */
417	stw	r11,_CCR(r1)
418syscall_exit_cont:
419	lwz	r8,_MSR(r1)
420#ifdef CONFIG_TRACE_IRQFLAGS
421	/* If we are going to return from the syscall with interrupts
422	 * off, we trace that here. It shouldn't normally happen.
423	 */
424	andi.	r10,r8,MSR_EE
425	bne+	1f
426	stw	r3,GPR3(r1)
427	bl      trace_hardirqs_off
428	lwz	r3,GPR3(r1)
4291:
430#endif /* CONFIG_TRACE_IRQFLAGS */
431#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
432	/* If the process has its own DBCR0 value, load it up.  The internal
433	   debug mode bit tells us that dbcr0 should be loaded. */
434	lwz	r0,THREAD+THREAD_DBCR0(r2)
435	andis.	r10,r0,DBCR0_IDM@h
436	bnel-	load_dbcr0
437#endif
438#ifdef CONFIG_44x
439BEGIN_MMU_FTR_SECTION
440	lis	r4,icache_44x_need_flush@ha
441	lwz	r5,icache_44x_need_flush@l(r4)
442	cmplwi	cr0,r5,0
443	bne-	2f
4441:
445END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
446#endif /* CONFIG_44x */
447BEGIN_FTR_SECTION
448	lwarx	r7,0,r1
449END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
450	stwcx.	r0,0,r1			/* to clear the reservation */
451	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
452#ifdef CONFIG_PPC_BOOK3S_32
453	kuep_unlock r5, r7
454#endif
455	kuap_check r2, r4
456	lwz	r4,_LINK(r1)
457	lwz	r5,_CCR(r1)
458	mtlr	r4
459	mtcr	r5
460	lwz	r7,_NIP(r1)
461	lwz	r2,GPR2(r1)
462	lwz	r1,GPR1(r1)
463#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
464	mtspr	SPRN_NRI, r0
465#endif
466	mtspr	SPRN_SRR0,r7
467	mtspr	SPRN_SRR1,r8
468	SYNC
469	RFI
470#ifdef CONFIG_44x
4712:	li	r7,0
472	iccci	r0,r0
473	stw	r7,icache_44x_need_flush@l(r4)
474	b	1b
475#endif  /* CONFIG_44x */
476
47766:	li	r3,-ENOSYS
478	b	ret_from_syscall
479
480	.globl	ret_from_fork
481ret_from_fork:
482	REST_NVGPRS(r1)
483	bl	schedule_tail
484	li	r3,0
485	b	ret_from_syscall
486
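/*
 * Kernel threads come here from their first schedule: copy_thread()
 * left the thread function in r14 and its argument in r15, so call the
 * function and, should it ever return, exit through the syscall return
 * path with a result of 0.
 */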
487	.globl	ret_from_kernel_thread
488ret_from_kernel_thread:
489	REST_NVGPRS(r1)
490	bl	schedule_tail
491	mtlr	r14
492	mr	r3,r15
493	PPC440EP_ERR42
494	blrl
495	li	r3,0
496	b	ret_from_syscall
497
498/* Traced system call support */
499syscall_dotrace:
500	SAVE_NVGPRS(r1)
501	li	r0,0xc00
502	stw	r0,_TRAP(r1)
503	addi	r3,r1,STACK_FRAME_OVERHEAD
504	bl	do_syscall_trace_enter
505	/*
506	 * Restore argument registers possibly just changed.
507	 * We use the return value of do_syscall_trace_enter
508	 * as the syscall number to look up in the table (r0).
509	 */
510	mr	r0,r3
511	lwz	r3,GPR3(r1)
512	lwz	r4,GPR4(r1)
513	lwz	r5,GPR5(r1)
514	lwz	r6,GPR6(r1)
515	lwz	r7,GPR7(r1)
516	lwz	r8,GPR8(r1)
517	REST_NVGPRS(r1)
518
519	cmplwi	r0,NR_syscalls
520	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
521	bge-	ret_from_syscall
522	b	syscall_dotrace_cont
523
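/*
 * Slow syscall exit: reached when TI_FLAGS has any of the trace,
 * single-step, user-work or per-syscall bits set.  At this point r3
 * holds the return value, r6 a copy of it, r8 = -MAX_ERRNO, r9 the
 * cached TI_FLAGS and r10 = MSR_KERNEL without MSR_EE.
 */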
524syscall_exit_work:
525	andi.	r0,r9,_TIF_RESTOREALL
526	beq+	0f
527	REST_NVGPRS(r1)
528	b	2f
5290:	cmplw	0,r3,r8
530	blt+	1f
531	andi.	r0,r9,_TIF_NOERROR
532	bne-	1f
533	lwz	r11,_CCR(r1)			/* Load CR */
534	neg	r3,r3
535	oris	r11,r11,0x1000	/* Set SO bit in CR */
536	stw	r11,_CCR(r1)
537
5381:	stw	r6,RESULT(r1)	/* Save result */
539	stw	r3,GPR3(r1)	/* Update return value */
5402:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
541	beq	4f
542
543	/* Clear per-syscall TIF flags if any are set.  */
544
545	li	r11,_TIF_PERSYSCALL_MASK
546	addi	r12,r2,TI_FLAGS
5473:	lwarx	r8,0,r12
548	andc	r8,r8,r11
549#ifdef CONFIG_IBM405_ERR77
550	dcbt	0,r12
551#endif
552	stwcx.	r8,0,r12
553	bne-	3b
554
5554:	/* Anything which requires enabling interrupts? */
556	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
557	beq	ret_from_except
558
559	/* Re-enable interrupts. There is no need to trace that with
560	 * lockdep as we are supposed to have IRQs on at this point
561	 */
562	ori	r10,r10,MSR_EE
563	SYNC
564	mtmsr	r10
565
566	/* Save NVGPRS if they're not saved already */
567	lwz	r4,_TRAP(r1)
568	andi.	r4,r4,1
569	beq	5f
570	SAVE_NVGPRS(r1)
571	li	r4,0xc00
572	stw	r4,_TRAP(r1)
5735:
574	addi	r3,r1,STACK_FRAME_OVERHEAD
575	bl	do_syscall_trace_leave
576	b	ret_from_except_full
577
578/*
579 * The fork/clone functions need to copy the full register set into
580 * the child process. Therefore we need to save all the nonvolatile
581 * registers (r13 - r31) before calling the C code.
582 */
583	.globl	ppc_fork
584ppc_fork:
585	SAVE_NVGPRS(r1)
586	lwz	r0,_TRAP(r1)
587	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
588	stw	r0,_TRAP(r1)		/* register set saved */
589	b	sys_fork
590
591	.globl	ppc_vfork
592ppc_vfork:
593	SAVE_NVGPRS(r1)
594	lwz	r0,_TRAP(r1)
595	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
596	stw	r0,_TRAP(r1)		/* register set saved */
597	b	sys_vfork
598
599	.globl	ppc_clone
600ppc_clone:
601	SAVE_NVGPRS(r1)
602	lwz	r0,_TRAP(r1)
603	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
604	stw	r0,_TRAP(r1)		/* register set saved */
605	b	sys_clone
606
607	.globl	ppc_clone3
608ppc_clone3:
609	SAVE_NVGPRS(r1)
610	lwz	r0,_TRAP(r1)
611	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
612	stw	r0,_TRAP(r1)		/* register set saved */
613	b	sys_clone3
614
615	.globl	ppc_swapcontext
616ppc_swapcontext:
617	SAVE_NVGPRS(r1)
618	lwz	r0,_TRAP(r1)
619	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
620	stw	r0,_TRAP(r1)		/* register set saved */
621	b	sys_swapcontext
622
623/*
624 * Top-level page fault handling.
625 * This is in assembler because if do_page_fault tells us that
626 * it is a bad kernel page fault, we want to save the non-volatile
627 * registers before calling bad_page_fault.
628 */
629	.globl	handle_page_fault
630handle_page_fault:
631	addi	r3,r1,STACK_FRAME_OVERHEAD
632#ifdef CONFIG_PPC_BOOK3S_32
633	andis.  r0,r5,DSISR_DABRMATCH@h
634	bne-    handle_dabr_fault
635#endif
636	bl	do_page_fault
637	cmpwi	r3,0
638	beq+	ret_from_except
639	SAVE_NVGPRS(r1)
640	lwz	r0,_TRAP(r1)
641	clrrwi	r0,r0,1
642	stw	r0,_TRAP(r1)
643	mr	r5,r3
644	addi	r3,r1,STACK_FRAME_OVERHEAD
645	lwz	r4,_DAR(r1)
646	bl	bad_page_fault
647	b	ret_from_except_full
648
649#ifdef CONFIG_PPC_BOOK3S_32
650	/* We have a data breakpoint exception - handle it */
651handle_dabr_fault:
652	SAVE_NVGPRS(r1)
653	lwz	r0,_TRAP(r1)
654	clrrwi	r0,r0,1
655	stw	r0,_TRAP(r1)
656	bl      do_break
657	b	ret_from_except_full
658#endif
659
660/*
661 * This routine switches between two different tasks.  The process
662 * state of one is saved on its kernel stack.  Then the state
663 * of the other is restored from its kernel stack.  The memory
664 * management hardware is updated to the second process's state.
665 * Finally, we can return to the second process.
666 * On entry, r3 points to the THREAD for the current task, r4
667 * points to the THREAD for the new task.
668 *
669 * This routine is always called with interrupts disabled.
670 *
671 * Note: there are two ways to get to the "going out" portion
672 * of this code; either by coming in via the entry (_switch)
673 * or via "fork" which must set up an environment equivalent
674 * to the "_switch" path.  If you change this, you'll have to
675 * change the fork code also.
676 *
677 * The code which creates the new task context is in 'copy_thread'
678 * in arch/powerpc/kernel/process.c
679 */
680_GLOBAL(_switch)
681	stwu	r1,-INT_FRAME_SIZE(r1)
682	mflr	r0
683	stw	r0,INT_FRAME_SIZE+4(r1)
684	/* r3-r12 are caller saved -- Cort */
685	SAVE_NVGPRS(r1)
686	stw	r0,_NIP(r1)	/* Return to switch caller */
687	mfmsr	r11
688	li	r0,MSR_FP	/* Disable floating-point */
689#ifdef CONFIG_ALTIVEC
690BEGIN_FTR_SECTION
691	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
692	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
693	stw	r12,THREAD+THREAD_VRSAVE(r2)
694END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
695#endif /* CONFIG_ALTIVEC */
696#ifdef CONFIG_SPE
697BEGIN_FTR_SECTION
698	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
699	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
700	stw	r12,THREAD+THREAD_SPEFSCR(r2)
701END_FTR_SECTION_IFSET(CPU_FTR_SPE)
702#endif /* CONFIG_SPE */
703	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
704	beq+	1f
705	andc	r11,r11,r0
706	mtmsr	r11
707	isync
7081:	stw	r11,_MSR(r1)
709	mfcr	r10
710	stw	r10,_CCR(r1)
711	stw	r1,KSP(r3)	/* Set old stack pointer */
712
713	kuap_check r2, r4
714#ifdef CONFIG_SMP
715	/* We need a sync somewhere here to make sure that if the
716	 * previous task gets rescheduled on another CPU, it sees all
717	 * stores it has performed on this one.
718	 */
719	sync
720#endif /* CONFIG_SMP */
721
722	tophys(r0,r4)
723	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
724	lwz	r1,KSP(r4)	/* Load new stack pointer */
725
726	/* save the old current 'last' for return value */
727	mr	r3,r2
728	addi	r2,r4,-THREAD	/* Update current */
729
730#ifdef CONFIG_ALTIVEC
731BEGIN_FTR_SECTION
732	lwz	r0,THREAD+THREAD_VRSAVE(r2)
733	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
734END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
735#endif /* CONFIG_ALTIVEC */
736#ifdef CONFIG_SPE
737BEGIN_FTR_SECTION
738	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
739	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
740END_FTR_SECTION_IFSET(CPU_FTR_SPE)
741#endif /* CONFIG_SPE */
742
743	lwz	r0,_CCR(r1)
744	mtcrf	0xFF,r0
745	/* r3-r12 are destroyed -- Cort */
746	REST_NVGPRS(r1)
747
748	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
749	mtlr	r4
750	addi	r1,r1,INT_FRAME_SIZE
751	blr
752
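/*
 * Fast return path used when only the registers saved by the exception
 * prolog need restoring.  Expects r11 to point to the exception frame,
 * r9 to hold the MSR to return with and r12 the return address; these
 * go straight into SRR1/SRR0 before the final rfi.
 */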
753	.globl	fast_exception_return
754fast_exception_return:
755#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
756	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
757	beq	1f			/* if not, we've got problems */
758#endif
759
7602:	REST_4GPRS(3, r11)
761	lwz	r10,_CCR(r11)
762	REST_GPR(1, r11)
763	mtcr	r10
764	lwz	r10,_LINK(r11)
765	mtlr	r10
766	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
767	li	r10, 0
768	stw	r10, 8(r11)
769	REST_GPR(10, r11)
770#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
771	mtspr	SPRN_NRI, r0
772#endif
773	mtspr	SPRN_SRR1,r9
774	mtspr	SPRN_SRR0,r12
775	REST_GPR(9, r11)
776	REST_GPR(12, r11)
777	lwz	r11,GPR11(r11)
778	SYNC
779	RFI
780
781#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
782/* check if the exception happened in a restartable section */
7831:	lis	r3,exc_exit_restart_end@ha
784	addi	r3,r3,exc_exit_restart_end@l
785	cmplw	r12,r3
786#ifdef CONFIG_PPC_BOOK3S_601
787	bge	2b
788#else
789	bge	3f
790#endif
791	lis	r4,exc_exit_restart@ha
792	addi	r4,r4,exc_exit_restart@l
793	cmplw	r12,r4
794#ifdef CONFIG_PPC_BOOK3S_601
795	blt	2b
796#else
797	blt	3f
798#endif
799	lis	r3,fee_restarts@ha
800	tophys(r3,r3)
801	lwz	r5,fee_restarts@l(r3)
802	addi	r5,r5,1
803	stw	r5,fee_restarts@l(r3)
804	mr	r12,r4		/* restart at exc_exit_restart */
805	b	2b
806
807	.section .bss
808	.align	2
809fee_restarts:
810	.space	4
811	.previous
812
813/* aargh, a nonrecoverable interrupt, panic */
814/* aargh, we don't know which trap this is */
815/* but the 601 doesn't implement the RI bit, so assume it's OK */
8163:
817	li	r10,-1
818	stw	r10,_TRAP(r11)
819	addi	r3,r1,STACK_FRAME_OVERHEAD
820	lis	r10,MSR_KERNEL@h
821	ori	r10,r10,MSR_KERNEL@l
822	bl	transfer_to_handler_full
823	.long	unrecoverable_exception
824	.long	ret_from_except
825#endif
826
827	.globl	ret_from_except_full
828ret_from_except_full:
829	REST_NVGPRS(r1)
830	/* fall through */
831
832	.globl	ret_from_except
833ret_from_except:
834	/* Hard-disable interrupts so that current_thread_info()->flags
835	 * can't change between when we test it and when we return
836	 * from the interrupt. */
837	/* Note: We don't bother telling lockdep about it */
838	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
839	SYNC			/* Some chip revs have problems here... */
840	mtmsr	r10		/* disable interrupts */
841
842	lwz	r3,_MSR(r1)	/* Returning to user mode? */
843	andi.	r0,r3,MSR_PR
844	beq	resume_kernel
845
846user_exc_return:		/* r10 contains MSR_KERNEL here */
847	/* Check current_thread_info()->flags */
848	lwz	r9,TI_FLAGS(r2)
849	andi.	r0,r9,_TIF_USER_WORK_MASK
850	bne	do_work
851
852restore_user:
853#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
854	/* Check whether this process has its own DBCR0 value.  The internal
855	   debug mode bit tells us that dbcr0 should be loaded. */
856	lwz	r0,THREAD+THREAD_DBCR0(r2)
857	andis.	r10,r0,DBCR0_IDM@h
858	bnel-	load_dbcr0
859#endif
860	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
861#ifdef CONFIG_PPC_BOOK3S_32
862	kuep_unlock	r10, r11
863#endif
864
865	b	restore
866
867/* N.B. the only way to get here is from the beq following ret_from_except. */
868resume_kernel:
869	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
870	lwz	r8,TI_FLAGS(r2)
871	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
872	beq+	1f
873
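	/*
	 * An stwu updating r1 was emulated (e.g. for a kprobed function
	 * entry), so the store to the new stack location is still pending.
	 * Copy the exception frame down to the emulated stack pointer,
	 * complete the deferred store of the back chain, then clear the
	 * TIF flag.
	 */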
874	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
875
876	lwz	r3,GPR1(r1)
877	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
878	mr	r4,r1			/* src:  current exception frame */
879	mr	r1,r3			/* Reroute the trampoline frame to r1 */
880
881	/* Copy from the original to the trampoline. */
882	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
883	li	r6,0			/* start offset: 0 */
884	mtctr	r5
8852:	lwzx	r0,r6,r4
886	stwx	r0,r6,r3
887	addi	r6,r6,4
888	bdnz	2b
889
890	/* Do real store operation to complete stwu */
891	lwz	r5,GPR1(r1)
892	stw	r8,0(r5)
893
894	/* Clear _TIF_EMULATE_STACK_STORE flag */
895	lis	r11,_TIF_EMULATE_STACK_STORE@h
896	addi	r5,r2,TI_FLAGS
8970:	lwarx	r8,0,r5
898	andc	r8,r8,r11
899#ifdef CONFIG_IBM405_ERR77
900	dcbt	0,r5
901#endif
902	stwcx.	r8,0,r5
903	bne-	0b
9041:
905
906#ifdef CONFIG_PREEMPTION
907	/* check current_thread_info->preempt_count */
908	lwz	r0,TI_PREEMPT(r2)
909	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
910	bne	restore_kuap
911	andi.	r8,r8,_TIF_NEED_RESCHED
912	beq+	restore_kuap
913	lwz	r3,_MSR(r1)
914	andi.	r0,r3,MSR_EE	/* interrupts off? */
915	beq	restore_kuap	/* don't schedule if so */
916#ifdef CONFIG_TRACE_IRQFLAGS
917	/* Lockdep thinks irqs are enabled, we need to call
918	 * preempt_schedule_irq with IRQs off, so we inform lockdep
919	 * now that we -did- turn them off already
920	 */
921	bl	trace_hardirqs_off
922#endif
923	bl	preempt_schedule_irq
924#ifdef CONFIG_TRACE_IRQFLAGS
925	/* And now, to properly rebalance the above, we tell lockdep they
926	 * are being turned back on, which will happen when we return
927	 */
928	bl	trace_hardirqs_on
929#endif
930#endif /* CONFIG_PREEMPTION */
931restore_kuap:
932	kuap_restore r1, r2, r9, r10, r0
933
934	/* interrupts are hard-disabled at this point */
935restore:
936#ifdef CONFIG_44x
937BEGIN_MMU_FTR_SECTION
938	b	1f
939END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
940	lis	r4,icache_44x_need_flush@ha
941	lwz	r5,icache_44x_need_flush@l(r4)
942	cmplwi	cr0,r5,0
943	beq+	1f
944	li	r6,0
945	iccci	r0,r0
946	stw	r6,icache_44x_need_flush@l(r4)
9471:
948#endif  /* CONFIG_44x */
949
950	lwz	r9,_MSR(r1)
951#ifdef CONFIG_TRACE_IRQFLAGS
952	/* Lockdep doesn't know that IRQs are temporarily turned off in this
953	 * assembly code while we peek at TI_FLAGS() and such.  However, we do
954	 * need to inform it when the interrupted context had interrupts on
955	 * and we are about to turn them back on.
956	 */
957	andi.	r10,r9,MSR_EE
958	beq	1f
959	stwu	r1,-32(r1)
960	mflr	r0
961	stw	r0,4(r1)
962	bl	trace_hardirqs_on
963	addi	r1, r1, 32
964	lwz	r9,_MSR(r1)
9651:
966#endif /* CONFIG_TRACE_IRQFLAGS */
967
968	lwz	r0,GPR0(r1)
969	lwz	r2,GPR2(r1)
970	REST_4GPRS(3, r1)
971	REST_2GPRS(7, r1)
972
973	lwz	r10,_XER(r1)
974	lwz	r11,_CTR(r1)
975	mtspr	SPRN_XER,r10
976	mtctr	r11
977
978	PPC405_ERR77(0,r1)
979BEGIN_FTR_SECTION
980	lwarx	r11,0,r1
981END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
982	stwcx.	r0,0,r1			/* to clear the reservation */
983
984#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
985	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
986	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
987
988	lwz	r10,_CCR(r1)
989	lwz	r11,_LINK(r1)
990	mtcrf	0xFF,r10
991	mtlr	r11
992
993	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
994	li	r10, 0
995	stw	r10, 8(r1)
996	/*
997	 * Once we put values in SRR0 and SRR1, we are in a state
998	 * where exceptions are not recoverable, since taking an
999	 * exception will trash SRR0 and SRR1.  Therefore we clear the
1000	 * MSR:RI bit to indicate this.  If we do take an exception,
1001	 * we can't return to the point of the exception but we
1002	 * can restart the exception exit path at the label
1003	 * exc_exit_restart below.  -- paulus
1004	 */
1005	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1006	SYNC
1007	mtmsr	r10		/* clear the RI bit */
1008	.globl exc_exit_restart
1009exc_exit_restart:
1010	lwz	r12,_NIP(r1)
1011	mtspr	SPRN_SRR0,r12
1012	mtspr	SPRN_SRR1,r9
1013	REST_4GPRS(9, r1)
1014	lwz	r1,GPR1(r1)
1015	.globl exc_exit_restart_end
1016exc_exit_restart_end:
1017	SYNC
1018	RFI
1019
1020#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1021	/*
1022	 * This is a bit different on 4xx/Book-E because it doesn't have
1023	 * the RI bit in the MSR.
1024	 * The TLB miss handler checks if we have interrupted
1025	 * the exception exit path and restarts it if so
1026	 * (well maybe one day it will... :).
1027	 */
1028	lwz	r11,_LINK(r1)
1029	mtlr	r11
1030	lwz	r10,_CCR(r1)
1031	mtcrf	0xff,r10
1032	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1033	li	r10, 0
1034	stw	r10, 8(r1)
1035	REST_2GPRS(9, r1)
1036	.globl exc_exit_restart
1037exc_exit_restart:
1038	lwz	r11,_NIP(r1)
1039	lwz	r12,_MSR(r1)
1040exc_exit_start:
1041	mtspr	SPRN_SRR0,r11
1042	mtspr	SPRN_SRR1,r12
1043	REST_2GPRS(11, r1)
1044	lwz	r1,GPR1(r1)
1045	.globl exc_exit_restart_end
1046exc_exit_restart_end:
1047	PPC405_ERR77_SYNC
1048	rfi
1049	b	.			/* prevent prefetch past rfi */
1050
1051/*
1052 * Returning from a critical interrupt in user mode doesn't need
1053 * to be any different from a normal exception.  For a critical
1054 * interrupt in the kernel, we just return (without checking for
1055 * preemption) since the interrupt may have happened at some crucial
1056 * place (e.g. inside the TLB miss handler), and because we will be
1057 * running with r1 pointing into critical_stack, not the current
1058 * process's kernel stack (and therefore current_thread_info() will
1059 * give the wrong answer).
1060 * We have to restore various SPRs that may have been in use at the
1061 * time of the critical interrupt.
1062 *
1063 */
1064#ifdef CONFIG_40x
1065#define PPC_40x_TURN_OFF_MSR_DR						    \
1066	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1067	 * assume the instructions here are mapped by a pinned TLB entry */ \
1068	li	r10,MSR_IR;						    \
1069	mtmsr	r10;							    \
1070	isync;								    \
1071	tophys(r1, r1);
1072#else
1073#define PPC_40x_TURN_OFF_MSR_DR
1074#endif
1075
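/*
 * Common tail for returning from an exception level that has its own
 * SRR pair (critical, debug or machine check): restore the non-volatile
 * and volatile GPRs, CTR/XER/LR/CR and DEAR/ESR, then return through
 * the level-specific SRR registers and rfi variant.  If user mode was
 * interrupted, go out through the normal user_exc_return path instead.
 */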
1076#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1077	REST_NVGPRS(r1);						\
1078	lwz	r3,_MSR(r1);						\
1079	andi.	r3,r3,MSR_PR;						\
1080	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
1081	bne	user_exc_return;					\
1082	lwz	r0,GPR0(r1);						\
1083	lwz	r2,GPR2(r1);						\
1084	REST_4GPRS(3, r1);						\
1085	REST_2GPRS(7, r1);						\
1086	lwz	r10,_XER(r1);						\
1087	lwz	r11,_CTR(r1);						\
1088	mtspr	SPRN_XER,r10;						\
1089	mtctr	r11;							\
1090	PPC405_ERR77(0,r1);						\
1091	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1092	lwz	r11,_LINK(r1);						\
1093	mtlr	r11;							\
1094	lwz	r10,_CCR(r1);						\
1095	mtcrf	0xff,r10;						\
1096	PPC_40x_TURN_OFF_MSR_DR;					\
1097	lwz	r9,_DEAR(r1);						\
1098	lwz	r10,_ESR(r1);						\
1099	mtspr	SPRN_DEAR,r9;						\
1100	mtspr	SPRN_ESR,r10;						\
1101	lwz	r11,_NIP(r1);						\
1102	lwz	r12,_MSR(r1);						\
1103	mtspr	exc_lvl_srr0,r11;					\
1104	mtspr	exc_lvl_srr1,r12;					\
1105	lwz	r9,GPR9(r1);						\
1106	lwz	r12,GPR12(r1);						\
1107	lwz	r10,GPR10(r1);						\
1108	lwz	r11,GPR11(r1);						\
1109	lwz	r1,GPR1(r1);						\
1110	PPC405_ERR77_SYNC;						\
1111	exc_lvl_rfi;							\
1112	b	.;		/* prevent prefetch past exc_lvl_rfi */
1113
1114#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1115	lwz	r9,_##exc_lvl_srr0(r1);					\
1116	lwz	r10,_##exc_lvl_srr1(r1);				\
1117	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1118	mtspr	SPRN_##exc_lvl_srr1,r10;
1119
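/*
 * A critical-level interrupt may have hit in the middle of a TLB miss
 * handler, so the MMU scratch state saved by crit_transfer_to_handler
 * (the MAS registers on Book3E MMUs, MMUCR on 44x) must be put back
 * before returning.
 */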
1120#if defined(CONFIG_PPC_BOOK3E_MMU)
1121#ifdef CONFIG_PHYS_64BIT
1122#define	RESTORE_MAS7							\
1123	lwz	r11,MAS7(r1);						\
1124	mtspr	SPRN_MAS7,r11;
1125#else
1126#define	RESTORE_MAS7
1127#endif /* CONFIG_PHYS_64BIT */
1128#define RESTORE_MMU_REGS						\
1129	lwz	r9,MAS0(r1);						\
1130	lwz	r10,MAS1(r1);						\
1131	lwz	r11,MAS2(r1);						\
1132	mtspr	SPRN_MAS0,r9;						\
1133	lwz	r9,MAS3(r1);						\
1134	mtspr	SPRN_MAS1,r10;						\
1135	lwz	r10,MAS6(r1);						\
1136	mtspr	SPRN_MAS2,r11;						\
1137	mtspr	SPRN_MAS3,r9;						\
1138	mtspr	SPRN_MAS6,r10;						\
1139	RESTORE_MAS7;
1140#elif defined(CONFIG_44x)
1141#define RESTORE_MMU_REGS						\
1142	lwz	r9,MMUCR(r1);						\
1143	mtspr	SPRN_MMUCR,r9;
1144#else
1145#define RESTORE_MMU_REGS
1146#endif
1147
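/*
 * The platform-specific return paths below first restore the saved
 * kernel stack limit and the shadowed SRR values that the corresponding
 * *_transfer_to_handler saved on entry, then finish with the common
 * RET_FROM_EXC_LEVEL sequence above.
 */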
1148#ifdef CONFIG_40x
1149	.globl	ret_from_crit_exc
1150ret_from_crit_exc:
1151	mfspr	r9,SPRN_SPRG_THREAD
1152	lis	r10,saved_ksp_limit@ha;
1153	lwz	r10,saved_ksp_limit@l(r10);
1154	tovirt(r9,r9);
1155	stw	r10,KSP_LIMIT(r9)
1156	lis	r9,crit_srr0@ha;
1157	lwz	r9,crit_srr0@l(r9);
1158	lis	r10,crit_srr1@ha;
1159	lwz	r10,crit_srr1@l(r10);
1160	mtspr	SPRN_SRR0,r9;
1161	mtspr	SPRN_SRR1,r10;
1162	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1163#endif /* CONFIG_40x */
1164
1165#ifdef CONFIG_BOOKE
1166	.globl	ret_from_crit_exc
1167ret_from_crit_exc:
1168	mfspr	r9,SPRN_SPRG_THREAD
1169	lwz	r10,SAVED_KSP_LIMIT(r1)
1170	stw	r10,KSP_LIMIT(r9)
1171	RESTORE_xSRR(SRR0,SRR1);
1172	RESTORE_MMU_REGS;
1173	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1174
1175	.globl	ret_from_debug_exc
1176ret_from_debug_exc:
1177	mfspr	r9,SPRN_SPRG_THREAD
1178	lwz	r10,SAVED_KSP_LIMIT(r1)
1179	stw	r10,KSP_LIMIT(r9)
1180	RESTORE_xSRR(SRR0,SRR1);
1181	RESTORE_xSRR(CSRR0,CSRR1);
1182	RESTORE_MMU_REGS;
1183	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1184
1185	.globl	ret_from_mcheck_exc
1186ret_from_mcheck_exc:
1187	mfspr	r9,SPRN_SPRG_THREAD
1188	lwz	r10,SAVED_KSP_LIMIT(r1)
1189	stw	r10,KSP_LIMIT(r9)
1190	RESTORE_xSRR(SRR0,SRR1);
1191	RESTORE_xSRR(CSRR0,CSRR1);
1192	RESTORE_xSRR(DSRR0,DSRR1);
1193	RESTORE_MMU_REGS;
1194	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1195#endif /* CONFIG_BOOKE */
1196
1197/*
1198 * Load the DBCR0 value for a task that is being ptraced,
1199 * having first saved away the global DBCR0.  Note that r0
1200 * has the dbcr0 value to set upon entry to this.
1201 */
1202load_dbcr0:
1203	mfmsr	r10		/* first disable debug exceptions */
1204	rlwinm	r10,r10,0,~MSR_DE
1205	mtmsr	r10
1206	isync
1207	mfspr	r10,SPRN_DBCR0
1208	lis	r11,global_dbcr0@ha
1209	addi	r11,r11,global_dbcr0@l
1210#ifdef CONFIG_SMP
1211	lwz	r9,TASK_CPU(r2)
1212	slwi	r9,r9,3
1213	add	r11,r11,r9
1214#endif
1215	stw	r10,0(r11)
1216	mtspr	SPRN_DBCR0,r0
1217	lwz	r10,4(r11)
1218	addi	r10,r10,1
1219	stw	r10,4(r11)
1220	li	r11,-1
1221	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1222	blr
1223
1224	.section .bss
1225	.align	4
1226	.global global_dbcr0
1227global_dbcr0:
1228	.space	8*NR_CPUS
1229	.previous
1230#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1231
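/*
 * Work is pending on the way back to user space: r9 holds TI_FLAGS and
 * r10 the MSR_KERNEL value with interrupts off.  Reschedule and/or
 * deliver signals, then loop back with interrupts hard-disabled to
 * re-check the flags before finally returning.
 */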
1232do_work:			/* r10 contains MSR_KERNEL here */
1233	andi.	r0,r9,_TIF_NEED_RESCHED
1234	beq	do_user_signal
1235
1236do_resched:			/* r10 contains MSR_KERNEL here */
1237#ifdef CONFIG_TRACE_IRQFLAGS
1238	bl	trace_hardirqs_on
1239	mfmsr	r10
1240#endif
1241	ori	r10,r10,MSR_EE
1242	SYNC
1243	mtmsr	r10		/* hard-enable interrupts */
1244	bl	schedule
1245recheck:
1246	/* Note: we don't tell lockdep that we are disabling interrupts again
1247	 * here either.  The disable/enable cycles used to peek at
1248	 * TI_FLAGS aren't advertised to it.
1249	 */
1250	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1251	SYNC
1252	mtmsr	r10		/* disable interrupts */
1253	lwz	r9,TI_FLAGS(r2)
1254	andi.	r0,r9,_TIF_NEED_RESCHED
1255	bne-	do_resched
1256	andi.	r0,r9,_TIF_USER_WORK_MASK
1257	beq	restore_user
1258do_user_signal:			/* r10 contains MSR_KERNEL here */
1259	ori	r10,r10,MSR_EE
1260	SYNC
1261	mtmsr	r10		/* hard-enable interrupts */
1262	/* save r13-r31 in the exception frame, if not already done */
1263	lwz	r3,_TRAP(r1)
1264	andi.	r0,r3,1
1265	beq	2f
1266	SAVE_NVGPRS(r1)
1267	rlwinm	r3,r3,0,0,30
1268	stw	r3,_TRAP(r1)
12692:	addi	r3,r1,STACK_FRAME_OVERHEAD
1270	mr	r4,r9
1271	bl	do_notify_resume
1272	REST_NVGPRS(r1)
1273	b	recheck
1274
1275/*
1276 * We come here when we are at the end of handling an exception
1277 * that occurred at a place where taking an exception will lose
1278 * state information, such as the contents of SRR0 and SRR1.
1279 */
1280nonrecoverable:
1281	lis	r10,exc_exit_restart_end@ha
1282	addi	r10,r10,exc_exit_restart_end@l
1283	cmplw	r12,r10
1284#ifdef CONFIG_PPC_BOOK3S_601
1285	bgelr
1286#else
1287	bge	3f
1288#endif
1289	lis	r11,exc_exit_restart@ha
1290	addi	r11,r11,exc_exit_restart@l
1291	cmplw	r12,r11
1292#ifdef CONFIG_PPC_BOOK3S_601
1293	bltlr
1294#else
1295	blt	3f
1296#endif
1297	lis	r10,ee_restarts@ha
1298	lwz	r12,ee_restarts@l(r10)
1299	addi	r12,r12,1
1300	stw	r12,ee_restarts@l(r10)
1301	mr	r12,r11		/* restart at exc_exit_restart */
1302	blr
13033:	/* OK, we can't recover, kill this process */
1304	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1305	lwz	r3,_TRAP(r1)
1306	andi.	r0,r3,1
1307	beq	5f
1308	SAVE_NVGPRS(r1)
1309	rlwinm	r3,r3,0,0,30
1310	stw	r3,_TRAP(r1)
13115:	mfspr	r2,SPRN_SPRG_THREAD
1312	addi	r2,r2,-THREAD
1313	tovirt(r2,r2)			/* set back r2 to current */
13144:	addi	r3,r1,STACK_FRAME_OVERHEAD
1315	bl	unrecoverable_exception
1316	/* shouldn't return */
1317	b	4b
1318
1319	.section .bss
1320	.align	2
1321ee_restarts:
1322	.space	4
1323	.previous
1324
1325/*
1326 * PROM code for specific machines follows.  Put it
1327 * here so it's easy to add arch-specific sections later.
1328 * -- Cort
1329 */
1330#ifdef CONFIG_PPC_RTAS
1331/*
1332 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1333 * called with the MMU off.
1334 */
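/*
 * enter_rtas() receives the address of the RTAS argument buffer in r3
 * and passes it through to RTAS untouched.  We save the caller's MSR
 * and return address, jump to the RTAS entry point with the MMU off,
 * and restore everything at label 1 once RTAS returns there.
 */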
1335_GLOBAL(enter_rtas)
1336	stwu	r1,-INT_FRAME_SIZE(r1)
1337	mflr	r0
1338	stw	r0,INT_FRAME_SIZE+4(r1)
1339	LOAD_REG_ADDR(r4, rtas)
1340	lis	r6,1f@ha	/* physical return address for rtas */
1341	addi	r6,r6,1f@l
1342	tophys(r6,r6)
1343	tophys_novmstack r7, r1
1344	lwz	r8,RTASENTRY(r4)
1345	lwz	r4,RTASBASE(r4)
1346	mfmsr	r9
1347	stw	r9,8(r1)
1348	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1349	SYNC			/* disable interrupts so SRR0/1 */
1350	mtmsr	r0		/* don't get trashed */
1351	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1352	mtlr	r6
1353	stw	r7, THREAD + RTAS_SP(r2)
1354	mtspr	SPRN_SRR0,r8
1355	mtspr	SPRN_SRR1,r9
1356	RFI
13571:	tophys(r9,r1)
1358	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1359	lwz	r9,8(r9)	/* original msr value */
1360	addi	r1,r1,INT_FRAME_SIZE
1361	li	r0,0
1362	tophys(r7, r2)
1363	stw	r0, THREAD + RTAS_SP(r7)
1364	mtspr	SPRN_SRR0,r8
1365	mtspr	SPRN_SRR1,r9
1366	RFI			/* return to caller */
1367
1368	.globl	machine_check_in_rtas
1369machine_check_in_rtas:
1370	twi	31,0,0
1371	/* XXX load up BATs and panic */
1372
1373#endif /* CONFIG_PPC_RTAS */
1374