xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision b58c6630)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 *  PowerPC version
4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 *  This file contains the system call entry code, context switch
14 *  code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/asm-405.h>
32#include <asm/feature-fixups.h>
33#include <asm/barrier.h>
34#include <asm/kup.h>
35#include <asm/bug.h>
36
37#include "head_32.h"
38
39/*
40 * Align to 4k to ensure that all functions modifying srr0/srr1
41 * fit into one page, so that no TLB miss can occur between the
42 * modification of srr0/srr1 and the associated rfi.
43 */
44	.align	12
45
46#ifdef CONFIG_BOOKE
47	.globl	mcheck_transfer_to_handler
48mcheck_transfer_to_handler:
49	mfspr	r0,SPRN_DSRR0
50	stw	r0,_DSRR0(r11)
51	mfspr	r0,SPRN_DSRR1
52	stw	r0,_DSRR1(r11)
53	/* fall through */
54
55	.globl	debug_transfer_to_handler
56debug_transfer_to_handler:
57	mfspr	r0,SPRN_CSRR0
58	stw	r0,_CSRR0(r11)
59	mfspr	r0,SPRN_CSRR1
60	stw	r0,_CSRR1(r11)
61	/* fall through */
62
63	.globl	crit_transfer_to_handler
64crit_transfer_to_handler:
65#ifdef CONFIG_PPC_BOOK3E_MMU
66	mfspr	r0,SPRN_MAS0
67	stw	r0,MAS0(r11)
68	mfspr	r0,SPRN_MAS1
69	stw	r0,MAS1(r11)
70	mfspr	r0,SPRN_MAS2
71	stw	r0,MAS2(r11)
72	mfspr	r0,SPRN_MAS3
73	stw	r0,MAS3(r11)
74	mfspr	r0,SPRN_MAS6
75	stw	r0,MAS6(r11)
76#ifdef CONFIG_PHYS_64BIT
77	mfspr	r0,SPRN_MAS7
78	stw	r0,MAS7(r11)
79#endif /* CONFIG_PHYS_64BIT */
80#endif /* CONFIG_PPC_BOOK3E_MMU */
81#ifdef CONFIG_44x
82	mfspr	r0,SPRN_MMUCR
83	stw	r0,MMUCR(r11)
84#endif
85	mfspr	r0,SPRN_SRR0
86	stw	r0,_SRR0(r11)
87	mfspr	r0,SPRN_SRR1
88	stw	r0,_SRR1(r11)
89
90	/* set the stack limit to the current stack */
91	mfspr	r8,SPRN_SPRG_THREAD
92	lwz	r0,KSP_LIMIT(r8)
93	stw	r0,SAVED_KSP_LIMIT(r11)
94	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
95	stw	r0,KSP_LIMIT(r8)
96	/* fall through */
97#endif
98
99#ifdef CONFIG_40x
100	.globl	crit_transfer_to_handler
101crit_transfer_to_handler:
102	lwz	r0,crit_r10@l(0)
103	stw	r0,GPR10(r11)
104	lwz	r0,crit_r11@l(0)
105	stw	r0,GPR11(r11)
106	mfspr	r0,SPRN_SRR0
107	stw	r0,crit_srr0@l(0)
108	mfspr	r0,SPRN_SRR1
109	stw	r0,crit_srr1@l(0)
110
111	/* set the stack limit to the current stack */
112	mfspr	r8,SPRN_SPRG_THREAD
113	lwz	r0,KSP_LIMIT(r8)
114	stw	r0,saved_ksp_limit@l(0)
115	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
116	stw	r0,KSP_LIMIT(r8)
117	/* fall through */
118#endif
119
120/*
121 * This code finishes saving the registers to the exception frame
122 * and jumps to the appropriate handler for the exception, turning
123 * on address translation.
124 * Note that we rely on the caller having set cr0.eq iff the exception
125 * occurred in kernel mode (i.e. MSR:PR = 0).
126 */
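/*
 * Calling convention: callers reach transfer_to_handler via a "bl"
 * that is immediately followed by two words, the (virtual) address of
 * the handler and the address to return to when the handler is done;
 * transfer_to_handler_cont fetches both through LR.  On entry r9/r12
 * hold the saved MSR/NIP, r10 holds the MSR value the handler will run
 * with, and r11 points to the exception frame.
 */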
127	.globl	transfer_to_handler_full
128transfer_to_handler_full:
129	SAVE_NVGPRS(r11)
130	/* fall through */
131
132	.globl	transfer_to_handler
133transfer_to_handler:
134	stw	r2,GPR2(r11)
135	stw	r12,_NIP(r11)
136	stw	r9,_MSR(r11)
137	andi.	r2,r9,MSR_PR
138	mfctr	r12
139	mfspr	r2,SPRN_XER
140	stw	r12,_CTR(r11)
141	stw	r2,_XER(r11)
142	mfspr	r12,SPRN_SPRG_THREAD
143	tovirt_vmstack r12, r12
144	beq	2f			/* if from user, fix up THREAD.regs */
145	addi	r2, r12, -THREAD
146	addi	r11,r1,STACK_FRAME_OVERHEAD
147	stw	r11,PT_REGS(r12)
148#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
149	/* Check to see if the dbcr0 register is set up to debug.  Use the
150	   internal debug mode bit to do this. */
151	lwz	r12,THREAD_DBCR0(r12)
152	andis.	r12,r12,DBCR0_IDM@h
153#endif
154	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
155#ifdef CONFIG_PPC_BOOK3S_32
156	kuep_lock r11, r12
157#endif
158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
159	beq+	3f
160	/* From user and task is ptraced - load up global dbcr0 */
161	li	r12,-1			/* clear all pending debug events */
162	mtspr	SPRN_DBSR,r12
163	lis	r11,global_dbcr0@ha
164	tophys(r11,r11)
165	addi	r11,r11,global_dbcr0@l
166#ifdef CONFIG_SMP
167	lwz	r9,TASK_CPU(r2)
168	slwi	r9,r9,3
169	add	r11,r11,r9
170#endif
171	lwz	r12,0(r11)
172	mtspr	SPRN_DBCR0,r12
173	lwz	r12,4(r11)
174	addi	r12,r12,-1
175	stw	r12,4(r11)
176#endif
177
178	b	3f
179
1802:	/* if from kernel, check interrupted DOZE/NAP mode and
181         * check for stack overflow
182         */
183	kuap_save_and_lock r11, r12, r9, r2, r6
184	addi	r2, r12, -THREAD
185#ifndef CONFIG_VMAP_STACK
186	lwz	r9,KSP_LIMIT(r12)
187	cmplw	r1,r9			/* if r1 <= ksp_limit */
188	ble-	stack_ovf		/* then the kernel stack overflowed */
189#endif
1905:
191#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
192	lwz	r12,TI_LOCAL_FLAGS(r2)
193	mtcrf	0x01,r12
194	bt-	31-TLF_NAPPING,4f
195	bt-	31-TLF_SLEEPING,7f
196#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
197	.globl transfer_to_handler_cont
198transfer_to_handler_cont:
1993:
200	mflr	r9
201	tovirt_novmstack r2, r2 	/* set r2 to current */
202	tovirt_vmstack r9, r9
203	lwz	r11,0(r9)		/* virtual address of handler */
204	lwz	r9,4(r9)		/* where to go when done */
205#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
206	mtspr	SPRN_NRI, r0
207#endif
208#ifdef CONFIG_TRACE_IRQFLAGS
209	/*
210	 * When tracing IRQ state (lockdep) we enable the MMU before we call
211	 * the IRQ tracing functions as they might access vmalloc space or
212	 * perform IOs for console output.
213	 *
214	 * To speed up the syscall path where interrupts stay on, let's check
215	 * first if we are changing the MSR value at all.
216	 */
217	tophys_novmstack r12, r1
218	lwz	r12,_MSR(r12)
219	andi.	r12,r12,MSR_EE
220	bne	1f
221
222	/* MSR isn't changing, just transition directly */
223#endif
224	mtspr	SPRN_SRR0,r11
225	mtspr	SPRN_SRR1,r10
226	mtlr	r9
227	SYNC
228	RFI				/* jump to handler, enable MMU */
229
230#ifdef CONFIG_TRACE_IRQFLAGS
2311:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
232	 * keep interrupts disabled at this point, otherwise we risk taking an
233	 * interrupt before we tell lockdep they are enabled.
234	 */
235	lis	r12,reenable_mmu@h
236	ori	r12,r12,reenable_mmu@l
237	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
238	mtspr	SPRN_SRR0,r12
239	mtspr	SPRN_SRR1,r0
240	SYNC
241	RFI
242
243reenable_mmu:
244	/*
245	 * We save a bunch of GPRs:
246	 * r3 can be different from GPR3(r1) at this point, r9 and r11
247	 * contain the old MSR value and the handler address respectively,
248	 * and r4 & r5 can contain page fault arguments that need to be
249	 * passed along as well.  r0, r6-r8, r12, CCR, CTR, XER etc. are
250	 * left clobbered as they aren't useful past this point.
251	 */
252
253	stwu	r1,-32(r1)
254	stw	r9,8(r1)
255	stw	r11,12(r1)
256	stw	r3,16(r1)
257	stw	r4,20(r1)
258	stw	r5,24(r1)
259
260	/* If we are disabling interrupts (normal case), simply log it with
261	 * lockdep
262	 */
2631:	bl	trace_hardirqs_off
264	lwz	r5,24(r1)
265	lwz	r4,20(r1)
266	lwz	r3,16(r1)
267	lwz	r11,12(r1)
268	lwz	r9,8(r1)
269	addi	r1,r1,32
270	mtctr	r11
271	mtlr	r9
272	bctr				/* jump to handler */
273#endif /* CONFIG_TRACE_IRQFLAGS */
274
275#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2764:	rlwinm	r12,r12,0,~_TLF_NAPPING
277	stw	r12,TI_LOCAL_FLAGS(r2)
278	b	power_save_ppc32_restore
279
2807:	rlwinm	r12,r12,0,~_TLF_SLEEPING
281	stw	r12,TI_LOCAL_FLAGS(r2)
282	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
283	rlwinm	r9,r9,0,~MSR_EE
284	lwz	r12,_LINK(r11)		/* and return to address in LR */
285	kuap_restore r11, r2, r3, r4, r5
286	lwz	r2, GPR2(r11)
287	b	fast_exception_return
288#endif
289
290#ifndef CONFIG_VMAP_STACK
291/*
292 * On kernel stack overflow, load up an initial stack pointer
293 * and call StackOverflow(regs), which should not return.
294 */
295stack_ovf:
296	/* sometimes we use a statically-allocated stack, which is OK. */
297	lis	r12,_end@h
298	ori	r12,r12,_end@l
299	cmplw	r1,r12
300	ble	5b			/* r1 <= &_end is OK */
301	SAVE_NVGPRS(r11)
302	addi	r3,r1,STACK_FRAME_OVERHEAD
303	lis	r1,init_thread_union@ha
304	addi	r1,r1,init_thread_union@l
305	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
306	lis	r9,StackOverflow@ha
307	addi	r9,r9,StackOverflow@l
308	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
309#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
310	mtspr	SPRN_NRI, r0
311#endif
312	mtspr	SPRN_SRR0,r9
313	mtspr	SPRN_SRR1,r10
314	SYNC
315	RFI
316#endif
317
318#ifdef CONFIG_TRACE_IRQFLAGS
319trace_syscall_entry_irq_off:
320	/*
321	 * A syscall shouldn't happen while interrupts are disabled,
322	 * so emit a warning here.
323	 */
3240:	trap
325	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
326	bl	trace_hardirqs_on
327
328	/* Now enable for real */
329	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
330	mtmsr	r10
331
332	REST_GPR(0, r1)
333	REST_4GPRS(3, r1)
334	REST_2GPRS(7, r1)
335	b	DoSyscall
336#endif /* CONFIG_TRACE_IRQFLAGS */
337
338	.globl	transfer_to_syscall
339transfer_to_syscall:
340#ifdef CONFIG_TRACE_IRQFLAGS
341	andi.	r12,r9,MSR_EE
342	beq-	trace_syscall_entry_irq_off
343#endif /* CONFIG_TRACE_IRQFLAGS */
344
345/*
346 * Handle a system call.
347 */
348	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
349	.stabs	"entry_32.S",N_SO,0,0,0f
3500:
351
352_GLOBAL(DoSyscall)
353	stw	r3,ORIG_GPR3(r1)
354	li	r12,0
355	stw	r12,RESULT(r1)
356#ifdef CONFIG_TRACE_IRQFLAGS
357	/* Make sure interrupts are enabled */
358	mfmsr	r11
359	andi.	r12,r11,MSR_EE
360	/* If we came in with interrupts disabled, WARN and mark them enabled
361	 * for lockdep now */
3620:	tweqi	r12, 0
363	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
364#endif /* CONFIG_TRACE_IRQFLAGS */
365	lwz	r11,TI_FLAGS(r2)
366	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
367	bne-	syscall_dotrace
368syscall_dotrace_cont:
369	cmplwi	0,r0,NR_syscalls
370	lis	r10,sys_call_table@h
371	ori	r10,r10,sys_call_table@l
372	slwi	r0,r0,2
373	bge-	66f
374
375	barrier_nospec_asm
376	/*
377	 * Prevent the load of the handler below (based on the user-passed
378	 * system call number) from being speculatively executed until the
379	 * test against NR_syscalls and the branch to 66f above have
380	 * committed.
381	 */
382
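	/*
	 * Rough C-level sketch of the dispatch below (illustrative only;
	 * the out-of-range case is label 66 further down):
	 *
	 *	if (nr >= NR_syscalls)
	 *		return -ENOSYS;
	 *	barrier_nospec();
	 *	handler = sys_call_table[nr];
	 *	return handler(r3, r4, r5, r6, r7, r8);
	 */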
383	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
384	mtlr	r10
385	addi	r9,r1,STACK_FRAME_OVERHEAD
386	PPC440EP_ERR42
387	blrl			/* Call handler */
388	.globl	ret_from_syscall
389ret_from_syscall:
390#ifdef CONFIG_DEBUG_RSEQ
391	/* Check whether the syscall is issued inside a restartable sequence */
392	stw	r3,GPR3(r1)
393	addi    r3,r1,STACK_FRAME_OVERHEAD
394	bl      rseq_syscall
395	lwz	r3,GPR3(r1)
396#endif
397	mr	r6,r3
398	/* disable interrupts so current_thread_info()->flags can't change */
399	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
400	/* Note: We don't bother telling lockdep about it */
401	SYNC
402	mtmsr	r10
403	lwz	r9,TI_FLAGS(r2)
404	li	r8,-MAX_ERRNO
405	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
406	bne-	syscall_exit_work
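	/*
	 * Syscall return convention: values in [-MAX_ERRNO, -1] are errors;
	 * they are negated to a positive errno and CR0[SO] is set so that
	 * user space sees the failure.  Anything else is returned unchanged.
	 */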
407	cmplw	0,r3,r8
408	blt+	syscall_exit_cont
409	lwz	r11,_CCR(r1)			/* Load CR */
410	neg	r3,r3
411	oris	r11,r11,0x1000	/* Set SO bit in CR */
412	stw	r11,_CCR(r1)
413syscall_exit_cont:
414	lwz	r8,_MSR(r1)
415#ifdef CONFIG_TRACE_IRQFLAGS
416	/* If we are going to return from the syscall with interrupts
417	 * off, we trace that here. It shouldn't normally happen.
418	 */
419	andi.	r10,r8,MSR_EE
420	bne+	1f
421	stw	r3,GPR3(r1)
422	bl      trace_hardirqs_off
423	lwz	r3,GPR3(r1)
4241:
425#endif /* CONFIG_TRACE_IRQFLAGS */
426#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
427	/* If the process has its own DBCR0 value, load it up.  The internal
428	   debug mode bit tells us that dbcr0 should be loaded. */
429	lwz	r0,THREAD+THREAD_DBCR0(r2)
430	andis.	r10,r0,DBCR0_IDM@h
431	bnel-	load_dbcr0
432#endif
433#ifdef CONFIG_44x
434BEGIN_MMU_FTR_SECTION
435	lis	r4,icache_44x_need_flush@ha
436	lwz	r5,icache_44x_need_flush@l(r4)
437	cmplwi	cr0,r5,0
438	bne-	2f
4391:
440END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
441#endif /* CONFIG_44x */
442BEGIN_FTR_SECTION
443	lwarx	r7,0,r1
444END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
445	stwcx.	r0,0,r1			/* to clear the reservation */
446	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
447#ifdef CONFIG_PPC_BOOK3S_32
448	kuep_unlock r5, r7
449#endif
450	kuap_check r2, r4
451	lwz	r4,_LINK(r1)
452	lwz	r5,_CCR(r1)
453	mtlr	r4
454	mtcr	r5
455	lwz	r7,_NIP(r1)
456	lwz	r2,GPR2(r1)
457	lwz	r1,GPR1(r1)
458#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
459	mtspr	SPRN_NRI, r0
460#endif
461	mtspr	SPRN_SRR0,r7
462	mtspr	SPRN_SRR1,r8
463	SYNC
464	RFI
465#ifdef CONFIG_44x
4662:	li	r7,0
467	iccci	r0,r0
468	stw	r7,icache_44x_need_flush@l(r4)
469	b	1b
470#endif  /* CONFIG_44x */
471
47266:	li	r3,-ENOSYS
473	b	ret_from_syscall
474
475	.globl	ret_from_fork
476ret_from_fork:
477	REST_NVGPRS(r1)
478	bl	schedule_tail
479	li	r3,0
480	b	ret_from_syscall
481
482	.globl	ret_from_kernel_thread
483ret_from_kernel_thread:
484	REST_NVGPRS(r1)
485	bl	schedule_tail
486	mtlr	r14
487	mr	r3,r15
488	PPC440EP_ERR42
489	blrl
490	li	r3,0
491	b	ret_from_syscall
492
493/* Traced system call support */
494syscall_dotrace:
495	SAVE_NVGPRS(r1)
496	li	r0,0xc00
497	stw	r0,_TRAP(r1)
498	addi	r3,r1,STACK_FRAME_OVERHEAD
499	bl	do_syscall_trace_enter
500	/*
501	 * Restore argument registers possibly just changed.
502	 * We use the return value of do_syscall_trace_enter
503	 * for call number to look up in the table (r0).
504	 */
505	mr	r0,r3
506	lwz	r3,GPR3(r1)
507	lwz	r4,GPR4(r1)
508	lwz	r5,GPR5(r1)
509	lwz	r6,GPR6(r1)
510	lwz	r7,GPR7(r1)
511	lwz	r8,GPR8(r1)
512	REST_NVGPRS(r1)
513
514	cmplwi	r0,NR_syscalls
515	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
516	bge-	ret_from_syscall
517	b	syscall_dotrace_cont
518
519syscall_exit_work:
520	andi.	r0,r9,_TIF_RESTOREALL
521	beq+	0f
522	REST_NVGPRS(r1)
523	b	2f
5240:	cmplw	0,r3,r8
525	blt+	1f
526	andi.	r0,r9,_TIF_NOERROR
527	bne-	1f
528	lwz	r11,_CCR(r1)			/* Load CR */
529	neg	r3,r3
530	oris	r11,r11,0x1000	/* Set SO bit in CR */
531	stw	r11,_CCR(r1)
532
5331:	stw	r6,RESULT(r1)	/* Save result */
534	stw	r3,GPR3(r1)	/* Update return value */
5352:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
536	beq	4f
537
538	/* Clear per-syscall TIF flags if any are set.  */
539
540	li	r11,_TIF_PERSYSCALL_MASK
541	addi	r12,r2,TI_FLAGS
5423:	lwarx	r8,0,r12
543	andc	r8,r8,r11
544#ifdef CONFIG_IBM405_ERR77
545	dcbt	0,r12
546#endif
547	stwcx.	r8,0,r12
548	bne-	3b
549
5504:	/* Anything which requires enabling interrupts? */
551	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
552	beq	ret_from_except
553
554	/* Re-enable interrupts. There is no need to trace that with
555	 * lockdep as we are supposed to have IRQs on at this point
556	 */
557	ori	r10,r10,MSR_EE
558	SYNC
559	mtmsr	r10
560
561	/* Save NVGPRS if they're not saved already */
562	lwz	r4,_TRAP(r1)
563	andi.	r4,r4,1
564	beq	5f
565	SAVE_NVGPRS(r1)
566	li	r4,0xc00
567	stw	r4,_TRAP(r1)
5685:
569	addi	r3,r1,STACK_FRAME_OVERHEAD
570	bl	do_syscall_trace_leave
571	b	ret_from_except_full
572
573	/*
574	 * The system call was issued from kernel mode. We get here with SRR1 in r9.
575	 * Mark the exception as recoverable once we have retrieved SRR0,
576	 * emit a warning trap and return ENOSYS with CR[SO] set.
577	 */
578	.globl	ret_from_kernel_syscall
579ret_from_kernel_syscall:
580	mfspr	r9, SPRN_SRR0
581	mfspr	r10, SPRN_SRR1
582#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
583	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
584	mtmsr	r11
585#endif
586
5870:	trap
588	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
589
590	li	r3, ENOSYS
591	crset	so
592#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
593	mtspr	SPRN_NRI, r0
594#endif
595	mtspr	SPRN_SRR0, r9
596	mtspr	SPRN_SRR1, r10
597	SYNC
598	RFI
599
600/*
601 * The fork/clone functions need to copy the full register set into
602 * the child process. Therefore we need to save all the nonvolatile
603 * registers (r13 - r31) before calling the C code.
604 */
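/*
 * The low bit of the _TRAP word in the exception frame is set while only
 * the volatile registers have been saved; clearing it below, after
 * SAVE_NVGPRS, records that the frame now holds the full register set.
 */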
605	.globl	ppc_fork
606ppc_fork:
607	SAVE_NVGPRS(r1)
608	lwz	r0,_TRAP(r1)
609	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
610	stw	r0,_TRAP(r1)		/* register set saved */
611	b	sys_fork
612
613	.globl	ppc_vfork
614ppc_vfork:
615	SAVE_NVGPRS(r1)
616	lwz	r0,_TRAP(r1)
617	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
618	stw	r0,_TRAP(r1)		/* register set saved */
619	b	sys_vfork
620
621	.globl	ppc_clone
622ppc_clone:
623	SAVE_NVGPRS(r1)
624	lwz	r0,_TRAP(r1)
625	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
626	stw	r0,_TRAP(r1)		/* register set saved */
627	b	sys_clone
628
629	.globl	ppc_clone3
630ppc_clone3:
631	SAVE_NVGPRS(r1)
632	lwz	r0,_TRAP(r1)
633	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
634	stw	r0,_TRAP(r1)		/* register set saved */
635	b	sys_clone3
636
637	.globl	ppc_swapcontext
638ppc_swapcontext:
639	SAVE_NVGPRS(r1)
640	lwz	r0,_TRAP(r1)
641	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
642	stw	r0,_TRAP(r1)		/* register set saved */
643	b	sys_swapcontext
644
645/*
646 * Top-level page fault handling.
647 * This is in assembler because if do_page_fault tells us that
648 * it is a bad kernel page fault, we want to save the non-volatile
649 * registers before calling bad_page_fault.
650 */
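/*
 * r3 is pointed at the pt_regs below; r4 and r5 are expected to already
 * hold the faulting address and the fault bits (DSISR or ESR), as set
 * up by the exception prologue.  A non-zero return from do_page_fault()
 * is the signal number for a bad kernel fault and is passed on to
 * bad_page_fault().
 */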
651	.globl	handle_page_fault
652handle_page_fault:
653	addi	r3,r1,STACK_FRAME_OVERHEAD
654#ifdef CONFIG_PPC_BOOK3S_32
655	andis.  r0,r5,DSISR_DABRMATCH@h
656	bne-    handle_dabr_fault
657#endif
658	bl	do_page_fault
659	cmpwi	r3,0
660	beq+	ret_from_except
661	SAVE_NVGPRS(r1)
662	lwz	r0,_TRAP(r1)
663	clrrwi	r0,r0,1
664	stw	r0,_TRAP(r1)
665	mr	r5,r3
666	addi	r3,r1,STACK_FRAME_OVERHEAD
667	lwz	r4,_DAR(r1)
668	bl	bad_page_fault
669	b	ret_from_except_full
670
671#ifdef CONFIG_PPC_BOOK3S_32
672	/* We have a data breakpoint exception - handle it */
673handle_dabr_fault:
674	SAVE_NVGPRS(r1)
675	lwz	r0,_TRAP(r1)
676	clrrwi	r0,r0,1
677	stw	r0,_TRAP(r1)
678	bl      do_break
679	b	ret_from_except_full
680#endif
681
682/*
683 * This routine switches between two different tasks.  The process
684 * state of one is saved on its kernel stack.  Then the state
685 * of the other is restored from its kernel stack.  The memory
686 * management hardware is updated to the second process's state.
687 * Finally, we can return to the second process.
688 * On entry, r3 points to the THREAD for the current task, r4
689 * points to the THREAD for the new task.
690 *
691 * This routine is always called with interrupts disabled.
692 *
693 * Note: there are two ways to get to the "going out" portion
694 * of this code; either by coming in via the entry (_switch)
695 * or via "fork" which must set up an environment equivalent
696 * to the "_switch" path.  If you change this, you'll have to
697 * change the fork code also.
698 *
699 * The code which creates the new task context is in 'copy_thread'
700 * in arch/powerpc/kernel/process.c
701 */
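/*
 * The task we are switching away from is returned in r3 ("last") so
 * that the scheduler code running in the new task can tell which task
 * it switched from.
 */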
702_GLOBAL(_switch)
703	stwu	r1,-INT_FRAME_SIZE(r1)
704	mflr	r0
705	stw	r0,INT_FRAME_SIZE+4(r1)
706	/* r3-r12 are caller saved -- Cort */
707	SAVE_NVGPRS(r1)
708	stw	r0,_NIP(r1)	/* Return to switch caller */
709	mfmsr	r11
710	li	r0,MSR_FP	/* Disable floating-point */
711#ifdef CONFIG_ALTIVEC
712BEGIN_FTR_SECTION
713	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
714	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
715	stw	r12,THREAD+THREAD_VRSAVE(r2)
716END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
717#endif /* CONFIG_ALTIVEC */
718#ifdef CONFIG_SPE
719BEGIN_FTR_SECTION
720	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
721	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
722	stw	r12,THREAD+THREAD_SPEFSCR(r2)
723END_FTR_SECTION_IFSET(CPU_FTR_SPE)
724#endif /* CONFIG_SPE */
725	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
726	beq+	1f
727	andc	r11,r11,r0
728	mtmsr	r11
729	isync
7301:	stw	r11,_MSR(r1)
731	mfcr	r10
732	stw	r10,_CCR(r1)
733	stw	r1,KSP(r3)	/* Set old stack pointer */
734
735	kuap_check r2, r0
736#ifdef CONFIG_SMP
737	/* We need a sync somewhere here to make sure that if the
738	 * previous task gets rescheduled on another CPU, it sees all
739	 * stores it has performed on this one.
740	 */
741	sync
742#endif /* CONFIG_SMP */
743
744	tophys(r0,r4)
745	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
746	lwz	r1,KSP(r4)	/* Load new stack pointer */
747
748	/* save the old current 'last' for return value */
749	mr	r3,r2
750	addi	r2,r4,-THREAD	/* Update current */
751
752#ifdef CONFIG_ALTIVEC
753BEGIN_FTR_SECTION
754	lwz	r0,THREAD+THREAD_VRSAVE(r2)
755	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
756END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
757#endif /* CONFIG_ALTIVEC */
758#ifdef CONFIG_SPE
759BEGIN_FTR_SECTION
760	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
761	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
762END_FTR_SECTION_IFSET(CPU_FTR_SPE)
763#endif /* CONFIG_SPE */
764
765	lwz	r0,_CCR(r1)
766	mtcrf	0xFF,r0
767	/* r3-r12 are destroyed -- Cort */
768	REST_NVGPRS(r1)
769
770	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
771	mtlr	r4
772	addi	r1,r1,INT_FRAME_SIZE
773	blr
774
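/*
 * Fast exception exit: on entry r9 holds the MSR to return with, r11
 * points to the exception frame and r12 holds the return NIP.  Only a
 * subset of the frame (r1, r3-r6, r9-r12, CR, LR) is restored before
 * the rfi, bypassing the full ret_from_except path.
 */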
775	.globl	fast_exception_return
776fast_exception_return:
777#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
778	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
779	beq	1f			/* if not, we've got problems */
780#endif
781
7822:	REST_4GPRS(3, r11)
783	lwz	r10,_CCR(r11)
784	REST_GPR(1, r11)
785	mtcr	r10
786	lwz	r10,_LINK(r11)
787	mtlr	r10
788	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
789	li	r10, 0
790	stw	r10, 8(r11)
791	REST_GPR(10, r11)
792#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
793	mtspr	SPRN_NRI, r0
794#endif
795	mtspr	SPRN_SRR1,r9
796	mtspr	SPRN_SRR0,r12
797	REST_GPR(9, r11)
798	REST_GPR(12, r11)
799	lwz	r11,GPR11(r11)
800	SYNC
801	RFI
802
803#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
804/* check if the exception happened in a restartable section */
8051:	lis	r3,exc_exit_restart_end@ha
806	addi	r3,r3,exc_exit_restart_end@l
807	cmplw	r12,r3
808#ifdef CONFIG_PPC_BOOK3S_601
809	bge	2b
810#else
811	bge	3f
812#endif
813	lis	r4,exc_exit_restart@ha
814	addi	r4,r4,exc_exit_restart@l
815	cmplw	r12,r4
816#ifdef CONFIG_PPC_BOOK3S_601
817	blt	2b
818#else
819	blt	3f
820#endif
821	lis	r3,fee_restarts@ha
822	tophys(r3,r3)
823	lwz	r5,fee_restarts@l(r3)
824	addi	r5,r5,1
825	stw	r5,fee_restarts@l(r3)
826	mr	r12,r4		/* restart at exc_exit_restart */
827	b	2b
828
829	.section .bss
830	.align	2
831fee_restarts:
832	.space	4
833	.previous
834
835/* aargh, a nonrecoverable interrupt, panic */
836/* aargh, we don't know which trap this is */
837/* but the 601 doesn't implement the RI bit, so assume it's OK */
8383:
839	li	r10,-1
840	stw	r10,_TRAP(r11)
841	addi	r3,r1,STACK_FRAME_OVERHEAD
842	lis	r10,MSR_KERNEL@h
843	ori	r10,r10,MSR_KERNEL@l
844	bl	transfer_to_handler_full
845	.long	unrecoverable_exception
846	.long	ret_from_except
847#endif
848
849	.globl	ret_from_except_full
850ret_from_except_full:
851	REST_NVGPRS(r1)
852	/* fall through */
853
854	.globl	ret_from_except
855ret_from_except:
856	/* Hard-disable interrupts so that current_thread_info()->flags
857	 * can't change between when we test it and when we return
858	 * from the interrupt. */
859	/* Note: We don't bother telling lockdep about it */
860	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
861	SYNC			/* Some chip revs have problems here... */
862	mtmsr	r10		/* disable interrupts */
863
864	lwz	r3,_MSR(r1)	/* Returning to user mode? */
865	andi.	r0,r3,MSR_PR
866	beq	resume_kernel
867
868user_exc_return:		/* r10 contains MSR_KERNEL here */
869	/* Check current_thread_info()->flags */
870	lwz	r9,TI_FLAGS(r2)
871	andi.	r0,r9,_TIF_USER_WORK_MASK
872	bne	do_work
873
874restore_user:
875#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
876	/* Check whether this process has its own DBCR0 value.  The internal
877	   debug mode bit tells us that dbcr0 should be loaded. */
878	lwz	r0,THREAD+THREAD_DBCR0(r2)
879	andis.	r10,r0,DBCR0_IDM@h
880	bnel-	load_dbcr0
881#endif
882	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
883#ifdef CONFIG_PPC_BOOK3S_32
884	kuep_unlock	r10, r11
885#endif
886
887	b	restore
888
889/* N.B. the only way to get here is from the beq following ret_from_except. */
890resume_kernel:
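	/*
	 * _TIF_EMULATE_STACK_STORE is set when an "stwu" updating r1 has been
	 * emulated (e.g. while single-stepping a kprobe) and the store to the
	 * new stack location could not be performed at that time.  Complete it
	 * here: copy the exception frame below the new stack pointer, switch
	 * r1 to the copy, and do the deferred store of the back chain.
	 */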
891	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
892	lwz	r8,TI_FLAGS(r2)
893	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
894	beq+	1f
895
896	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
897
898	lwz	r3,GPR1(r1)
899	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
900	mr	r4,r1			/* src:  current exception frame */
901	mr	r1,r3			/* Reroute the trampoline frame to r1 */
902
903	/* Copy from the original to the trampoline. */
904	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
905	li	r6,0			/* start offset: 0 */
906	mtctr	r5
9072:	lwzx	r0,r6,r4
908	stwx	r0,r6,r3
909	addi	r6,r6,4
910	bdnz	2b
911
912	/* Do real store operation to complete stwu */
913	lwz	r5,GPR1(r1)
914	stw	r8,0(r5)
915
916	/* Clear _TIF_EMULATE_STACK_STORE flag */
917	lis	r11,_TIF_EMULATE_STACK_STORE@h
918	addi	r5,r2,TI_FLAGS
9190:	lwarx	r8,0,r5
920	andc	r8,r8,r11
921#ifdef CONFIG_IBM405_ERR77
922	dcbt	0,r5
923#endif
924	stwcx.	r8,0,r5
925	bne-	0b
9261:
927
928#ifdef CONFIG_PREEMPTION
929	/* check current_thread_info->preempt_count */
930	lwz	r0,TI_PREEMPT(r2)
931	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
932	bne	restore_kuap
933	andi.	r8,r8,_TIF_NEED_RESCHED
934	beq+	restore_kuap
935	lwz	r3,_MSR(r1)
936	andi.	r0,r3,MSR_EE	/* interrupts off? */
937	beq	restore_kuap	/* don't schedule if so */
938#ifdef CONFIG_TRACE_IRQFLAGS
939	/* Lockdep thinks irqs are enabled; we need to call
940	 * preempt_schedule_irq with IRQs off, so we inform lockdep
941	 * now that we -did- already turn them off
942	 */
943	bl	trace_hardirqs_off
944#endif
945	bl	preempt_schedule_irq
946#ifdef CONFIG_TRACE_IRQFLAGS
947	/* And now, to properly rebalance the above, we tell lockdep they
948	 * are being turned back on, which will happen when we return
949	 */
950	bl	trace_hardirqs_on
951#endif
952#endif /* CONFIG_PREEMPTION */
953restore_kuap:
954	kuap_restore r1, r2, r9, r10, r0
955
956	/* interrupts are hard-disabled at this point */
957restore:
958#ifdef CONFIG_44x
959BEGIN_MMU_FTR_SECTION
960	b	1f
961END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
962	lis	r4,icache_44x_need_flush@ha
963	lwz	r5,icache_44x_need_flush@l(r4)
964	cmplwi	cr0,r5,0
965	beq+	1f
966	li	r6,0
967	iccci	r0,r0
968	stw	r6,icache_44x_need_flush@l(r4)
9691:
970#endif  /* CONFIG_44x */
971
972	lwz	r9,_MSR(r1)
973#ifdef CONFIG_TRACE_IRQFLAGS
974	/* Lockdep doesn't know that IRQs are temporarily turned off in this
975	 * assembly code while peeking at TI_FLAGS() and such. However, we need
976	 * to inform it if the exception turned interrupts off and we are
977	 * about to turn them back on.
978	 */
979	andi.	r10,r9,MSR_EE
980	beq	1f
981	stwu	r1,-32(r1)
982	mflr	r0
983	stw	r0,4(r1)
984	bl	trace_hardirqs_on
985	addi	r1, r1, 32
986	lwz	r9,_MSR(r1)
9871:
988#endif /* CONFIG_TRACE_IRQFLAGS */
989
990	lwz	r0,GPR0(r1)
991	lwz	r2,GPR2(r1)
992	REST_4GPRS(3, r1)
993	REST_2GPRS(7, r1)
994
995	lwz	r10,_XER(r1)
996	lwz	r11,_CTR(r1)
997	mtspr	SPRN_XER,r10
998	mtctr	r11
999
1000	PPC405_ERR77(0,r1)
1001BEGIN_FTR_SECTION
1002	lwarx	r11,0,r1
1003END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1004	stwcx.	r0,0,r1			/* to clear the reservation */
1005
1006#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
1007	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
1008	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
1009
1010	lwz	r10,_CCR(r1)
1011	lwz	r11,_LINK(r1)
1012	mtcrf	0xFF,r10
1013	mtlr	r11
1014
1015	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1016	li	r10, 0
1017	stw	r10, 8(r1)
1018	/*
1019	 * Once we put values in SRR0 and SRR1, we are in a state
1020	 * where exceptions are not recoverable, since taking an
1021	 * exception will trash SRR0 and SRR1.  Therefore we clear the
1022	 * MSR:RI bit to indicate this.  If we do take an exception,
1023	 * we can't return to the point of the exception but we
1024	 * can restart the exception exit path at the label
1025	 * exc_exit_restart below.  -- paulus
1026	 */
1027	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1028	SYNC
1029	mtmsr	r10		/* clear the RI bit */
1030	.globl exc_exit_restart
1031exc_exit_restart:
1032	lwz	r12,_NIP(r1)
1033	mtspr	SPRN_SRR0,r12
1034	mtspr	SPRN_SRR1,r9
1035	REST_4GPRS(9, r1)
1036	lwz	r1,GPR1(r1)
1037	.globl exc_exit_restart_end
1038exc_exit_restart_end:
1039	SYNC
1040	RFI
1041
1042#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1043	/*
1044	 * This is a bit different on 4xx/Book-E because it doesn't have
1045	 * the RI bit in the MSR.
1046	 * The TLB miss handler checks if we have interrupted
1047	 * the exception exit path and restarts it if so
1048	 * (well maybe one day it will... :).
1049	 */
1050	lwz	r11,_LINK(r1)
1051	mtlr	r11
1052	lwz	r10,_CCR(r1)
1053	mtcrf	0xff,r10
1054	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1055	li	r10, 0
1056	stw	r10, 8(r1)
1057	REST_2GPRS(9, r1)
1058	.globl exc_exit_restart
1059exc_exit_restart:
1060	lwz	r11,_NIP(r1)
1061	lwz	r12,_MSR(r1)
1062exc_exit_start:
1063	mtspr	SPRN_SRR0,r11
1064	mtspr	SPRN_SRR1,r12
1065	REST_2GPRS(11, r1)
1066	lwz	r1,GPR1(r1)
1067	.globl exc_exit_restart_end
1068exc_exit_restart_end:
1069	PPC405_ERR77_SYNC
1070	rfi
1071	b	.			/* prevent prefetch past rfi */
1072
1073/*
1074 * Returning from a critical interrupt in user mode doesn't need
1075 * to be any different from a normal exception.  For a critical
1076 * interrupt in the kernel, we just return (without checking for
1077 * preemption) since the interrupt may have happened at some crucial
1078 * place (e.g. inside the TLB miss handler), and because we will be
1079 * running with r1 pointing into critical_stack, not the current
1080 * process's kernel stack (and therefore current_thread_info() will
1081 * give the wrong answer).
1082 * We have to restore various SPRs that may have been in use at the
1083 * time of the critical interrupt.
1084 *
1085 */
1086#ifdef CONFIG_40x
1087#define PPC_40x_TURN_OFF_MSR_DR						    \
1088	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1089	 * assume the instructions here are mapped by a pinned TLB entry */ \
1090	li	r10,MSR_IR;						    \
1091	mtmsr	r10;							    \
1092	isync;								    \
1093	tophys(r1, r1);
1094#else
1095#define PPC_40x_TURN_OFF_MSR_DR
1096#endif
1097
1098#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1099	REST_NVGPRS(r1);						\
1100	lwz	r3,_MSR(r1);						\
1101	andi.	r3,r3,MSR_PR;						\
1102	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
1103	bne	user_exc_return;					\
1104	lwz	r0,GPR0(r1);						\
1105	lwz	r2,GPR2(r1);						\
1106	REST_4GPRS(3, r1);						\
1107	REST_2GPRS(7, r1);						\
1108	lwz	r10,_XER(r1);						\
1109	lwz	r11,_CTR(r1);						\
1110	mtspr	SPRN_XER,r10;						\
1111	mtctr	r11;							\
1112	PPC405_ERR77(0,r1);						\
1113	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1114	lwz	r11,_LINK(r1);						\
1115	mtlr	r11;							\
1116	lwz	r10,_CCR(r1);						\
1117	mtcrf	0xff,r10;						\
1118	PPC_40x_TURN_OFF_MSR_DR;					\
1119	lwz	r9,_DEAR(r1);						\
1120	lwz	r10,_ESR(r1);						\
1121	mtspr	SPRN_DEAR,r9;						\
1122	mtspr	SPRN_ESR,r10;						\
1123	lwz	r11,_NIP(r1);						\
1124	lwz	r12,_MSR(r1);						\
1125	mtspr	exc_lvl_srr0,r11;					\
1126	mtspr	exc_lvl_srr1,r12;					\
1127	lwz	r9,GPR9(r1);						\
1128	lwz	r12,GPR12(r1);						\
1129	lwz	r10,GPR10(r1);						\
1130	lwz	r11,GPR11(r1);						\
1131	lwz	r1,GPR1(r1);						\
1132	PPC405_ERR77_SYNC;						\
1133	exc_lvl_rfi;							\
1134	b	.;		/* prevent prefetch past exc_lvl_rfi */
1135
1136#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1137	lwz	r9,_##exc_lvl_srr0(r1);					\
1138	lwz	r10,_##exc_lvl_srr1(r1);				\
1139	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1140	mtspr	SPRN_##exc_lvl_srr1,r10;
1141
1142#if defined(CONFIG_PPC_BOOK3E_MMU)
1143#ifdef CONFIG_PHYS_64BIT
1144#define	RESTORE_MAS7							\
1145	lwz	r11,MAS7(r1);						\
1146	mtspr	SPRN_MAS7,r11;
1147#else
1148#define	RESTORE_MAS7
1149#endif /* CONFIG_PHYS_64BIT */
1150#define RESTORE_MMU_REGS						\
1151	lwz	r9,MAS0(r1);						\
1152	lwz	r10,MAS1(r1);						\
1153	lwz	r11,MAS2(r1);						\
1154	mtspr	SPRN_MAS0,r9;						\
1155	lwz	r9,MAS3(r1);						\
1156	mtspr	SPRN_MAS1,r10;						\
1157	lwz	r10,MAS6(r1);						\
1158	mtspr	SPRN_MAS2,r11;						\
1159	mtspr	SPRN_MAS3,r9;						\
1160	mtspr	SPRN_MAS6,r10;						\
1161	RESTORE_MAS7;
1162#elif defined(CONFIG_44x)
1163#define RESTORE_MMU_REGS						\
1164	lwz	r9,MMUCR(r1);						\
1165	mtspr	SPRN_MMUCR,r9;
1166#else
1167#define RESTORE_MMU_REGS
1168#endif
1169
1170#ifdef CONFIG_40x
1171	.globl	ret_from_crit_exc
1172ret_from_crit_exc:
1173	mfspr	r9,SPRN_SPRG_THREAD
1174	lis	r10,saved_ksp_limit@ha;
1175	lwz	r10,saved_ksp_limit@l(r10);
1176	tovirt(r9,r9);
1177	stw	r10,KSP_LIMIT(r9)
1178	lis	r9,crit_srr0@ha;
1179	lwz	r9,crit_srr0@l(r9);
1180	lis	r10,crit_srr1@ha;
1181	lwz	r10,crit_srr1@l(r10);
1182	mtspr	SPRN_SRR0,r9;
1183	mtspr	SPRN_SRR1,r10;
1184	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1185#endif /* CONFIG_40x */
1186
1187#ifdef CONFIG_BOOKE
1188	.globl	ret_from_crit_exc
1189ret_from_crit_exc:
1190	mfspr	r9,SPRN_SPRG_THREAD
1191	lwz	r10,SAVED_KSP_LIMIT(r1)
1192	stw	r10,KSP_LIMIT(r9)
1193	RESTORE_xSRR(SRR0,SRR1);
1194	RESTORE_MMU_REGS;
1195	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1196
1197	.globl	ret_from_debug_exc
1198ret_from_debug_exc:
1199	mfspr	r9,SPRN_SPRG_THREAD
1200	lwz	r10,SAVED_KSP_LIMIT(r1)
1201	stw	r10,KSP_LIMIT(r9)
1202	RESTORE_xSRR(SRR0,SRR1);
1203	RESTORE_xSRR(CSRR0,CSRR1);
1204	RESTORE_MMU_REGS;
1205	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1206
1207	.globl	ret_from_mcheck_exc
1208ret_from_mcheck_exc:
1209	mfspr	r9,SPRN_SPRG_THREAD
1210	lwz	r10,SAVED_KSP_LIMIT(r1)
1211	stw	r10,KSP_LIMIT(r9)
1212	RESTORE_xSRR(SRR0,SRR1);
1213	RESTORE_xSRR(CSRR0,CSRR1);
1214	RESTORE_xSRR(DSRR0,DSRR1);
1215	RESTORE_MMU_REGS;
1216	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1217#endif /* CONFIG_BOOKE */
1218
1219/*
1220 * Load the DBCR0 value for a task that is being ptraced,
1221 * having first saved away the global DBCR0.  Note that r0
1222 * has the dbcr0 value to set upon entry to this.
1223 */
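/*
 * global_dbcr0 (defined below) holds two words per CPU: the saved
 * DBCR0 value and a usage counter.  load_dbcr0 stores the old DBCR0
 * and increments the counter; the kernel entry path reloads the saved
 * value and decrements the counter again for a task running with
 * DBCR0_IDM set.
 */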
1224load_dbcr0:
1225	mfmsr	r10		/* first disable debug exceptions */
1226	rlwinm	r10,r10,0,~MSR_DE
1227	mtmsr	r10
1228	isync
1229	mfspr	r10,SPRN_DBCR0
1230	lis	r11,global_dbcr0@ha
1231	addi	r11,r11,global_dbcr0@l
1232#ifdef CONFIG_SMP
1233	lwz	r9,TASK_CPU(r2)
1234	slwi	r9,r9,3
1235	add	r11,r11,r9
1236#endif
1237	stw	r10,0(r11)
1238	mtspr	SPRN_DBCR0,r0
1239	lwz	r10,4(r11)
1240	addi	r10,r10,1
1241	stw	r10,4(r11)
1242	li	r11,-1
1243	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1244	blr
1245
1246	.section .bss
1247	.align	4
1248	.global global_dbcr0
1249global_dbcr0:
1250	.space	8*NR_CPUS
1251	.previous
1252#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1253
1254do_work:			/* r10 contains MSR_KERNEL here */
1255	andi.	r0,r9,_TIF_NEED_RESCHED
1256	beq	do_user_signal
1257
1258do_resched:			/* r10 contains MSR_KERNEL here */
1259#ifdef CONFIG_TRACE_IRQFLAGS
1260	bl	trace_hardirqs_on
1261	mfmsr	r10
1262#endif
1263	ori	r10,r10,MSR_EE
1264	SYNC
1265	mtmsr	r10		/* hard-enable interrupts */
1266	bl	schedule
1267recheck:
1268	/* Note: we don't tell lockdep that we are disabling them
1269	 * again either. The disable/enable cycles used to peek at
1270	 * TI_FLAGS aren't advertised to it.
1271	 */
1272	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1273	SYNC
1274	mtmsr	r10		/* disable interrupts */
1275	lwz	r9,TI_FLAGS(r2)
1276	andi.	r0,r9,_TIF_NEED_RESCHED
1277	bne-	do_resched
1278	andi.	r0,r9,_TIF_USER_WORK_MASK
1279	beq	restore_user
1280do_user_signal:			/* r10 contains MSR_KERNEL here */
1281	ori	r10,r10,MSR_EE
1282	SYNC
1283	mtmsr	r10		/* hard-enable interrupts */
1284	/* save r13-r31 in the exception frame, if not already done */
1285	lwz	r3,_TRAP(r1)
1286	andi.	r0,r3,1
1287	beq	2f
1288	SAVE_NVGPRS(r1)
1289	rlwinm	r3,r3,0,0,30
1290	stw	r3,_TRAP(r1)
12912:	addi	r3,r1,STACK_FRAME_OVERHEAD
1292	mr	r4,r9
1293	bl	do_notify_resume
1294	REST_NVGPRS(r1)
1295	b	recheck
1296
1297/*
1298 * We come here when we are at the end of handling an exception
1299 * that occurred at a place where taking an exception will lose
1300 * state information, such as the contents of SRR0 and SRR1.
1301 */
1302nonrecoverable:
1303	lis	r10,exc_exit_restart_end@ha
1304	addi	r10,r10,exc_exit_restart_end@l
1305	cmplw	r12,r10
1306#ifdef CONFIG_PPC_BOOK3S_601
1307	bgelr
1308#else
1309	bge	3f
1310#endif
1311	lis	r11,exc_exit_restart@ha
1312	addi	r11,r11,exc_exit_restart@l
1313	cmplw	r12,r11
1314#ifdef CONFIG_PPC_BOOK3S_601
1315	bltlr
1316#else
1317	blt	3f
1318#endif
1319	lis	r10,ee_restarts@ha
1320	lwz	r12,ee_restarts@l(r10)
1321	addi	r12,r12,1
1322	stw	r12,ee_restarts@l(r10)
1323	mr	r12,r11		/* restart at exc_exit_restart */
1324	blr
13253:	/* OK, we can't recover, kill this process */
1326	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1327	lwz	r3,_TRAP(r1)
1328	andi.	r0,r3,1
1329	beq	5f
1330	SAVE_NVGPRS(r1)
1331	rlwinm	r3,r3,0,0,30
1332	stw	r3,_TRAP(r1)
13335:	mfspr	r2,SPRN_SPRG_THREAD
1334	addi	r2,r2,-THREAD
1335	tovirt(r2,r2)			/* set back r2 to current */
13364:	addi	r3,r1,STACK_FRAME_OVERHEAD
1337	bl	unrecoverable_exception
1338	/* shouldn't return */
1339	b	4b
1340
1341	.section .bss
1342	.align	2
1343ee_restarts:
1344	.space	4
1345	.previous
1346
1347/*
1348 * PROM code for specific machines follows.  Put it
1349 * here so it's easy to add arch-specific sections later.
1350 * -- Cort
1351 */
1352#ifdef CONFIG_PPC_RTAS
1353/*
1354 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1355 * called with the MMU off.
1356 */
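/*
 * The physical address of the kernel stack is stashed in thread.rtas_sp
 * (THREAD + RTAS_SP below) for the duration of the call, so that a
 * machine check taken while inside RTAS can be recognised and routed to
 * machine_check_in_rtas.
 */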
1357_GLOBAL(enter_rtas)
1358	stwu	r1,-INT_FRAME_SIZE(r1)
1359	mflr	r0
1360	stw	r0,INT_FRAME_SIZE+4(r1)
1361	LOAD_REG_ADDR(r4, rtas)
1362	lis	r6,1f@ha	/* physical return address for rtas */
1363	addi	r6,r6,1f@l
1364	tophys(r6,r6)
1365	tophys_novmstack r7, r1
1366	lwz	r8,RTASENTRY(r4)
1367	lwz	r4,RTASBASE(r4)
1368	mfmsr	r9
1369	stw	r9,8(r1)
1370	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1371	SYNC			/* disable interrupts so SRR0/1 */
1372	mtmsr	r0		/* don't get trashed */
1373	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1374	mtlr	r6
1375	stw	r7, THREAD + RTAS_SP(r2)
1376	mtspr	SPRN_SRR0,r8
1377	mtspr	SPRN_SRR1,r9
1378	RFI
13791:	tophys_novmstack r9, r1
1380#ifdef CONFIG_VMAP_STACK
1381	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
1382	mtmsr	r0
1383	isync
1384#endif
1385	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1386	lwz	r9,8(r9)	/* original msr value */
1387	addi	r1,r1,INT_FRAME_SIZE
1388	li	r0,0
1389	tophys_novmstack r7, r2
1390	stw	r0, THREAD + RTAS_SP(r7)
1391	mtspr	SPRN_SRR0,r8
1392	mtspr	SPRN_SRR1,r9
1393	RFI			/* return to caller */
1394
1395	.globl	machine_check_in_rtas
1396machine_check_in_rtas:
1397	twi	31,0,0
1398	/* XXX load up BATs and panic */
1399
1400#endif /* CONFIG_PPC_RTAS */
1401