/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1
 * fit into one page, so that no TLB miss can be taken between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
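	/*
	 * A critical interrupt can arrive while the interrupted code is
	 * in the middle of a TLB update, so preserve the MAS registers
	 * (and MMUCR on 44x) before the handler can clobber them.
	 */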
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
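	/*
	 * The 40x critical-exception prolog stashed r10/r11 in fixed
	 * low-memory words (addressed via @l(0)); move them into the
	 * exception frame, and likewise park SRR0/SRR1 in low memory
	 * for the duration of the handler.
	 */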
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
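/*
 * Entry register conventions, as used below (inferred from the stores
 * into the frame and believed to match the head_32.h prologs): r11 holds
 * the physical address of the exception frame, r12 the interrupted NIP
 * (SRR0), r9 the interrupted MSR (SRR1), r10 the MSR value to run the
 * handler with, and LR points to a two-word transfer table containing
 * the handler address and the return address.
 */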
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	kuap_save_and_lock r11, r12, r9, r2, r0
	addi	r2, r12, -THREAD
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt(r2, r2)			/* set r2 to current */
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys(r12, r1)
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	SYNC
	RFI

reenable_mmu:
	/*
	 * We save a bunch of GPRs: r3 can differ from GPR3(r1) at this
	 * point, r9 and r11 contain the old MSR and handler address
	 * respectively, and r4/r5 can contain page fault arguments that
	 * need to be passed along as well. r12, CCR, CTR, XER etc... are
	 * left clobbered as they aren't useful past this point (they
	 * aren't syscall arguments); the rest is restored from the
	 * exception frame.
	 */

	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
2:	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
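/*
 * PPC32 syscall ABI: the syscall number arrives in r0 and the arguments
 * in r3..r8; the result goes back in r3, with an error reported by
 * setting CR0.SO and returning the positive errno value.
 */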
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* If we came in with interrupts disabled, WARN and mark them
	 * enabled for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and the branch to 66f above have
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      rseq_syscall
	lwz	r3,GPR3(r1)
#endif
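	/* Keep a copy of the raw return value; syscall_exit_work (below)
	 * stores it in RESULT(r1) */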
	mr	r6,r3
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl      trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
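	/*
	 * On 440 (not 47x), the MMU code may flag that the icache needs
	 * flushing before we return to user mode; if so, do the
	 * invalidate (iccci) at 2: below.
	 */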
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

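/*
 * First return for a new kernel thread: copy_thread() appears to seed
 * r14 with the thread function and r15 with its argument; call it, and
 * fall into the syscall return path if it ever returns.
 */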
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore the argument registers, which may just have been
	 * changed. We use the return value of do_syscall_trace_enter
	 * as the call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
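/*
 * The low bit of the saved _TRAP word flags a partial frame; clearing
 * it below records that the non-volatile registers are now saved too.
 */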
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
	andis.  r0,r5,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl      do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r4
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

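/*
 * Fast exception exit: expects r11 to point at the exception frame,
 * with r9 and r12 holding the MSR and NIP to return to, mirroring the
 * values saved by the entry code.
 */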
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
#ifdef CONFIG_PPC_BOOK3S_601
	bge	2b
#else
	bge	3f
#endif
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
#ifdef CONFIG_PPC_BOOK3S_601
	blt	2b
#else
	blt	3f
#endif
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

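	/*
	 * A stwu to the kernel stack was emulated earlier (e.g. by
	 * kprobes); its target would have clobbered this exception frame,
	 * so replay it here: copy the frame down to its new location,
	 * then perform the deferred store.
	 */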
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR; we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

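/*
 * Common exit path for the higher exception levels (critical, debug,
 * machine check): restore the full register set and return using the
 * level-specific SRR pair and rfi variant supplied by the caller.
 */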
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

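/* Re-load an SRR pair that a nested exception level may have clobbered. */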
#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

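/*
 * Per-CPU save area used above and by the exception entry path: two
 * words per CPU, holding the saved global DBCR0 value and what appears
 * to be a use count (incremented here, decremented on exception entry).
 */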
	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we don't tell lockdep that we are disabling interrupts
	 * again here either; these disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
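/*
 * If the interrupted NIP falls inside the exc_exit_restart window, the
 * exit sequence can simply be restarted; otherwise the state is gone
 * and the process must be killed.
 */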
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
#ifdef CONFIG_PPC_BOOK3S_601
	bgelr
#else
	bge	3f
#endif
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
#ifdef CONFIG_PPC_BOOK3S_601
	bltlr
#else
	blt	3f
#endif
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys(r7, r2)
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */