xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision 5a1ea477)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ptrace.h>
35#include <asm/export.h>
36#include <asm/asm-405.h>
37#include <asm/feature-fixups.h>
38#include <asm/barrier.h>
39#include <asm/kup.h>
40#include <asm/bug.h>
41
42#include "head_32.h"
43
44/*
45 * Align to 4k in order to ensure that all functions modyfing srr0/srr1
46 * fit into one page in order to not encounter a TLB miss between the
47 * modification of srr0/srr1 and the associated rfi.
48 */
49	.align	12
50
51#ifdef CONFIG_BOOKE
52	.globl	mcheck_transfer_to_handler
53mcheck_transfer_to_handler:
54	mfspr	r0,SPRN_DSRR0
55	stw	r0,_DSRR0(r11)
56	mfspr	r0,SPRN_DSRR1
57	stw	r0,_DSRR1(r11)
58	/* fall through */
59
60	.globl	debug_transfer_to_handler
61debug_transfer_to_handler:
62	mfspr	r0,SPRN_CSRR0
63	stw	r0,_CSRR0(r11)
64	mfspr	r0,SPRN_CSRR1
65	stw	r0,_CSRR1(r11)
66	/* fall through */
67
68	.globl	crit_transfer_to_handler
69crit_transfer_to_handler:
70#ifdef CONFIG_PPC_BOOK3E_MMU
71	mfspr	r0,SPRN_MAS0
72	stw	r0,MAS0(r11)
73	mfspr	r0,SPRN_MAS1
74	stw	r0,MAS1(r11)
75	mfspr	r0,SPRN_MAS2
76	stw	r0,MAS2(r11)
77	mfspr	r0,SPRN_MAS3
78	stw	r0,MAS3(r11)
79	mfspr	r0,SPRN_MAS6
80	stw	r0,MAS6(r11)
81#ifdef CONFIG_PHYS_64BIT
82	mfspr	r0,SPRN_MAS7
83	stw	r0,MAS7(r11)
84#endif /* CONFIG_PHYS_64BIT */
85#endif /* CONFIG_PPC_BOOK3E_MMU */
86#ifdef CONFIG_44x
87	mfspr	r0,SPRN_MMUCR
88	stw	r0,MMUCR(r11)
89#endif
90	mfspr	r0,SPRN_SRR0
91	stw	r0,_SRR0(r11)
92	mfspr	r0,SPRN_SRR1
93	stw	r0,_SRR1(r11)
94
95	/* set the stack limit to the current stack */
96	mfspr	r8,SPRN_SPRG_THREAD
97	lwz	r0,KSP_LIMIT(r8)
98	stw	r0,SAVED_KSP_LIMIT(r11)
99	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
100	stw	r0,KSP_LIMIT(r8)
101	/* fall through */
102#endif
103
104#ifdef CONFIG_40x
105	.globl	crit_transfer_to_handler
106crit_transfer_to_handler:
107	lwz	r0,crit_r10@l(0)
108	stw	r0,GPR10(r11)
109	lwz	r0,crit_r11@l(0)
110	stw	r0,GPR11(r11)
111	mfspr	r0,SPRN_SRR0
112	stw	r0,crit_srr0@l(0)
113	mfspr	r0,SPRN_SRR1
114	stw	r0,crit_srr1@l(0)
115
116	/* set the stack limit to the current stack */
117	mfspr	r8,SPRN_SPRG_THREAD
118	lwz	r0,KSP_LIMIT(r8)
119	stw	r0,saved_ksp_limit@l(0)
120	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
121	stw	r0,KSP_LIMIT(r8)
122	/* fall through */
123#endif
124
125/*
126 * This code finishes saving the registers to the exception frame
127 * and jumps to the appropriate handler for the exception, turning
128 * on address translation.
129 * Note that we rely on the caller having set cr0.eq iff the exception
130 * occurred in kernel mode (i.e. MSR:PR = 0).
131 */
132	.globl	transfer_to_handler_full
133transfer_to_handler_full:
134	SAVE_NVGPRS(r11)
135	/* fall through */
136
137	.globl	transfer_to_handler
138transfer_to_handler:
139	stw	r2,GPR2(r11)
140	stw	r12,_NIP(r11)
141	stw	r9,_MSR(r11)
142	andi.	r2,r9,MSR_PR
143	mfctr	r12
144	mfspr	r2,SPRN_XER
145	stw	r12,_CTR(r11)
146	stw	r2,_XER(r11)
147	mfspr	r12,SPRN_SPRG_THREAD
148	beq	2f			/* if from user, fix up THREAD.regs */
149	addi	r2, r12, -THREAD
150	addi	r11,r1,STACK_FRAME_OVERHEAD
151	stw	r11,PT_REGS(r12)
152#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
153	/* Check to see if the dbcr0 register is set up to debug.  Use the
154	   internal debug mode bit to do this. */
155	lwz	r12,THREAD_DBCR0(r12)
156	andis.	r12,r12,DBCR0_IDM@h
157#endif
158	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
159#ifdef CONFIG_PPC_BOOK3S_32
160	kuep_lock r11, r12
161#endif
162#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
163	beq+	3f
164	/* From user and task is ptraced - load up global dbcr0 */
165	li	r12,-1			/* clear all pending debug events */
166	mtspr	SPRN_DBSR,r12
167	lis	r11,global_dbcr0@ha
168	tophys(r11,r11)
169	addi	r11,r11,global_dbcr0@l
170#ifdef CONFIG_SMP
171	lwz	r9,TASK_CPU(r2)
172	slwi	r9,r9,3
173	add	r11,r11,r9
174#endif
175	lwz	r12,0(r11)
176	mtspr	SPRN_DBCR0,r12
177	lwz	r12,4(r11)
178	addi	r12,r12,-1
179	stw	r12,4(r11)
180#endif
181
182	b	3f
183
1842:	/* if from kernel, check interrupted DOZE/NAP mode and
185         * check for stack overflow
186         */
187	kuap_save_and_lock r11, r12, r9, r2, r0
188	addi	r2, r12, -THREAD
189	lwz	r9,KSP_LIMIT(r12)
190	cmplw	r1,r9			/* if r1 <= ksp_limit */
191	ble-	stack_ovf		/* then the kernel stack overflowed */
1925:
193#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
194	lwz	r12,TI_LOCAL_FLAGS(r2)
195	mtcrf	0x01,r12
196	bt-	31-TLF_NAPPING,4f
197	bt-	31-TLF_SLEEPING,7f
198#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
199	.globl transfer_to_handler_cont
200transfer_to_handler_cont:
2013:
202	mflr	r9
203	tovirt(r2, r2)			/* set r2 to current */
204	lwz	r11,0(r9)		/* virtual address of handler */
205	lwz	r9,4(r9)		/* where to go when done */
206#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
207	mtspr	SPRN_NRI, r0
208#endif
209#ifdef CONFIG_TRACE_IRQFLAGS
210	/*
211	 * When tracing IRQ state (lockdep) we enable the MMU before we call
212	 * the IRQ tracing functions as they might access vmalloc space or
213	 * perform IOs for console output.
214	 *
215	 * To speed up the syscall path where interrupts stay on, let's check
216	 * first if we are changing the MSR value at all.
217	 */
218	tophys(r12, r1)
219	lwz	r12,_MSR(r12)
220	andi.	r12,r12,MSR_EE
221	bne	1f
222
223	/* MSR isn't changing, just transition directly */
224#endif
225	mtspr	SPRN_SRR0,r11
226	mtspr	SPRN_SRR1,r10
227	mtlr	r9
228	SYNC
229	RFI				/* jump to handler, enable MMU */
230
231#ifdef CONFIG_TRACE_IRQFLAGS
2321:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
233	 * keep interrupts disabled at this point otherwise we might risk
234	 * taking an interrupt before we tell lockdep they are enabled.
235	 */
236	lis	r12,reenable_mmu@h
237	ori	r12,r12,reenable_mmu@l
238	LOAD_MSR_KERNEL(r0, MSR_KERNEL)
239	mtspr	SPRN_SRR0,r12
240	mtspr	SPRN_SRR1,r0
241	SYNC
242	RFI
243
244reenable_mmu:
245	/*
246	 * We save a bunch of GPRs,
247	 * r3 can be different from GPR3(r1) at this point, r9 and r11
248	 * contains the old MSR and handler address respectively,
249	 * r4 & r5 can contain page fault arguments that need to be passed
250	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
251	 * they aren't useful past this point (aren't syscall arguments),
252	 * the rest is restored from the exception frame.
253	 */
254
255	stwu	r1,-32(r1)
256	stw	r9,8(r1)
257	stw	r11,12(r1)
258	stw	r3,16(r1)
259	stw	r4,20(r1)
260	stw	r5,24(r1)
261
262	/* If we are disabling interrupts (normal case), simply log it with
263	 * lockdep
264	 */
2651:	bl	trace_hardirqs_off
2662:	lwz	r5,24(r1)
267	lwz	r4,20(r1)
268	lwz	r3,16(r1)
269	lwz	r11,12(r1)
270	lwz	r9,8(r1)
271	addi	r1,r1,32
272	lwz	r0,GPR0(r1)
273	lwz	r6,GPR6(r1)
274	lwz	r7,GPR7(r1)
275	lwz	r8,GPR8(r1)
276	mtctr	r11
277	mtlr	r9
278	bctr				/* jump to handler */
279#endif /* CONFIG_TRACE_IRQFLAGS */
280
281#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2824:	rlwinm	r12,r12,0,~_TLF_NAPPING
283	stw	r12,TI_LOCAL_FLAGS(r2)
284	b	power_save_ppc32_restore
285
2867:	rlwinm	r12,r12,0,~_TLF_SLEEPING
287	stw	r12,TI_LOCAL_FLAGS(r2)
288	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
289	rlwinm	r9,r9,0,~MSR_EE
290	lwz	r12,_LINK(r11)		/* and return to address in LR */
291	kuap_restore r11, r2, r3, r4, r5
292	b	fast_exception_return
293#endif
294
295/*
296 * On kernel stack overflow, load up an initial stack pointer
297 * and call StackOverflow(regs), which should not return.
298 */
299stack_ovf:
300	/* sometimes we use a statically-allocated stack, which is OK. */
301	lis	r12,_end@h
302	ori	r12,r12,_end@l
303	cmplw	r1,r12
304	ble	5b			/* r1 <= &_end is OK */
305	SAVE_NVGPRS(r11)
306	addi	r3,r1,STACK_FRAME_OVERHEAD
307	lis	r1,init_thread_union@ha
308	addi	r1,r1,init_thread_union@l
309	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
310	lis	r9,StackOverflow@ha
311	addi	r9,r9,StackOverflow@l
312	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
313#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
314	mtspr	SPRN_NRI, r0
315#endif
316	mtspr	SPRN_SRR0,r9
317	mtspr	SPRN_SRR1,r10
318	SYNC
319	RFI
320
321#ifdef CONFIG_TRACE_IRQFLAGS
322trace_syscall_entry_irq_off:
323	/*
324	 * Syscall shouldn't happen while interrupts are disabled,
325	 * so let's do a warning here.
326	 */
3270:	trap
328	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
329	bl	trace_hardirqs_on
330
331	/* Now enable for real */
332	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
333	mtmsr	r10
334
335	REST_GPR(0, r1)
336	REST_4GPRS(3, r1)
337	REST_2GPRS(7, r1)
338	b	DoSyscall
339#endif /* CONFIG_TRACE_IRQFLAGS */
340
341	.globl	transfer_to_syscall
342transfer_to_syscall:
343#ifdef CONFIG_TRACE_IRQFLAGS
344	andi.	r12,r9,MSR_EE
345	beq-	trace_syscall_entry_irq_off
346#endif /* CONFIG_TRACE_IRQFLAGS */
347
348/*
349 * Handle a system call.
350 */
351	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
352	.stabs	"entry_32.S",N_SO,0,0,0f
3530:
354
355_GLOBAL(DoSyscall)
356	stw	r3,ORIG_GPR3(r1)
357	li	r12,0
358	stw	r12,RESULT(r1)
359#ifdef CONFIG_TRACE_IRQFLAGS
360	/* Make sure interrupts are enabled */
361	mfmsr	r11
362	andi.	r12,r11,MSR_EE
363	/* We came in with interrupts disabled, we WARN and mark them enabled
364	 * for lockdep now */
3650:	tweqi	r12, 0
366	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
367#endif /* CONFIG_TRACE_IRQFLAGS */
368	lwz	r11,TI_FLAGS(r2)
369	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
370	bne-	syscall_dotrace
371syscall_dotrace_cont:
372	cmplwi	0,r0,NR_syscalls
373	lis	r10,sys_call_table@h
374	ori	r10,r10,sys_call_table@l
375	slwi	r0,r0,2
376	bge-	66f
377
378	barrier_nospec_asm
379	/*
380	 * Prevent the load of the handler below (based on the user-passed
381	 * system call number) being speculatively executed until the test
382	 * against NR_syscalls and branch to .66f above has
383	 * committed.
384	 */
385
386	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
387	mtlr	r10
388	addi	r9,r1,STACK_FRAME_OVERHEAD
389	PPC440EP_ERR42
390	blrl			/* Call handler */
391	.globl	ret_from_syscall
392ret_from_syscall:
393#ifdef CONFIG_DEBUG_RSEQ
394	/* Check whether the syscall is issued inside a restartable sequence */
395	stw	r3,GPR3(r1)
396	addi    r3,r1,STACK_FRAME_OVERHEAD
397	bl      rseq_syscall
398	lwz	r3,GPR3(r1)
399#endif
400	mr	r6,r3
401	/* disable interrupts so current_thread_info()->flags can't change */
402	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
403	/* Note: We don't bother telling lockdep about it */
404	SYNC
405	MTMSRD(r10)
406	lwz	r9,TI_FLAGS(r2)
407	li	r8,-MAX_ERRNO
408	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
409	bne-	syscall_exit_work
410	cmplw	0,r3,r8
411	blt+	syscall_exit_cont
412	lwz	r11,_CCR(r1)			/* Load CR */
413	neg	r3,r3
414	oris	r11,r11,0x1000	/* Set SO bit in CR */
415	stw	r11,_CCR(r1)
416syscall_exit_cont:
417	lwz	r8,_MSR(r1)
418#ifdef CONFIG_TRACE_IRQFLAGS
419	/* If we are going to return from the syscall with interrupts
420	 * off, we trace that here. It shouldn't normally happen.
421	 */
422	andi.	r10,r8,MSR_EE
423	bne+	1f
424	stw	r3,GPR3(r1)
425	bl      trace_hardirqs_off
426	lwz	r3,GPR3(r1)
4271:
428#endif /* CONFIG_TRACE_IRQFLAGS */
429#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
430	/* If the process has its own DBCR0 value, load it up.  The internal
431	   debug mode bit tells us that dbcr0 should be loaded. */
432	lwz	r0,THREAD+THREAD_DBCR0(r2)
433	andis.	r10,r0,DBCR0_IDM@h
434	bnel-	load_dbcr0
435#endif
436#ifdef CONFIG_44x
437BEGIN_MMU_FTR_SECTION
438	lis	r4,icache_44x_need_flush@ha
439	lwz	r5,icache_44x_need_flush@l(r4)
440	cmplwi	cr0,r5,0
441	bne-	2f
4421:
443END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
444#endif /* CONFIG_44x */
445BEGIN_FTR_SECTION
446	lwarx	r7,0,r1
447END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
448	stwcx.	r0,0,r1			/* to clear the reservation */
449	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
450#ifdef CONFIG_PPC_BOOK3S_32
451	kuep_unlock r5, r7
452#endif
453	kuap_check r2, r4
454	lwz	r4,_LINK(r1)
455	lwz	r5,_CCR(r1)
456	mtlr	r4
457	mtcr	r5
458	lwz	r7,_NIP(r1)
459	lwz	r2,GPR2(r1)
460	lwz	r1,GPR1(r1)
461#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
462	mtspr	SPRN_NRI, r0
463#endif
464	mtspr	SPRN_SRR0,r7
465	mtspr	SPRN_SRR1,r8
466	SYNC
467	RFI
468#ifdef CONFIG_44x
4692:	li	r7,0
470	iccci	r0,r0
471	stw	r7,icache_44x_need_flush@l(r4)
472	b	1b
473#endif  /* CONFIG_44x */
474
47566:	li	r3,-ENOSYS
476	b	ret_from_syscall
477
478	.globl	ret_from_fork
479ret_from_fork:
480	REST_NVGPRS(r1)
481	bl	schedule_tail
482	li	r3,0
483	b	ret_from_syscall
484
485	.globl	ret_from_kernel_thread
486ret_from_kernel_thread:
487	REST_NVGPRS(r1)
488	bl	schedule_tail
489	mtlr	r14
490	mr	r3,r15
491	PPC440EP_ERR42
492	blrl
493	li	r3,0
494	b	ret_from_syscall
495
496/* Traced system call support */
497syscall_dotrace:
498	SAVE_NVGPRS(r1)
499	li	r0,0xc00
500	stw	r0,_TRAP(r1)
501	addi	r3,r1,STACK_FRAME_OVERHEAD
502	bl	do_syscall_trace_enter
503	/*
504	 * Restore argument registers possibly just changed.
505	 * We use the return value of do_syscall_trace_enter
506	 * for call number to look up in the table (r0).
507	 */
508	mr	r0,r3
509	lwz	r3,GPR3(r1)
510	lwz	r4,GPR4(r1)
511	lwz	r5,GPR5(r1)
512	lwz	r6,GPR6(r1)
513	lwz	r7,GPR7(r1)
514	lwz	r8,GPR8(r1)
515	REST_NVGPRS(r1)
516
517	cmplwi	r0,NR_syscalls
518	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
519	bge-	ret_from_syscall
520	b	syscall_dotrace_cont
521
522syscall_exit_work:
523	andi.	r0,r9,_TIF_RESTOREALL
524	beq+	0f
525	REST_NVGPRS(r1)
526	b	2f
5270:	cmplw	0,r3,r8
528	blt+	1f
529	andi.	r0,r9,_TIF_NOERROR
530	bne-	1f
531	lwz	r11,_CCR(r1)			/* Load CR */
532	neg	r3,r3
533	oris	r11,r11,0x1000	/* Set SO bit in CR */
534	stw	r11,_CCR(r1)
535
5361:	stw	r6,RESULT(r1)	/* Save result */
537	stw	r3,GPR3(r1)	/* Update return value */
5382:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
539	beq	4f
540
541	/* Clear per-syscall TIF flags if any are set.  */
542
543	li	r11,_TIF_PERSYSCALL_MASK
544	addi	r12,r2,TI_FLAGS
5453:	lwarx	r8,0,r12
546	andc	r8,r8,r11
547#ifdef CONFIG_IBM405_ERR77
548	dcbt	0,r12
549#endif
550	stwcx.	r8,0,r12
551	bne-	3b
552
5534:	/* Anything which requires enabling interrupts? */
554	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
555	beq	ret_from_except
556
557	/* Re-enable interrupts. There is no need to trace that with
558	 * lockdep as we are supposed to have IRQs on at this point
559	 */
560	ori	r10,r10,MSR_EE
561	SYNC
562	MTMSRD(r10)
563
564	/* Save NVGPRS if they're not saved already */
565	lwz	r4,_TRAP(r1)
566	andi.	r4,r4,1
567	beq	5f
568	SAVE_NVGPRS(r1)
569	li	r4,0xc00
570	stw	r4,_TRAP(r1)
5715:
572	addi	r3,r1,STACK_FRAME_OVERHEAD
573	bl	do_syscall_trace_leave
574	b	ret_from_except_full
575
576/*
577 * The fork/clone functions need to copy the full register set into
578 * the child process. Therefore we need to save all the nonvolatile
579 * registers (r13 - r31) before calling the C code.
580 */
581	.globl	ppc_fork
582ppc_fork:
583	SAVE_NVGPRS(r1)
584	lwz	r0,_TRAP(r1)
585	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
586	stw	r0,_TRAP(r1)		/* register set saved */
587	b	sys_fork
588
589	.globl	ppc_vfork
590ppc_vfork:
591	SAVE_NVGPRS(r1)
592	lwz	r0,_TRAP(r1)
593	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
594	stw	r0,_TRAP(r1)		/* register set saved */
595	b	sys_vfork
596
597	.globl	ppc_clone
598ppc_clone:
599	SAVE_NVGPRS(r1)
600	lwz	r0,_TRAP(r1)
601	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
602	stw	r0,_TRAP(r1)		/* register set saved */
603	b	sys_clone
604
605	.globl	ppc_swapcontext
606ppc_swapcontext:
607	SAVE_NVGPRS(r1)
608	lwz	r0,_TRAP(r1)
609	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
610	stw	r0,_TRAP(r1)		/* register set saved */
611	b	sys_swapcontext
612
613/*
614 * Top-level page fault handling.
615 * This is in assembler because if do_page_fault tells us that
616 * it is a bad kernel page fault, we want to save the non-volatile
617 * registers before calling bad_page_fault.
618 */
619	.globl	handle_page_fault
620handle_page_fault:
621	stw	r4,_DAR(r1)
622	addi	r3,r1,STACK_FRAME_OVERHEAD
623#ifdef CONFIG_PPC_BOOK3S_32
624	andis.  r0,r5,DSISR_DABRMATCH@h
625	bne-    handle_dabr_fault
626#endif
627	bl	do_page_fault
628	cmpwi	r3,0
629	beq+	ret_from_except
630	SAVE_NVGPRS(r1)
631	lwz	r0,_TRAP(r1)
632	clrrwi	r0,r0,1
633	stw	r0,_TRAP(r1)
634	mr	r5,r3
635	addi	r3,r1,STACK_FRAME_OVERHEAD
636	lwz	r4,_DAR(r1)
637	bl	bad_page_fault
638	b	ret_from_except_full
639
640#ifdef CONFIG_PPC_BOOK3S_32
641	/* We have a data breakpoint exception - handle it */
642handle_dabr_fault:
643	SAVE_NVGPRS(r1)
644	lwz	r0,_TRAP(r1)
645	clrrwi	r0,r0,1
646	stw	r0,_TRAP(r1)
647	bl      do_break
648	b	ret_from_except_full
649#endif
650
651/*
652 * This routine switches between two different tasks.  The process
653 * state of one is saved on its kernel stack.  Then the state
654 * of the other is restored from its kernel stack.  The memory
655 * management hardware is updated to the second process's state.
656 * Finally, we can return to the second process.
657 * On entry, r3 points to the THREAD for the current task, r4
658 * points to the THREAD for the new task.
659 *
660 * This routine is always called with interrupts disabled.
661 *
662 * Note: there are two ways to get to the "going out" portion
663 * of this code; either by coming in via the entry (_switch)
664 * or via "fork" which must set up an environment equivalent
665 * to the "_switch" path.  If you change this , you'll have to
666 * change the fork code also.
667 *
668 * The code which creates the new task context is in 'copy_thread'
669 * in arch/ppc/kernel/process.c
670 */
671_GLOBAL(_switch)
672	stwu	r1,-INT_FRAME_SIZE(r1)
673	mflr	r0
674	stw	r0,INT_FRAME_SIZE+4(r1)
675	/* r3-r12 are caller saved -- Cort */
676	SAVE_NVGPRS(r1)
677	stw	r0,_NIP(r1)	/* Return to switch caller */
678	mfmsr	r11
679	li	r0,MSR_FP	/* Disable floating-point */
680#ifdef CONFIG_ALTIVEC
681BEGIN_FTR_SECTION
682	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
683	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
684	stw	r12,THREAD+THREAD_VRSAVE(r2)
685END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
686#endif /* CONFIG_ALTIVEC */
687#ifdef CONFIG_SPE
688BEGIN_FTR_SECTION
689	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
690	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
691	stw	r12,THREAD+THREAD_SPEFSCR(r2)
692END_FTR_SECTION_IFSET(CPU_FTR_SPE)
693#endif /* CONFIG_SPE */
694	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
695	beq+	1f
696	andc	r11,r11,r0
697	MTMSRD(r11)
698	isync
6991:	stw	r11,_MSR(r1)
700	mfcr	r10
701	stw	r10,_CCR(r1)
702	stw	r1,KSP(r3)	/* Set old stack pointer */
703
704	kuap_check r2, r4
705#ifdef CONFIG_SMP
706	/* We need a sync somewhere here to make sure that if the
707	 * previous task gets rescheduled on another CPU, it sees all
708	 * stores it has performed on this one.
709	 */
710	sync
711#endif /* CONFIG_SMP */
712
713	tophys(r0,r4)
714	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
715	lwz	r1,KSP(r4)	/* Load new stack pointer */
716
717	/* save the old current 'last' for return value */
718	mr	r3,r2
719	addi	r2,r4,-THREAD	/* Update current */
720
721#ifdef CONFIG_ALTIVEC
722BEGIN_FTR_SECTION
723	lwz	r0,THREAD+THREAD_VRSAVE(r2)
724	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
725END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
726#endif /* CONFIG_ALTIVEC */
727#ifdef CONFIG_SPE
728BEGIN_FTR_SECTION
729	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
730	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
731END_FTR_SECTION_IFSET(CPU_FTR_SPE)
732#endif /* CONFIG_SPE */
733
734	lwz	r0,_CCR(r1)
735	mtcrf	0xFF,r0
736	/* r3-r12 are destroyed -- Cort */
737	REST_NVGPRS(r1)
738
739	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
740	mtlr	r4
741	addi	r1,r1,INT_FRAME_SIZE
742	blr
743
744	.globl	fast_exception_return
745fast_exception_return:
746#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
747	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
748	beq	1f			/* if not, we've got problems */
749#endif
750
7512:	REST_4GPRS(3, r11)
752	lwz	r10,_CCR(r11)
753	REST_GPR(1, r11)
754	mtcr	r10
755	lwz	r10,_LINK(r11)
756	mtlr	r10
757	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
758	li	r10, 0
759	stw	r10, 8(r11)
760	REST_GPR(10, r11)
761#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
762	mtspr	SPRN_NRI, r0
763#endif
764	mtspr	SPRN_SRR1,r9
765	mtspr	SPRN_SRR0,r12
766	REST_GPR(9, r11)
767	REST_GPR(12, r11)
768	lwz	r11,GPR11(r11)
769	SYNC
770	RFI
771
772#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
773/* check if the exception happened in a restartable section */
7741:	lis	r3,exc_exit_restart_end@ha
775	addi	r3,r3,exc_exit_restart_end@l
776	cmplw	r12,r3
777	bge	3f
778	lis	r4,exc_exit_restart@ha
779	addi	r4,r4,exc_exit_restart@l
780	cmplw	r12,r4
781	blt	3f
782	lis	r3,fee_restarts@ha
783	tophys(r3,r3)
784	lwz	r5,fee_restarts@l(r3)
785	addi	r5,r5,1
786	stw	r5,fee_restarts@l(r3)
787	mr	r12,r4		/* restart at exc_exit_restart */
788	b	2b
789
790	.section .bss
791	.align	2
792fee_restarts:
793	.space	4
794	.previous
795
796/* aargh, a nonrecoverable interrupt, panic */
797/* aargh, we don't know which trap this is */
798/* but the 601 doesn't implement the RI bit, so assume it's OK */
7993:
800BEGIN_FTR_SECTION
801	b	2b
802END_FTR_SECTION_IFSET(CPU_FTR_601)
803	li	r10,-1
804	stw	r10,_TRAP(r11)
805	addi	r3,r1,STACK_FRAME_OVERHEAD
806	lis	r10,MSR_KERNEL@h
807	ori	r10,r10,MSR_KERNEL@l
808	bl	transfer_to_handler_full
809	.long	unrecoverable_exception
810	.long	ret_from_except
811#endif
812
813	.globl	ret_from_except_full
814ret_from_except_full:
815	REST_NVGPRS(r1)
816	/* fall through */
817
818	.globl	ret_from_except
819ret_from_except:
820	/* Hard-disable interrupts so that current_thread_info()->flags
821	 * can't change between when we test it and when we return
822	 * from the interrupt. */
823	/* Note: We don't bother telling lockdep about it */
824	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
825	SYNC			/* Some chip revs have problems here... */
826	MTMSRD(r10)		/* disable interrupts */
827
828	lwz	r3,_MSR(r1)	/* Returning to user mode? */
829	andi.	r0,r3,MSR_PR
830	beq	resume_kernel
831
832user_exc_return:		/* r10 contains MSR_KERNEL here */
833	/* Check current_thread_info()->flags */
834	lwz	r9,TI_FLAGS(r2)
835	andi.	r0,r9,_TIF_USER_WORK_MASK
836	bne	do_work
837
838restore_user:
839#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
840	/* Check whether this process has its own DBCR0 value.  The internal
841	   debug mode bit tells us that dbcr0 should be loaded. */
842	lwz	r0,THREAD+THREAD_DBCR0(r2)
843	andis.	r10,r0,DBCR0_IDM@h
844	bnel-	load_dbcr0
845#endif
846	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
847#ifdef CONFIG_PPC_BOOK3S_32
848	kuep_unlock	r10, r11
849#endif
850
851	b	restore
852
853/* N.B. the only way to get here is from the beq following ret_from_except. */
854resume_kernel:
855	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
856	lwz	r8,TI_FLAGS(r2)
857	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
858	beq+	1f
859
860	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
861
862	lwz	r3,GPR1(r1)
863	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
864	mr	r4,r1			/* src:  current exception frame */
865	mr	r1,r3			/* Reroute the trampoline frame to r1 */
866
867	/* Copy from the original to the trampoline. */
868	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
869	li	r6,0			/* start offset: 0 */
870	mtctr	r5
8712:	lwzx	r0,r6,r4
872	stwx	r0,r6,r3
873	addi	r6,r6,4
874	bdnz	2b
875
876	/* Do real store operation to complete stwu */
877	lwz	r5,GPR1(r1)
878	stw	r8,0(r5)
879
880	/* Clear _TIF_EMULATE_STACK_STORE flag */
881	lis	r11,_TIF_EMULATE_STACK_STORE@h
882	addi	r5,r2,TI_FLAGS
8830:	lwarx	r8,0,r5
884	andc	r8,r8,r11
885#ifdef CONFIG_IBM405_ERR77
886	dcbt	0,r5
887#endif
888	stwcx.	r8,0,r5
889	bne-	0b
8901:
891
892#ifdef CONFIG_PREEMPT
893	/* check current_thread_info->preempt_count */
894	lwz	r0,TI_PREEMPT(r2)
895	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
896	bne	restore_kuap
897	andi.	r8,r8,_TIF_NEED_RESCHED
898	beq+	restore_kuap
899	lwz	r3,_MSR(r1)
900	andi.	r0,r3,MSR_EE	/* interrupts off? */
901	beq	restore_kuap	/* don't schedule if so */
902#ifdef CONFIG_TRACE_IRQFLAGS
903	/* Lockdep thinks irqs are enabled, we need to call
904	 * preempt_schedule_irq with IRQs off, so we inform lockdep
905	 * now that we -did- turn them off already
906	 */
907	bl	trace_hardirqs_off
908#endif
909	bl	preempt_schedule_irq
910#ifdef CONFIG_TRACE_IRQFLAGS
911	/* And now, to properly rebalance the above, we tell lockdep they
912	 * are being turned back on, which will happen when we return
913	 */
914	bl	trace_hardirqs_on
915#endif
916#endif /* CONFIG_PREEMPT */
917restore_kuap:
918	kuap_restore r1, r2, r9, r10, r0
919
920	/* interrupts are hard-disabled at this point */
921restore:
922#ifdef CONFIG_44x
923BEGIN_MMU_FTR_SECTION
924	b	1f
925END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
926	lis	r4,icache_44x_need_flush@ha
927	lwz	r5,icache_44x_need_flush@l(r4)
928	cmplwi	cr0,r5,0
929	beq+	1f
930	li	r6,0
931	iccci	r0,r0
932	stw	r6,icache_44x_need_flush@l(r4)
9331:
934#endif  /* CONFIG_44x */
935
936	lwz	r9,_MSR(r1)
937#ifdef CONFIG_TRACE_IRQFLAGS
938	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
939	 * off in this assembly code while peeking at TI_FLAGS() and such. However
940	 * we need to inform it if the exception turned interrupts off, and we
941	 * are about to trun them back on.
942	 */
943	andi.	r10,r9,MSR_EE
944	beq	1f
945	stwu	r1,-32(r1)
946	mflr	r0
947	stw	r0,4(r1)
948	bl	trace_hardirqs_on
949	addi	r1, r1, 32
950	lwz	r9,_MSR(r1)
9511:
952#endif /* CONFIG_TRACE_IRQFLAGS */
953
954	lwz	r0,GPR0(r1)
955	lwz	r2,GPR2(r1)
956	REST_4GPRS(3, r1)
957	REST_2GPRS(7, r1)
958
959	lwz	r10,_XER(r1)
960	lwz	r11,_CTR(r1)
961	mtspr	SPRN_XER,r10
962	mtctr	r11
963
964	PPC405_ERR77(0,r1)
965BEGIN_FTR_SECTION
966	lwarx	r11,0,r1
967END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
968	stwcx.	r0,0,r1			/* to clear the reservation */
969
970#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
971	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
972	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
973
974	lwz	r10,_CCR(r1)
975	lwz	r11,_LINK(r1)
976	mtcrf	0xFF,r10
977	mtlr	r11
978
979	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
980	li	r10, 0
981	stw	r10, 8(r1)
982	/*
983	 * Once we put values in SRR0 and SRR1, we are in a state
984	 * where exceptions are not recoverable, since taking an
985	 * exception will trash SRR0 and SRR1.  Therefore we clear the
986	 * MSR:RI bit to indicate this.  If we do take an exception,
987	 * we can't return to the point of the exception but we
988	 * can restart the exception exit path at the label
989	 * exc_exit_restart below.  -- paulus
990	 */
991	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
992	SYNC
993	MTMSRD(r10)		/* clear the RI bit */
994	.globl exc_exit_restart
995exc_exit_restart:
996	lwz	r12,_NIP(r1)
997	mtspr	SPRN_SRR0,r12
998	mtspr	SPRN_SRR1,r9
999	REST_4GPRS(9, r1)
1000	lwz	r1,GPR1(r1)
1001	.globl exc_exit_restart_end
1002exc_exit_restart_end:
1003	SYNC
1004	RFI
1005
1006#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1007	/*
1008	 * This is a bit different on 4xx/Book-E because it doesn't have
1009	 * the RI bit in the MSR.
1010	 * The TLB miss handler checks if we have interrupted
1011	 * the exception exit path and restarts it if so
1012	 * (well maybe one day it will... :).
1013	 */
1014	lwz	r11,_LINK(r1)
1015	mtlr	r11
1016	lwz	r10,_CCR(r1)
1017	mtcrf	0xff,r10
1018	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1019	li	r10, 0
1020	stw	r10, 8(r1)
1021	REST_2GPRS(9, r1)
1022	.globl exc_exit_restart
1023exc_exit_restart:
1024	lwz	r11,_NIP(r1)
1025	lwz	r12,_MSR(r1)
1026exc_exit_start:
1027	mtspr	SPRN_SRR0,r11
1028	mtspr	SPRN_SRR1,r12
1029	REST_2GPRS(11, r1)
1030	lwz	r1,GPR1(r1)
1031	.globl exc_exit_restart_end
1032exc_exit_restart_end:
1033	PPC405_ERR77_SYNC
1034	rfi
1035	b	.			/* prevent prefetch past rfi */
1036
1037/*
1038 * Returning from a critical interrupt in user mode doesn't need
1039 * to be any different from a normal exception.  For a critical
1040 * interrupt in the kernel, we just return (without checking for
1041 * preemption) since the interrupt may have happened at some crucial
1042 * place (e.g. inside the TLB miss handler), and because we will be
1043 * running with r1 pointing into critical_stack, not the current
1044 * process's kernel stack (and therefore current_thread_info() will
1045 * give the wrong answer).
1046 * We have to restore various SPRs that may have been in use at the
1047 * time of the critical interrupt.
1048 *
1049 */
1050#ifdef CONFIG_40x
1051#define PPC_40x_TURN_OFF_MSR_DR						    \
1052	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1053	 * assume the instructions here are mapped by a pinned TLB entry */ \
1054	li	r10,MSR_IR;						    \
1055	mtmsr	r10;							    \
1056	isync;								    \
1057	tophys(r1, r1);
1058#else
1059#define PPC_40x_TURN_OFF_MSR_DR
1060#endif
1061
1062#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1063	REST_NVGPRS(r1);						\
1064	lwz	r3,_MSR(r1);						\
1065	andi.	r3,r3,MSR_PR;						\
1066	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1067	bne	user_exc_return;					\
1068	lwz	r0,GPR0(r1);						\
1069	lwz	r2,GPR2(r1);						\
1070	REST_4GPRS(3, r1);						\
1071	REST_2GPRS(7, r1);						\
1072	lwz	r10,_XER(r1);						\
1073	lwz	r11,_CTR(r1);						\
1074	mtspr	SPRN_XER,r10;						\
1075	mtctr	r11;							\
1076	PPC405_ERR77(0,r1);						\
1077	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1078	lwz	r11,_LINK(r1);						\
1079	mtlr	r11;							\
1080	lwz	r10,_CCR(r1);						\
1081	mtcrf	0xff,r10;						\
1082	PPC_40x_TURN_OFF_MSR_DR;					\
1083	lwz	r9,_DEAR(r1);						\
1084	lwz	r10,_ESR(r1);						\
1085	mtspr	SPRN_DEAR,r9;						\
1086	mtspr	SPRN_ESR,r10;						\
1087	lwz	r11,_NIP(r1);						\
1088	lwz	r12,_MSR(r1);						\
1089	mtspr	exc_lvl_srr0,r11;					\
1090	mtspr	exc_lvl_srr1,r12;					\
1091	lwz	r9,GPR9(r1);						\
1092	lwz	r12,GPR12(r1);						\
1093	lwz	r10,GPR10(r1);						\
1094	lwz	r11,GPR11(r1);						\
1095	lwz	r1,GPR1(r1);						\
1096	PPC405_ERR77_SYNC;						\
1097	exc_lvl_rfi;							\
1098	b	.;		/* prevent prefetch past exc_lvl_rfi */
1099
1100#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1101	lwz	r9,_##exc_lvl_srr0(r1);					\
1102	lwz	r10,_##exc_lvl_srr1(r1);				\
1103	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1104	mtspr	SPRN_##exc_lvl_srr1,r10;
1105
1106#if defined(CONFIG_PPC_BOOK3E_MMU)
1107#ifdef CONFIG_PHYS_64BIT
1108#define	RESTORE_MAS7							\
1109	lwz	r11,MAS7(r1);						\
1110	mtspr	SPRN_MAS7,r11;
1111#else
1112#define	RESTORE_MAS7
1113#endif /* CONFIG_PHYS_64BIT */
1114#define RESTORE_MMU_REGS						\
1115	lwz	r9,MAS0(r1);						\
1116	lwz	r10,MAS1(r1);						\
1117	lwz	r11,MAS2(r1);						\
1118	mtspr	SPRN_MAS0,r9;						\
1119	lwz	r9,MAS3(r1);						\
1120	mtspr	SPRN_MAS1,r10;						\
1121	lwz	r10,MAS6(r1);						\
1122	mtspr	SPRN_MAS2,r11;						\
1123	mtspr	SPRN_MAS3,r9;						\
1124	mtspr	SPRN_MAS6,r10;						\
1125	RESTORE_MAS7;
1126#elif defined(CONFIG_44x)
1127#define RESTORE_MMU_REGS						\
1128	lwz	r9,MMUCR(r1);						\
1129	mtspr	SPRN_MMUCR,r9;
1130#else
1131#define RESTORE_MMU_REGS
1132#endif
1133
1134#ifdef CONFIG_40x
1135	.globl	ret_from_crit_exc
1136ret_from_crit_exc:
1137	mfspr	r9,SPRN_SPRG_THREAD
1138	lis	r10,saved_ksp_limit@ha;
1139	lwz	r10,saved_ksp_limit@l(r10);
1140	tovirt(r9,r9);
1141	stw	r10,KSP_LIMIT(r9)
1142	lis	r9,crit_srr0@ha;
1143	lwz	r9,crit_srr0@l(r9);
1144	lis	r10,crit_srr1@ha;
1145	lwz	r10,crit_srr1@l(r10);
1146	mtspr	SPRN_SRR0,r9;
1147	mtspr	SPRN_SRR1,r10;
1148	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1149#endif /* CONFIG_40x */
1150
1151#ifdef CONFIG_BOOKE
1152	.globl	ret_from_crit_exc
1153ret_from_crit_exc:
1154	mfspr	r9,SPRN_SPRG_THREAD
1155	lwz	r10,SAVED_KSP_LIMIT(r1)
1156	stw	r10,KSP_LIMIT(r9)
1157	RESTORE_xSRR(SRR0,SRR1);
1158	RESTORE_MMU_REGS;
1159	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1160
1161	.globl	ret_from_debug_exc
1162ret_from_debug_exc:
1163	mfspr	r9,SPRN_SPRG_THREAD
1164	lwz	r10,SAVED_KSP_LIMIT(r1)
1165	stw	r10,KSP_LIMIT(r9)
1166	RESTORE_xSRR(SRR0,SRR1);
1167	RESTORE_xSRR(CSRR0,CSRR1);
1168	RESTORE_MMU_REGS;
1169	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1170
1171	.globl	ret_from_mcheck_exc
1172ret_from_mcheck_exc:
1173	mfspr	r9,SPRN_SPRG_THREAD
1174	lwz	r10,SAVED_KSP_LIMIT(r1)
1175	stw	r10,KSP_LIMIT(r9)
1176	RESTORE_xSRR(SRR0,SRR1);
1177	RESTORE_xSRR(CSRR0,CSRR1);
1178	RESTORE_xSRR(DSRR0,DSRR1);
1179	RESTORE_MMU_REGS;
1180	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1181#endif /* CONFIG_BOOKE */
1182
1183/*
1184 * Load the DBCR0 value for a task that is being ptraced,
1185 * having first saved away the global DBCR0.  Note that r0
1186 * has the dbcr0 value to set upon entry to this.
1187 */
1188load_dbcr0:
1189	mfmsr	r10		/* first disable debug exceptions */
1190	rlwinm	r10,r10,0,~MSR_DE
1191	mtmsr	r10
1192	isync
1193	mfspr	r10,SPRN_DBCR0
1194	lis	r11,global_dbcr0@ha
1195	addi	r11,r11,global_dbcr0@l
1196#ifdef CONFIG_SMP
1197	lwz	r9,TASK_CPU(r2)
1198	slwi	r9,r9,3
1199	add	r11,r11,r9
1200#endif
1201	stw	r10,0(r11)
1202	mtspr	SPRN_DBCR0,r0
1203	lwz	r10,4(r11)
1204	addi	r10,r10,1
1205	stw	r10,4(r11)
1206	li	r11,-1
1207	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1208	blr
1209
1210	.section .bss
1211	.align	4
1212	.global global_dbcr0
1213global_dbcr0:
1214	.space	8*NR_CPUS
1215	.previous
1216#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1217
1218do_work:			/* r10 contains MSR_KERNEL here */
1219	andi.	r0,r9,_TIF_NEED_RESCHED
1220	beq	do_user_signal
1221
1222do_resched:			/* r10 contains MSR_KERNEL here */
1223#ifdef CONFIG_TRACE_IRQFLAGS
1224	bl	trace_hardirqs_on
1225	mfmsr	r10
1226#endif
1227	ori	r10,r10,MSR_EE
1228	SYNC
1229	MTMSRD(r10)		/* hard-enable interrupts */
1230	bl	schedule
1231recheck:
1232	/* Note: And we don't tell it we are disabling them again
1233	 * neither. Those disable/enable cycles used to peek at
1234	 * TI_FLAGS aren't advertised.
1235	 */
1236	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1237	SYNC
1238	MTMSRD(r10)		/* disable interrupts */
1239	lwz	r9,TI_FLAGS(r2)
1240	andi.	r0,r9,_TIF_NEED_RESCHED
1241	bne-	do_resched
1242	andi.	r0,r9,_TIF_USER_WORK_MASK
1243	beq	restore_user
1244do_user_signal:			/* r10 contains MSR_KERNEL here */
1245	ori	r10,r10,MSR_EE
1246	SYNC
1247	MTMSRD(r10)		/* hard-enable interrupts */
1248	/* save r13-r31 in the exception frame, if not already done */
1249	lwz	r3,_TRAP(r1)
1250	andi.	r0,r3,1
1251	beq	2f
1252	SAVE_NVGPRS(r1)
1253	rlwinm	r3,r3,0,0,30
1254	stw	r3,_TRAP(r1)
12552:	addi	r3,r1,STACK_FRAME_OVERHEAD
1256	mr	r4,r9
1257	bl	do_notify_resume
1258	REST_NVGPRS(r1)
1259	b	recheck
1260
1261/*
1262 * We come here when we are at the end of handling an exception
1263 * that occurred at a place where taking an exception will lose
1264 * state information, such as the contents of SRR0 and SRR1.
1265 */
1266nonrecoverable:
1267	lis	r10,exc_exit_restart_end@ha
1268	addi	r10,r10,exc_exit_restart_end@l
1269	cmplw	r12,r10
1270	bge	3f
1271	lis	r11,exc_exit_restart@ha
1272	addi	r11,r11,exc_exit_restart@l
1273	cmplw	r12,r11
1274	blt	3f
1275	lis	r10,ee_restarts@ha
1276	lwz	r12,ee_restarts@l(r10)
1277	addi	r12,r12,1
1278	stw	r12,ee_restarts@l(r10)
1279	mr	r12,r11		/* restart at exc_exit_restart */
1280	blr
12813:	/* OK, we can't recover, kill this process */
1282	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1283BEGIN_FTR_SECTION
1284	blr
1285END_FTR_SECTION_IFSET(CPU_FTR_601)
1286	lwz	r3,_TRAP(r1)
1287	andi.	r0,r3,1
1288	beq	5f
1289	SAVE_NVGPRS(r1)
1290	rlwinm	r3,r3,0,0,30
1291	stw	r3,_TRAP(r1)
12925:	mfspr	r2,SPRN_SPRG_THREAD
1293	addi	r2,r2,-THREAD
1294	tovirt(r2,r2)			/* set back r2 to current */
12954:	addi	r3,r1,STACK_FRAME_OVERHEAD
1296	bl	unrecoverable_exception
1297	/* shouldn't return */
1298	b	4b
1299
1300	.section .bss
1301	.align	2
1302ee_restarts:
1303	.space	4
1304	.previous
1305
1306/*
1307 * PROM code for specific machines follows.  Put it
1308 * here so it's easy to add arch-specific sections later.
1309 * -- Cort
1310 */
1311#ifdef CONFIG_PPC_RTAS
1312/*
1313 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1314 * called with the MMU off.
1315 */
1316_GLOBAL(enter_rtas)
1317	stwu	r1,-INT_FRAME_SIZE(r1)
1318	mflr	r0
1319	stw	r0,INT_FRAME_SIZE+4(r1)
1320	LOAD_REG_ADDR(r4, rtas)
1321	lis	r6,1f@ha	/* physical return address for rtas */
1322	addi	r6,r6,1f@l
1323	tophys(r6,r6)
1324	tophys(r7,r1)
1325	lwz	r8,RTASENTRY(r4)
1326	lwz	r4,RTASBASE(r4)
1327	mfmsr	r9
1328	stw	r9,8(r1)
1329	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1330	SYNC			/* disable interrupts so SRR0/1 */
1331	MTMSRD(r0)		/* don't get trashed */
1332	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1333	mtlr	r6
1334	stw	r7, THREAD + RTAS_SP(r2)
1335	mtspr	SPRN_SRR0,r8
1336	mtspr	SPRN_SRR1,r9
1337	RFI
13381:	tophys(r9,r1)
1339	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1340	lwz	r9,8(r9)	/* original msr value */
1341	addi	r1,r1,INT_FRAME_SIZE
1342	li	r0,0
1343	tophys(r7, r2)
1344	stw	r0, THREAD + RTAS_SP(r7)
1345	mtspr	SPRN_SRR0,r8
1346	mtspr	SPRN_SRR1,r9
1347	RFI			/* return to caller */
1348
1349	.globl	machine_check_in_rtas
1350machine_check_in_rtas:
1351	twi	31,0,0
1352	/* XXX load up BATs and panic */
1353
1354#endif /* CONFIG_PPC_RTAS */
1355