1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ptrace.h>
35#include <asm/export.h>
36#include <asm/asm-405.h>
37#include <asm/feature-fixups.h>
38#include <asm/barrier.h>
39
40/*
41 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
42 */
43#if MSR_KERNEL >= 0x10000
44#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
45#else
46#define LOAD_MSR_KERNEL(r, x)	li r,(x)
47#endif
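/*
 * li only takes a 16-bit signed immediate, so when MSR_KERNEL has bits
 * set above bit 15 the constant has to be built with a lis/ori pair.
 */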
48
49/*
50 * Align to 4k to ensure that all functions modifying srr0/srr1
51 * fit into one page, so that no TLB miss can occur between the
52 * modification of srr0/srr1 and the associated rfi.
53 */
54	.align	12
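/* On powerpc, .align takes a power of two: .align 12 means 2^12 = 4096-byte (4k) alignment. */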
55
56#ifdef CONFIG_BOOKE
57	.globl	mcheck_transfer_to_handler
58mcheck_transfer_to_handler:
59	mfspr	r0,SPRN_DSRR0
60	stw	r0,_DSRR0(r11)
61	mfspr	r0,SPRN_DSRR1
62	stw	r0,_DSRR1(r11)
63	/* fall through */
64
65	.globl	debug_transfer_to_handler
66debug_transfer_to_handler:
67	mfspr	r0,SPRN_CSRR0
68	stw	r0,_CSRR0(r11)
69	mfspr	r0,SPRN_CSRR1
70	stw	r0,_CSRR1(r11)
71	/* fall through */
72
73	.globl	crit_transfer_to_handler
74crit_transfer_to_handler:
75#ifdef CONFIG_PPC_BOOK3E_MMU
76	mfspr	r0,SPRN_MAS0
77	stw	r0,MAS0(r11)
78	mfspr	r0,SPRN_MAS1
79	stw	r0,MAS1(r11)
80	mfspr	r0,SPRN_MAS2
81	stw	r0,MAS2(r11)
82	mfspr	r0,SPRN_MAS3
83	stw	r0,MAS3(r11)
84	mfspr	r0,SPRN_MAS6
85	stw	r0,MAS6(r11)
86#ifdef CONFIG_PHYS_64BIT
87	mfspr	r0,SPRN_MAS7
88	stw	r0,MAS7(r11)
89#endif /* CONFIG_PHYS_64BIT */
90#endif /* CONFIG_PPC_BOOK3E_MMU */
91#ifdef CONFIG_44x
92	mfspr	r0,SPRN_MMUCR
93	stw	r0,MMUCR(r11)
94#endif
95	mfspr	r0,SPRN_SRR0
96	stw	r0,_SRR0(r11)
97	mfspr	r0,SPRN_SRR1
98	stw	r0,_SRR1(r11)
99
100	/* set the stack limit to the current stack */
101	mfspr	r8,SPRN_SPRG_THREAD
102	lwz	r0,KSP_LIMIT(r8)
103	stw	r0,SAVED_KSP_LIMIT(r11)
104	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
105	stw	r0,KSP_LIMIT(r8)
106	/* fall through */
107#endif
108
109#ifdef CONFIG_40x
110	.globl	crit_transfer_to_handler
111crit_transfer_to_handler:
112	lwz	r0,crit_r10@l(0)
113	stw	r0,GPR10(r11)
114	lwz	r0,crit_r11@l(0)
115	stw	r0,GPR11(r11)
116	mfspr	r0,SPRN_SRR0
117	stw	r0,crit_srr0@l(0)
118	mfspr	r0,SPRN_SRR1
119	stw	r0,crit_srr1@l(0)
120
121	/* set the stack limit to the current stack */
122	mfspr	r8,SPRN_SPRG_THREAD
123	lwz	r0,KSP_LIMIT(r8)
124	stw	r0,saved_ksp_limit@l(0)
125	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
126	stw	r0,KSP_LIMIT(r8)
127	/* fall through */
128#endif
129
130/*
131 * This code finishes saving the registers to the exception frame
132 * and jumps to the appropriate handler for the exception, turning
133 * on address translation.
134 * Note that we rely on the caller having set cr0.eq iff the exception
135 * occurred in kernel mode (i.e. MSR:PR = 0).
136 */
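/*
 * Register conventions on entry (set up by the exception prologues):
 * r11 points to the exception frame, r12 holds the interrupted NIP,
 * r9 the interrupted MSR and r10 the MSR value to run the handler with;
 * LR points to a pair of words giving the handler address and the
 * address to return to afterwards (see transfer_to_handler_cont).
 */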
137	.globl	transfer_to_handler_full
138transfer_to_handler_full:
139	SAVE_NVGPRS(r11)
140	/* fall through */
141
142	.globl	transfer_to_handler
143transfer_to_handler:
144	stw	r2,GPR2(r11)
145	stw	r12,_NIP(r11)
146	stw	r9,_MSR(r11)
147	andi.	r2,r9,MSR_PR
148	mfctr	r12
149	mfspr	r2,SPRN_XER
150	stw	r12,_CTR(r11)
151	stw	r2,_XER(r11)
152	mfspr	r12,SPRN_SPRG_THREAD
153	addi	r2,r12,-THREAD
154	beq	2f			/* if from user, fix up THREAD.regs */
155	addi	r11,r1,STACK_FRAME_OVERHEAD
156	stw	r11,PT_REGS(r12)
157#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
158	/* Check to see if the dbcr0 register is set up to debug.  Use the
159	   internal debug mode bit to do this. */
160	lwz	r12,THREAD_DBCR0(r12)
161	andis.	r12,r12,DBCR0_IDM@h
162#endif
163	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
164#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
165	beq+	3f
166	/* From user and task is ptraced - load up global dbcr0 */
167	li	r12,-1			/* clear all pending debug events */
168	mtspr	SPRN_DBSR,r12
169	lis	r11,global_dbcr0@ha
170	tophys(r11,r11)
171	addi	r11,r11,global_dbcr0@l
172#ifdef CONFIG_SMP
173	lwz	r9,TASK_CPU(r2)
174	slwi	r9,r9,3
175	add	r11,r11,r9
176#endif
177	lwz	r12,0(r11)
178	mtspr	SPRN_DBCR0,r12
179	lwz	r12,4(r11)
180	addi	r12,r12,-1
181	stw	r12,4(r11)
182#endif
183
184	b	3f
185
1862:	/* if from kernel, check interrupted DOZE/NAP mode and
187         * check for stack overflow
188         */
189	lwz	r9,KSP_LIMIT(r12)
190	cmplw	r1,r9			/* if r1 <= ksp_limit */
191	ble-	stack_ovf		/* then the kernel stack overflowed */
1925:
193#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
194	lwz	r12,TI_LOCAL_FLAGS(r2)
195	mtcrf	0x01,r12
196	bt-	31-TLF_NAPPING,4f
197	bt-	31-TLF_SLEEPING,7f
198#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
199	.globl transfer_to_handler_cont
200transfer_to_handler_cont:
2013:
202	mflr	r9
203	tovirt(r2, r2)			/* set r2 to current */
204	lwz	r11,0(r9)		/* virtual address of handler */
205	lwz	r9,4(r9)		/* where to go when done */
206#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
207	mtspr	SPRN_NRI, r0
208#endif
209#ifdef CONFIG_TRACE_IRQFLAGS
210	lis	r12,reenable_mmu@h
211	ori	r12,r12,reenable_mmu@l
212	mtspr	SPRN_SRR0,r12
213	mtspr	SPRN_SRR1,r10
214	SYNC
215	RFI
216reenable_mmu:				/* re-enable mmu so we can */
217	mfmsr	r10
218	lwz	r12,_MSR(r1)
219	xor	r10,r10,r12
220	andi.	r10,r10,MSR_EE		/* Did EE change? */
221	beq	1f
222
223	/*
224	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
225	 * If from user mode there is only one stack frame on the stack, and
226	 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
227	 * stack frame to make trace_hardirqs_off happy.
228	 *
229	 * This is handy because we also need to save a bunch of GPRs,
230	 * r3 can be different from GPR3(r1) at this point, r9 and r11
231	 * contain the old MSR and handler address respectively,
232	 * r4 & r5 can contain page fault arguments that need to be passed
233	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
234	 * they aren't useful past this point (aren't syscall arguments),
235	 * the rest is restored from the exception frame.
236	 */
237	stwu	r1,-32(r1)
238	stw	r9,8(r1)
239	stw	r11,12(r1)
240	stw	r3,16(r1)
241	stw	r4,20(r1)
242	stw	r5,24(r1)
243	bl	trace_hardirqs_off
244	lwz	r5,24(r1)
245	lwz	r4,20(r1)
246	lwz	r3,16(r1)
247	lwz	r11,12(r1)
248	lwz	r9,8(r1)
249	addi	r1,r1,32
250	lwz	r0,GPR0(r1)
251	lwz	r6,GPR6(r1)
252	lwz	r7,GPR7(r1)
253	lwz	r8,GPR8(r1)
2541:	mtctr	r11
255	mtlr	r9
256	bctr				/* jump to handler */
257#else /* CONFIG_TRACE_IRQFLAGS */
258	mtspr	SPRN_SRR0,r11
259	mtspr	SPRN_SRR1,r10
260	mtlr	r9
261	SYNC
262	RFI				/* jump to handler, enable MMU */
263#endif /* CONFIG_TRACE_IRQFLAGS */
264
265#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2664:	rlwinm	r12,r12,0,~_TLF_NAPPING
267	stw	r12,TI_LOCAL_FLAGS(r2)
268	b	power_save_ppc32_restore
269
2707:	rlwinm	r12,r12,0,~_TLF_SLEEPING
271	stw	r12,TI_LOCAL_FLAGS(r2)
272	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
273	rlwinm	r9,r9,0,~MSR_EE
274	lwz	r12,_LINK(r11)		/* and return to address in LR */
275	b	fast_exception_return
276#endif
277
278/*
279 * On kernel stack overflow, load up an initial stack pointer
280 * and call StackOverflow(regs), which should not return.
281 */
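/*
 * Note that the non-volatile registers are still saved into the original
 * exception frame (r11) and regs (r3) still points into the overflowed
 * stack; only r1 itself is switched to the init task's stack so that
 * StackOverflow() has something sane to run on.
 */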
282stack_ovf:
283	/* sometimes we use a statically-allocated stack, which is OK. */
284	lis	r12,_end@h
285	ori	r12,r12,_end@l
286	cmplw	r1,r12
287	ble	5b			/* r1 <= &_end is OK */
288	SAVE_NVGPRS(r11)
289	addi	r3,r1,STACK_FRAME_OVERHEAD
290	lis	r1,init_thread_union@ha
291	addi	r1,r1,init_thread_union@l
292	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
293	lis	r9,StackOverflow@ha
294	addi	r9,r9,StackOverflow@l
295	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
296#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
297	mtspr	SPRN_NRI, r0
298#endif
299	mtspr	SPRN_SRR0,r9
300	mtspr	SPRN_SRR1,r10
301	SYNC
302	RFI
303
304/*
305 * Handle a system call.
306 */
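/*
 * Calling convention: the syscall number arrives in r0 and the arguments
 * in r3-r8.  The result is returned in r3; on failure the negative errno
 * returned by the handler is negated to a positive value and the SO bit
 * is set in CR0, which is what user space tests to detect an error.
 */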
307	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
308	.stabs	"entry_32.S",N_SO,0,0,0f
3090:
310
311_GLOBAL(DoSyscall)
312	stw	r3,ORIG_GPR3(r1)
313	li	r12,0
314	stw	r12,RESULT(r1)
315	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
316	rlwinm	r11,r11,0,4,2
317	stw	r11,_CCR(r1)
318#ifdef CONFIG_TRACE_IRQFLAGS
319	/* Return from syscalls can (and generally will) hard enable
320	 * interrupts. You aren't supposed to call a syscall with
321	 * interrupts disabled in the first place. However, to ensure
322	 * that we get it right vs. lockdep if it happens, we force
323	 * that hard enable here with appropriate tracing if we see
324	 * that we have been called with interrupts off
325	 */
326	mfmsr	r11
327	andi.	r12,r11,MSR_EE
328	bne+	1f
329	/* We came in with interrupts disabled, we enable them now */
330	bl	trace_hardirqs_on
331	mfmsr	r11
332	lwz	r0,GPR0(r1)
333	lwz	r3,GPR3(r1)
334	lwz	r4,GPR4(r1)
335	ori	r11,r11,MSR_EE
336	lwz	r5,GPR5(r1)
337	lwz	r6,GPR6(r1)
338	lwz	r7,GPR7(r1)
339	lwz	r8,GPR8(r1)
340	mtmsr	r11
3411:
342#endif /* CONFIG_TRACE_IRQFLAGS */
343	lwz	r11,TI_FLAGS(r2)
344	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
345	bne-	syscall_dotrace
346syscall_dotrace_cont:
347	cmplwi	0,r0,NR_syscalls
348	lis	r10,sys_call_table@h
349	ori	r10,r10,sys_call_table@l
350	slwi	r0,r0,2
351	bge-	66f
352
353	barrier_nospec_asm
354	/*
355	 * Prevent the load of the handler below (based on the user-passed
356	 * system call number) from being speculatively executed until the
357	 * test against NR_syscalls and the branch to 66f above have
358	 * committed.
359	 */
360
361	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
362	mtlr	r10
363	addi	r9,r1,STACK_FRAME_OVERHEAD
364	PPC440EP_ERR42
365	blrl			/* Call handler */
366	.globl	ret_from_syscall
367ret_from_syscall:
368#ifdef CONFIG_DEBUG_RSEQ
369	/* Check whether the syscall is issued inside a restartable sequence */
370	stw	r3,GPR3(r1)
371	addi    r3,r1,STACK_FRAME_OVERHEAD
372	bl      rseq_syscall
373	lwz	r3,GPR3(r1)
374#endif
375	mr	r6,r3
376	/* disable interrupts so current_thread_info()->flags can't change */
377	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
378	/* Note: We don't bother telling lockdep about it */
379	SYNC
380	MTMSRD(r10)
381	lwz	r9,TI_FLAGS(r2)
382	li	r8,-MAX_ERRNO
383	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
384	bne-	syscall_exit_work
385	cmplw	0,r3,r8
386	blt+	syscall_exit_cont
387	lwz	r11,_CCR(r1)			/* Load CR */
388	neg	r3,r3
389	oris	r11,r11,0x1000	/* Set SO bit in CR */
390	stw	r11,_CCR(r1)
391syscall_exit_cont:
392	lwz	r8,_MSR(r1)
393#ifdef CONFIG_TRACE_IRQFLAGS
394	/* If we are going to return from the syscall with interrupts
395	 * off, we trace that here. It shouldn't happen, but we want to
396	 * catch it if it does.
397	 */
398	andi.	r10,r8,MSR_EE
399	bne+	1f
400	stw	r3,GPR3(r1)
401	bl      trace_hardirqs_off
402	lwz	r3,GPR3(r1)
4031:
404#endif /* CONFIG_TRACE_IRQFLAGS */
405#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
406	/* If the process has its own DBCR0 value, load it up.  The internal
407	   debug mode bit tells us that dbcr0 should be loaded. */
408	lwz	r0,THREAD+THREAD_DBCR0(r2)
409	andis.	r10,r0,DBCR0_IDM@h
410	bnel-	load_dbcr0
411#endif
412#ifdef CONFIG_44x
413BEGIN_MMU_FTR_SECTION
414	lis	r4,icache_44x_need_flush@ha
415	lwz	r5,icache_44x_need_flush@l(r4)
416	cmplwi	cr0,r5,0
417	bne-	2f
4181:
419END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
420#endif /* CONFIG_44x */
421BEGIN_FTR_SECTION
422	lwarx	r7,0,r1
423END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
424	stwcx.	r0,0,r1			/* to clear the reservation */
425#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
426	andi.	r4,r8,MSR_PR
427	beq	3f
428	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
4293:
430#endif
431	lwz	r4,_LINK(r1)
432	lwz	r5,_CCR(r1)
433	mtlr	r4
434	mtcr	r5
435	lwz	r7,_NIP(r1)
436	lwz	r2,GPR2(r1)
437	lwz	r1,GPR1(r1)
438#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
439	mtspr	SPRN_NRI, r0
440#endif
441	mtspr	SPRN_SRR0,r7
442	mtspr	SPRN_SRR1,r8
443	SYNC
444	RFI
445#ifdef CONFIG_44x
4462:	li	r7,0
447	iccci	r0,r0
448	stw	r7,icache_44x_need_flush@l(r4)
449	b	1b
450#endif  /* CONFIG_44x */
451
45266:	li	r3,-ENOSYS
453	b	ret_from_syscall
454
455	.globl	ret_from_fork
456ret_from_fork:
457	REST_NVGPRS(r1)
458	bl	schedule_tail
459	li	r3,0
460	b	ret_from_syscall
461
462	.globl	ret_from_kernel_thread
463ret_from_kernel_thread:
464	REST_NVGPRS(r1)
465	bl	schedule_tail
466	mtlr	r14
467	mr	r3,r15
468	PPC440EP_ERR42
469	blrl
470	li	r3,0
471	b	ret_from_syscall
472
473/* Traced system call support */
474syscall_dotrace:
475	SAVE_NVGPRS(r1)
476	li	r0,0xc00
477	stw	r0,_TRAP(r1)
478	addi	r3,r1,STACK_FRAME_OVERHEAD
479	bl	do_syscall_trace_enter
480	/*
481	 * Restore the argument registers, which may just have been changed.
482	 * We use the return value of do_syscall_trace_enter
483	 * as the syscall number to look up in the table (r0).
484	 */
485	mr	r0,r3
486	lwz	r3,GPR3(r1)
487	lwz	r4,GPR4(r1)
488	lwz	r5,GPR5(r1)
489	lwz	r6,GPR6(r1)
490	lwz	r7,GPR7(r1)
491	lwz	r8,GPR8(r1)
492	REST_NVGPRS(r1)
493
494	cmplwi	r0,NR_syscalls
495	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
496	bge-	ret_from_syscall
497	b	syscall_dotrace_cont
498
499syscall_exit_work:
500	andi.	r0,r9,_TIF_RESTOREALL
501	beq+	0f
502	REST_NVGPRS(r1)
503	b	2f
5040:	cmplw	0,r3,r8
505	blt+	1f
506	andi.	r0,r9,_TIF_NOERROR
507	bne-	1f
508	lwz	r11,_CCR(r1)			/* Load CR */
509	neg	r3,r3
510	oris	r11,r11,0x1000	/* Set SO bit in CR */
511	stw	r11,_CCR(r1)
512
5131:	stw	r6,RESULT(r1)	/* Save result */
514	stw	r3,GPR3(r1)	/* Update return value */
5152:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
516	beq	4f
517
518	/* Clear per-syscall TIF flags if any are set.  */
519
520	li	r11,_TIF_PERSYSCALL_MASK
521	addi	r12,r2,TI_FLAGS
5223:	lwarx	r8,0,r12
523	andc	r8,r8,r11
524#ifdef CONFIG_IBM405_ERR77
525	dcbt	0,r12
526#endif
527	stwcx.	r8,0,r12
528	bne-	3b
529
5304:	/* Anything which requires enabling interrupts? */
531	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
532	beq	ret_from_except
533
534	/* Re-enable interrupts. There is no need to trace that with
535	 * lockdep as we are supposed to have IRQs on at this point
536	 */
537	ori	r10,r10,MSR_EE
538	SYNC
539	MTMSRD(r10)
540
541	/* Save NVGPRS if they're not saved already */
542	lwz	r4,_TRAP(r1)
543	andi.	r4,r4,1
544	beq	5f
545	SAVE_NVGPRS(r1)
546	li	r4,0xc00
547	stw	r4,_TRAP(r1)
5485:
549	addi	r3,r1,STACK_FRAME_OVERHEAD
550	bl	do_syscall_trace_leave
551	b	ret_from_except_full
552
553/*
554 * The fork/clone functions need to copy the full register set into
555 * the child process. Therefore we need to save all the nonvolatile
556 * registers (r13 - r31) before calling the C code.
557 */
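/*
 * The low bit of the saved trap number doubles as a flag: it is set when
 * only the volatile registers are in the frame, and cleared (as below)
 * once the full register set, including r13-r31, has been saved.
 */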
558	.globl	ppc_fork
559ppc_fork:
560	SAVE_NVGPRS(r1)
561	lwz	r0,_TRAP(r1)
562	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
563	stw	r0,_TRAP(r1)		/* register set saved */
564	b	sys_fork
565
566	.globl	ppc_vfork
567ppc_vfork:
568	SAVE_NVGPRS(r1)
569	lwz	r0,_TRAP(r1)
570	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
571	stw	r0,_TRAP(r1)		/* register set saved */
572	b	sys_vfork
573
574	.globl	ppc_clone
575ppc_clone:
576	SAVE_NVGPRS(r1)
577	lwz	r0,_TRAP(r1)
578	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
579	stw	r0,_TRAP(r1)		/* register set saved */
580	b	sys_clone
581
582	.globl	ppc_swapcontext
583ppc_swapcontext:
584	SAVE_NVGPRS(r1)
585	lwz	r0,_TRAP(r1)
586	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
587	stw	r0,_TRAP(r1)		/* register set saved */
588	b	sys_swapcontext
589
590/*
591 * Top-level page fault handling.
592 * This is in assembler because if do_page_fault tells us that
593 * it is a bad kernel page fault, we want to save the non-volatile
594 * registers before calling bad_page_fault.
595 */
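/*
 * On entry, r4 holds the faulting address (DAR/DEAR) and r5 the
 * DSISR/ESR value describing the access, as set up by the exception
 * prologue.
 */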
596	.globl	handle_page_fault
597handle_page_fault:
598	stw	r4,_DAR(r1)
599	addi	r3,r1,STACK_FRAME_OVERHEAD
600#ifdef CONFIG_PPC_BOOK3S_32
601	andis.  r0,r5,DSISR_DABRMATCH@h
602	bne-    handle_dabr_fault
603#endif
604	bl	do_page_fault
605	cmpwi	r3,0
606	beq+	ret_from_except
607	SAVE_NVGPRS(r1)
608	lwz	r0,_TRAP(r1)
609	clrrwi	r0,r0,1
610	stw	r0,_TRAP(r1)
611	mr	r5,r3
612	addi	r3,r1,STACK_FRAME_OVERHEAD
613	lwz	r4,_DAR(r1)
614	bl	bad_page_fault
615	b	ret_from_except_full
616
617#ifdef CONFIG_PPC_BOOK3S_32
618	/* We have a data breakpoint exception - handle it */
619handle_dabr_fault:
620	SAVE_NVGPRS(r1)
621	lwz	r0,_TRAP(r1)
622	clrrwi	r0,r0,1
623	stw	r0,_TRAP(r1)
624	bl      do_break
625	b	ret_from_except_full
626#endif
627
628/*
629 * This routine switches between two different tasks.  The process
630 * state of one is saved on its kernel stack.  Then the state
631 * of the other is restored from its kernel stack.  The memory
632 * management hardware is updated to the second process's state.
633 * Finally, we can return to the second process.
634 * On entry, r3 points to the THREAD for the current task, r4
635 * points to the THREAD for the new task.
636 *
637 * This routine is always called with interrupts disabled.
638 *
639 * Note: there are two ways to get to the "going out" portion
640 * of this code; either by coming in via the entry (_switch)
641 * or via "fork" which must set up an environment equivalent
642 * to the "_switch" path.  If you change this, you'll have to
643 * change the fork code also.
644 *
645 * The code which creates the new task context is in 'copy_thread'
646 * in arch/powerpc/kernel/process.c
647 */
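/*
 * Seen from C, this behaves roughly like
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 * returning, in the new task's context, the task that was running
 * before the switch (the old r2/current value).
 */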
648_GLOBAL(_switch)
649	stwu	r1,-INT_FRAME_SIZE(r1)
650	mflr	r0
651	stw	r0,INT_FRAME_SIZE+4(r1)
652	/* r3-r12 are caller saved -- Cort */
653	SAVE_NVGPRS(r1)
654	stw	r0,_NIP(r1)	/* Return to switch caller */
655	mfmsr	r11
656	li	r0,MSR_FP	/* Disable floating-point */
657#ifdef CONFIG_ALTIVEC
658BEGIN_FTR_SECTION
659	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
660	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
661	stw	r12,THREAD+THREAD_VRSAVE(r2)
662END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
663#endif /* CONFIG_ALTIVEC */
664#ifdef CONFIG_SPE
665BEGIN_FTR_SECTION
666	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
667	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
668	stw	r12,THREAD+THREAD_SPEFSCR(r2)
669END_FTR_SECTION_IFSET(CPU_FTR_SPE)
670#endif /* CONFIG_SPE */
671	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
672	beq+	1f
673	andc	r11,r11,r0
674	MTMSRD(r11)
675	isync
6761:	stw	r11,_MSR(r1)
677	mfcr	r10
678	stw	r10,_CCR(r1)
679	stw	r1,KSP(r3)	/* Set old stack pointer */
680
681#ifdef CONFIG_SMP
682	/* We need a sync somewhere here to make sure that if the
683	 * previous task gets rescheduled on another CPU, it sees all
684	 * stores it has performed on this one.
685	 */
686	sync
687#endif /* CONFIG_SMP */
688
689	tophys(r0,r4)
690	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
691	lwz	r1,KSP(r4)	/* Load new stack pointer */
692
693	/* save the old current 'last' for return value */
694	mr	r3,r2
695	addi	r2,r4,-THREAD	/* Update current */
696
697#ifdef CONFIG_ALTIVEC
698BEGIN_FTR_SECTION
699	lwz	r0,THREAD+THREAD_VRSAVE(r2)
700	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
701END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
702#endif /* CONFIG_ALTIVEC */
703#ifdef CONFIG_SPE
704BEGIN_FTR_SECTION
705	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
706	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
707END_FTR_SECTION_IFSET(CPU_FTR_SPE)
708#endif /* CONFIG_SPE */
709
710	lwz	r0,_CCR(r1)
711	mtcrf	0xFF,r0
712	/* r3-r12 are destroyed -- Cort */
713	REST_NVGPRS(r1)
714
715	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
716	mtlr	r4
717	addi	r1,r1,INT_FRAME_SIZE
718	blr
719
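/*
 * fast_exception_return restores only the registers the exception
 * prologue saved (r1, r3-r6, r9-r12, CR and LR) and returns with an
 * rfi.  On entry r11 points to the exception frame, r9 holds the saved
 * MSR and r12 the saved NIP.
 */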
720	.globl	fast_exception_return
721fast_exception_return:
722#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
723	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
724	beq	1f			/* if not, we've got problems */
725#endif
726
7272:	REST_4GPRS(3, r11)
728	lwz	r10,_CCR(r11)
729	REST_GPR(1, r11)
730	mtcr	r10
731	lwz	r10,_LINK(r11)
732	mtlr	r10
733	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
734	li	r10, 0
735	stw	r10, 8(r11)
736	REST_GPR(10, r11)
737#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
738	mtspr	SPRN_NRI, r0
739#endif
740	mtspr	SPRN_SRR1,r9
741	mtspr	SPRN_SRR0,r12
742	REST_GPR(9, r11)
743	REST_GPR(12, r11)
744	lwz	r11,GPR11(r11)
745	SYNC
746	RFI
747
748#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
749/* check if the exception happened in a restartable section */
7501:	lis	r3,exc_exit_restart_end@ha
751	addi	r3,r3,exc_exit_restart_end@l
752	cmplw	r12,r3
753	bge	3f
754	lis	r4,exc_exit_restart@ha
755	addi	r4,r4,exc_exit_restart@l
756	cmplw	r12,r4
757	blt	3f
758	lis	r3,fee_restarts@ha
759	tophys(r3,r3)
760	lwz	r5,fee_restarts@l(r3)
761	addi	r5,r5,1
762	stw	r5,fee_restarts@l(r3)
763	mr	r12,r4		/* restart at exc_exit_restart */
764	b	2b
765
766	.section .bss
767	.align	2
768fee_restarts:
769	.space	4
770	.previous
771
772/* aargh, a nonrecoverable interrupt, panic */
773/* aargh, we don't know which trap this is */
774/* but the 601 doesn't implement the RI bit, so assume it's OK */
7753:
776BEGIN_FTR_SECTION
777	b	2b
778END_FTR_SECTION_IFSET(CPU_FTR_601)
779	li	r10,-1
780	stw	r10,_TRAP(r11)
781	addi	r3,r1,STACK_FRAME_OVERHEAD
782	lis	r10,MSR_KERNEL@h
783	ori	r10,r10,MSR_KERNEL@l
784	bl	transfer_to_handler_full
785	.long	unrecoverable_exception
786	.long	ret_from_except
787#endif
788
789	.globl	ret_from_except_full
790ret_from_except_full:
791	REST_NVGPRS(r1)
792	/* fall through */
793
794	.globl	ret_from_except
795ret_from_except:
796	/* Hard-disable interrupts so that current_thread_info()->flags
797	 * can't change between when we test it and when we return
798	 * from the interrupt. */
799	/* Note: We don't bother telling lockdep about it */
800	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
801	SYNC			/* Some chip revs have problems here... */
802	MTMSRD(r10)		/* disable interrupts */
803
804	lwz	r3,_MSR(r1)	/* Returning to user mode? */
805	andi.	r0,r3,MSR_PR
806	beq	resume_kernel
807
808user_exc_return:		/* r10 contains MSR_KERNEL here */
809	/* Check current_thread_info()->flags */
810	lwz	r9,TI_FLAGS(r2)
811	andi.	r0,r9,_TIF_USER_WORK_MASK
812	bne	do_work
813
814restore_user:
815#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
816	/* Check whether this process has its own DBCR0 value.  The internal
817	   debug mode bit tells us that dbcr0 should be loaded. */
818	lwz	r0,THREAD+THREAD_DBCR0(r2)
819	andis.	r10,r0,DBCR0_IDM@h
820	bnel-	load_dbcr0
821#endif
822	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
823
824	b	restore
825
826/* N.B. the only way to get here is from the beq following ret_from_except. */
827resume_kernel:
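	/*
	 * If an instruction emulator (e.g. kprobes single-stepping) had to
	 * emulate a stwu to the kernel stack it sets _TIF_EMULATE_STACK_STORE;
	 * here we complete that store on a trampoline copy of the exception
	 * frame so the stack pointer update and the store stay consistent.
	 */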
828	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
829	lwz	r8,TI_FLAGS(r2)
830	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
831	beq+	1f
832
833	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
834
835	lwz	r3,GPR1(r1)
836	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
837	mr	r4,r1			/* src:  current exception frame */
838	mr	r1,r3			/* Reroute the trampoline frame to r1 */
839
840	/* Copy from the original to the trampoline. */
841	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
842	li	r6,0			/* start offset: 0 */
843	mtctr	r5
8442:	lwzx	r0,r6,r4
845	stwx	r0,r6,r3
846	addi	r6,r6,4
847	bdnz	2b
848
849	/* Do real store operation to complete stwu */
850	lwz	r5,GPR1(r1)
851	stw	r8,0(r5)
852
853	/* Clear _TIF_EMULATE_STACK_STORE flag */
854	lis	r11,_TIF_EMULATE_STACK_STORE@h
855	addi	r5,r2,TI_FLAGS
8560:	lwarx	r8,0,r5
857	andc	r8,r8,r11
858#ifdef CONFIG_IBM405_ERR77
859	dcbt	0,r5
860#endif
861	stwcx.	r8,0,r5
862	bne-	0b
8631:
864
865#ifdef CONFIG_PREEMPT
866	/* check current_thread_info->preempt_count */
867	lwz	r0,TI_PREEMPT(r2)
868	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
869	bne	restore
870	andi.	r8,r8,_TIF_NEED_RESCHED
871	beq+	restore
872	lwz	r3,_MSR(r1)
873	andi.	r0,r3,MSR_EE	/* interrupts off? */
874	beq	restore		/* don't schedule if so */
875#ifdef CONFIG_TRACE_IRQFLAGS
876	/* Lockdep thinks irqs are enabled, but we need to call
877	 * preempt_schedule_irq with IRQs off, so we inform lockdep
878	 * now that we -did- turn them off already
879	 */
880	bl	trace_hardirqs_off
881#endif
8821:	bl	preempt_schedule_irq
883	lwz	r3,TI_FLAGS(r2)
884	andi.	r0,r3,_TIF_NEED_RESCHED
885	bne-	1b
886#ifdef CONFIG_TRACE_IRQFLAGS
887	/* And now, to properly rebalance the above, we tell lockdep they
888	 * are being turned back on, which will happen when we return
889	 */
890	bl	trace_hardirqs_on
891#endif
892#endif /* CONFIG_PREEMPT */
893
894	/* interrupts are hard-disabled at this point */
895restore:
896#ifdef CONFIG_44x
897BEGIN_MMU_FTR_SECTION
898	b	1f
899END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
900	lis	r4,icache_44x_need_flush@ha
901	lwz	r5,icache_44x_need_flush@l(r4)
902	cmplwi	cr0,r5,0
903	beq+	1f
904	li	r6,0
905	iccci	r0,r0
906	stw	r6,icache_44x_need_flush@l(r4)
9071:
908#endif  /* CONFIG_44x */
909
910	lwz	r9,_MSR(r1)
911#ifdef CONFIG_TRACE_IRQFLAGS
912	/* Lockdep doesn't know that IRQs are temporarily turned off in this
913	 * assembly code while we peek at TI_FLAGS() and such. However, we
914	 * need to inform it if the exception turned interrupts off and we
915	 * are about to turn them back on.
916	 *
917	 * The problem, sadly, is that we don't know whether the exception was
918	 * one that turned interrupts off or not. So we always tell lockdep
919	 * that we are turning them on here when we go back to wherever we
920	 * came from with EE on, even if that means some redundant calls get
921	 * tracked. Maybe later we could encode what the exception did somewhere
922	 * or test the exception type in pt_regs, but that sounds like overkill.
923	 */
924	andi.	r10,r9,MSR_EE
925	beq	1f
926	/*
927	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
928	 * which is the stack frame here, we need to force a stack frame
929	 * in case we came from user space.
930	 */
931	stwu	r1,-32(r1)
932	mflr	r0
933	stw	r0,4(r1)
934	stwu	r1,-32(r1)
935	bl	trace_hardirqs_on
936	lwz	r1,0(r1)
937	lwz	r1,0(r1)
938	lwz	r9,_MSR(r1)
9391:
940#endif /* CONFIG_TRACE_IRQFLAGS */
941
942	lwz	r0,GPR0(r1)
943	lwz	r2,GPR2(r1)
944	REST_4GPRS(3, r1)
945	REST_2GPRS(7, r1)
946
947	lwz	r10,_XER(r1)
948	lwz	r11,_CTR(r1)
949	mtspr	SPRN_XER,r10
950	mtctr	r11
951
952	PPC405_ERR77(0,r1)
953BEGIN_FTR_SECTION
954	lwarx	r11,0,r1
955END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
956	stwcx.	r0,0,r1			/* to clear the reservation */
957
958#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
959	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
960	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
961
962	lwz	r10,_CCR(r1)
963	lwz	r11,_LINK(r1)
964	mtcrf	0xFF,r10
965	mtlr	r11
966
967	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
968	li	r10, 0
969	stw	r10, 8(r1)
970	/*
971	 * Once we put values in SRR0 and SRR1, we are in a state
972	 * where exceptions are not recoverable, since taking an
973	 * exception will trash SRR0 and SRR1.  Therefore we clear the
974	 * MSR:RI bit to indicate this.  If we do take an exception,
975	 * we can't return to the point of the exception but we
976	 * can restart the exception exit path at the label
977	 * exc_exit_restart below.  -- paulus
978	 */
979	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
980	SYNC
981	MTMSRD(r10)		/* clear the RI bit */
982	.globl exc_exit_restart
983exc_exit_restart:
984	lwz	r12,_NIP(r1)
985	mtspr	SPRN_SRR0,r12
986	mtspr	SPRN_SRR1,r9
987	REST_4GPRS(9, r1)
988	lwz	r1,GPR1(r1)
989	.globl exc_exit_restart_end
990exc_exit_restart_end:
991	SYNC
992	RFI
993
994#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
995	/*
996	 * This is a bit different on 4xx/Book-E because it doesn't have
997	 * the RI bit in the MSR.
998	 * The TLB miss handler checks if we have interrupted
999	 * the exception exit path and restarts it if so
1000	 * (well maybe one day it will... :).
1001	 */
1002	lwz	r11,_LINK(r1)
1003	mtlr	r11
1004	lwz	r10,_CCR(r1)
1005	mtcrf	0xff,r10
1006	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1007	li	r10, 0
1008	stw	r10, 8(r1)
1009	REST_2GPRS(9, r1)
1010	.globl exc_exit_restart
1011exc_exit_restart:
1012	lwz	r11,_NIP(r1)
1013	lwz	r12,_MSR(r1)
1014exc_exit_start:
1015	mtspr	SPRN_SRR0,r11
1016	mtspr	SPRN_SRR1,r12
1017	REST_2GPRS(11, r1)
1018	lwz	r1,GPR1(r1)
1019	.globl exc_exit_restart_end
1020exc_exit_restart_end:
1021	PPC405_ERR77_SYNC
1022	rfi
1023	b	.			/* prevent prefetch past rfi */
1024
1025/*
1026 * Returning from a critical interrupt in user mode doesn't need
1027 * to be any different from a normal exception.  For a critical
1028 * interrupt in the kernel, we just return (without checking for
1029 * preemption) since the interrupt may have happened at some crucial
1030 * place (e.g. inside the TLB miss handler), and because we will be
1031 * running with r1 pointing into critical_stack, not the current
1032 * process's kernel stack (and therefore current_thread_info() will
1033 * give the wrong answer).
1034 * We have to restore various SPRs that may have been in use at the
1035 * time of the critical interrupt.
1036 *
1037 */
1038#ifdef CONFIG_40x
1039#define PPC_40x_TURN_OFF_MSR_DR						    \
1040	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1041	 * assume the instructions here are mapped by a pinned TLB entry */ \
1042	li	r10,MSR_IR;						    \
1043	mtmsr	r10;							    \
1044	isync;								    \
1045	tophys(r1, r1);
1046#else
1047#define PPC_40x_TURN_OFF_MSR_DR
1048#endif
1049
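/*
 * RET_FROM_EXC_LEVEL undoes what the *_transfer_to_handler code above
 * saved and returns with the level-specific return instruction
 * (rfci/rfdi/rfmci), writing the saved NIP/MSR into the matching
 * exception-level SRR pair first.
 */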
1050#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1051	REST_NVGPRS(r1);						\
1052	lwz	r3,_MSR(r1);						\
1053	andi.	r3,r3,MSR_PR;						\
1054	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1055	bne	user_exc_return;					\
1056	lwz	r0,GPR0(r1);						\
1057	lwz	r2,GPR2(r1);						\
1058	REST_4GPRS(3, r1);						\
1059	REST_2GPRS(7, r1);						\
1060	lwz	r10,_XER(r1);						\
1061	lwz	r11,_CTR(r1);						\
1062	mtspr	SPRN_XER,r10;						\
1063	mtctr	r11;							\
1064	PPC405_ERR77(0,r1);						\
1065	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1066	lwz	r11,_LINK(r1);						\
1067	mtlr	r11;							\
1068	lwz	r10,_CCR(r1);						\
1069	mtcrf	0xff,r10;						\
1070	PPC_40x_TURN_OFF_MSR_DR;					\
1071	lwz	r9,_DEAR(r1);						\
1072	lwz	r10,_ESR(r1);						\
1073	mtspr	SPRN_DEAR,r9;						\
1074	mtspr	SPRN_ESR,r10;						\
1075	lwz	r11,_NIP(r1);						\
1076	lwz	r12,_MSR(r1);						\
1077	mtspr	exc_lvl_srr0,r11;					\
1078	mtspr	exc_lvl_srr1,r12;					\
1079	lwz	r9,GPR9(r1);						\
1080	lwz	r12,GPR12(r1);						\
1081	lwz	r10,GPR10(r1);						\
1082	lwz	r11,GPR11(r1);						\
1083	lwz	r1,GPR1(r1);						\
1084	PPC405_ERR77_SYNC;						\
1085	exc_lvl_rfi;							\
1086	b	.;		/* prevent prefetch past exc_lvl_rfi */
1087
1088#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1089	lwz	r9,_##exc_lvl_srr0(r1);					\
1090	lwz	r10,_##exc_lvl_srr1(r1);				\
1091	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1092	mtspr	SPRN_##exc_lvl_srr1,r10;
1093
1094#if defined(CONFIG_PPC_BOOK3E_MMU)
1095#ifdef CONFIG_PHYS_64BIT
1096#define	RESTORE_MAS7							\
1097	lwz	r11,MAS7(r1);						\
1098	mtspr	SPRN_MAS7,r11;
1099#else
1100#define	RESTORE_MAS7
1101#endif /* CONFIG_PHYS_64BIT */
1102#define RESTORE_MMU_REGS						\
1103	lwz	r9,MAS0(r1);						\
1104	lwz	r10,MAS1(r1);						\
1105	lwz	r11,MAS2(r1);						\
1106	mtspr	SPRN_MAS0,r9;						\
1107	lwz	r9,MAS3(r1);						\
1108	mtspr	SPRN_MAS1,r10;						\
1109	lwz	r10,MAS6(r1);						\
1110	mtspr	SPRN_MAS2,r11;						\
1111	mtspr	SPRN_MAS3,r9;						\
1112	mtspr	SPRN_MAS6,r10;						\
1113	RESTORE_MAS7;
1114#elif defined(CONFIG_44x)
1115#define RESTORE_MMU_REGS						\
1116	lwz	r9,MMUCR(r1);						\
1117	mtspr	SPRN_MMUCR,r9;
1118#else
1119#define RESTORE_MMU_REGS
1120#endif
1121
1122#ifdef CONFIG_40x
1123	.globl	ret_from_crit_exc
1124ret_from_crit_exc:
1125	mfspr	r9,SPRN_SPRG_THREAD
1126	lis	r10,saved_ksp_limit@ha;
1127	lwz	r10,saved_ksp_limit@l(r10);
1128	tovirt(r9,r9);
1129	stw	r10,KSP_LIMIT(r9)
1130	lis	r9,crit_srr0@ha;
1131	lwz	r9,crit_srr0@l(r9);
1132	lis	r10,crit_srr1@ha;
1133	lwz	r10,crit_srr1@l(r10);
1134	mtspr	SPRN_SRR0,r9;
1135	mtspr	SPRN_SRR1,r10;
1136	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1137#endif /* CONFIG_40x */
1138
1139#ifdef CONFIG_BOOKE
1140	.globl	ret_from_crit_exc
1141ret_from_crit_exc:
1142	mfspr	r9,SPRN_SPRG_THREAD
1143	lwz	r10,SAVED_KSP_LIMIT(r1)
1144	stw	r10,KSP_LIMIT(r9)
1145	RESTORE_xSRR(SRR0,SRR1);
1146	RESTORE_MMU_REGS;
1147	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1148
1149	.globl	ret_from_debug_exc
1150ret_from_debug_exc:
1151	mfspr	r9,SPRN_SPRG_THREAD
1152	lwz	r10,SAVED_KSP_LIMIT(r1)
1153	stw	r10,KSP_LIMIT(r9)
1154	RESTORE_xSRR(SRR0,SRR1);
1155	RESTORE_xSRR(CSRR0,CSRR1);
1156	RESTORE_MMU_REGS;
1157	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1158
1159	.globl	ret_from_mcheck_exc
1160ret_from_mcheck_exc:
1161	mfspr	r9,SPRN_SPRG_THREAD
1162	lwz	r10,SAVED_KSP_LIMIT(r1)
1163	stw	r10,KSP_LIMIT(r9)
1164	RESTORE_xSRR(SRR0,SRR1);
1165	RESTORE_xSRR(CSRR0,CSRR1);
1166	RESTORE_xSRR(DSRR0,DSRR1);
1167	RESTORE_MMU_REGS;
1168	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1169#endif /* CONFIG_BOOKE */
1170
1171/*
1172 * Load the DBCR0 value for a task that is being ptraced,
1173 * having first saved away the global DBCR0.  Note that r0
1174 * holds the dbcr0 value to set on entry to this function.
1175 */
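/*
 * global_dbcr0 below holds two words per CPU: the kernel's DBCR0 value
 * saved here, and a usage count that is incremented here and decremented
 * in transfer_to_handler when the global value is loaded back.
 */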
1176load_dbcr0:
1177	mfmsr	r10		/* first disable debug exceptions */
1178	rlwinm	r10,r10,0,~MSR_DE
1179	mtmsr	r10
1180	isync
1181	mfspr	r10,SPRN_DBCR0
1182	lis	r11,global_dbcr0@ha
1183	addi	r11,r11,global_dbcr0@l
1184#ifdef CONFIG_SMP
1185	lwz	r9,TASK_CPU(r2)
1186	slwi	r9,r9,3
1187	add	r11,r11,r9
1188#endif
1189	stw	r10,0(r11)
1190	mtspr	SPRN_DBCR0,r0
1191	lwz	r10,4(r11)
1192	addi	r10,r10,1
1193	stw	r10,4(r11)
1194	li	r11,-1
1195	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1196	blr
1197
1198	.section .bss
1199	.align	4
1200global_dbcr0:
1201	.space	8*NR_CPUS
1202	.previous
1203#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1204
1205do_work:			/* r10 contains MSR_KERNEL here */
1206	andi.	r0,r9,_TIF_NEED_RESCHED
1207	beq	do_user_signal
1208
1209do_resched:			/* r10 contains MSR_KERNEL here */
1210	/* Note: We don't need to inform lockdep that we are enabling
1211	 * interrupts here. As far as it knows, they are already enabled
1212	 */
1213	ori	r10,r10,MSR_EE
1214	SYNC
1215	MTMSRD(r10)		/* hard-enable interrupts */
1216	bl	schedule
1217recheck:
1218	/* Note: And we don't tell it that we are disabling them again
1219	 * either. The disable/enable cycles used to peek at
1220	 * TI_FLAGS aren't advertised.
1221	 */
1222	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1223	SYNC
1224	MTMSRD(r10)		/* disable interrupts */
1225	lwz	r9,TI_FLAGS(r2)
1226	andi.	r0,r9,_TIF_NEED_RESCHED
1227	bne-	do_resched
1228	andi.	r0,r9,_TIF_USER_WORK_MASK
1229	beq	restore_user
1230do_user_signal:			/* r10 contains MSR_KERNEL here */
1231	ori	r10,r10,MSR_EE
1232	SYNC
1233	MTMSRD(r10)		/* hard-enable interrupts */
1234	/* save r13-r31 in the exception frame, if not already done */
1235	lwz	r3,_TRAP(r1)
1236	andi.	r0,r3,1
1237	beq	2f
1238	SAVE_NVGPRS(r1)
1239	rlwinm	r3,r3,0,0,30
1240	stw	r3,_TRAP(r1)
12412:	addi	r3,r1,STACK_FRAME_OVERHEAD
1242	mr	r4,r9
1243	bl	do_notify_resume
1244	REST_NVGPRS(r1)
1245	b	recheck
1246
1247/*
1248 * We come here when we are at the end of handling an exception
1249 * that occurred at a place where taking an exception will lose
1250 * state information, such as the contents of SRR0 and SRR1.
1251 */
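/*
 * r12 holds the interrupted NIP here: if it falls inside the
 * exc_exit_restart window we simply restart the exception exit sequence,
 * otherwise the process is killed via unrecoverable_exception().
 */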
1252nonrecoverable:
1253	lis	r10,exc_exit_restart_end@ha
1254	addi	r10,r10,exc_exit_restart_end@l
1255	cmplw	r12,r10
1256	bge	3f
1257	lis	r11,exc_exit_restart@ha
1258	addi	r11,r11,exc_exit_restart@l
1259	cmplw	r12,r11
1260	blt	3f
1261	lis	r10,ee_restarts@ha
1262	lwz	r12,ee_restarts@l(r10)
1263	addi	r12,r12,1
1264	stw	r12,ee_restarts@l(r10)
1265	mr	r12,r11		/* restart at exc_exit_restart */
1266	blr
12673:	/* OK, we can't recover, kill this process */
1268	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1269BEGIN_FTR_SECTION
1270	blr
1271END_FTR_SECTION_IFSET(CPU_FTR_601)
1272	lwz	r3,_TRAP(r1)
1273	andi.	r0,r3,1
1274	beq	5f
1275	SAVE_NVGPRS(r1)
1276	rlwinm	r3,r3,0,0,30
1277	stw	r3,_TRAP(r1)
12785:	mfspr	r2,SPRN_SPRG_THREAD
1279	addi	r2,r2,-THREAD
1280	tovirt(r2,r2)			/* set back r2 to current */
12814:	addi	r3,r1,STACK_FRAME_OVERHEAD
1282	bl	unrecoverable_exception
1283	/* shouldn't return */
1284	b	4b
1285
1286	.section .bss
1287	.align	2
1288ee_restarts:
1289	.space	4
1290	.previous
1291
1292/*
1293 * PROM code for specific machines follows.  Put it
1294 * here so it's easy to add arch-specific sections later.
1295 * -- Cort
1296 */
1297#ifdef CONFIG_PPC_RTAS
1298/*
1299 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1300 * called with the MMU off.
1301 */
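/*
 * enter_rtas saves the caller's MSR on the stack, records the physical
 * stack pointer in THREAD+RTAS_SP, and branches to the RTAS entry point
 * through SRR0/SRR1 with translation (MSR_IR/MSR_DR) turned off; the
 * code at label 1 below restores everything on the way back.
 */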
1302_GLOBAL(enter_rtas)
1303	stwu	r1,-INT_FRAME_SIZE(r1)
1304	mflr	r0
1305	stw	r0,INT_FRAME_SIZE+4(r1)
1306	LOAD_REG_ADDR(r4, rtas)
1307	lis	r6,1f@ha	/* physical return address for rtas */
1308	addi	r6,r6,1f@l
1309	tophys(r6,r6)
1310	tophys(r7,r1)
1311	lwz	r8,RTASENTRY(r4)
1312	lwz	r4,RTASBASE(r4)
1313	mfmsr	r9
1314	stw	r9,8(r1)
1315	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1316	SYNC			/* disable interrupts so SRR0/1 */
1317	MTMSRD(r0)		/* don't get trashed */
1318	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1319	mtlr	r6
1320	stw	r7, THREAD + RTAS_SP(r2)
1321	mtspr	SPRN_SRR0,r8
1322	mtspr	SPRN_SRR1,r9
1323	RFI
13241:	tophys(r9,r1)
1325	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1326	lwz	r9,8(r9)	/* original msr value */
1327	addi	r1,r1,INT_FRAME_SIZE
1328	li	r0,0
1329	tophys(r7, r2)
1330	stw	r0, THREAD + RTAS_SP(r7)
1331	mtspr	SPRN_SRR0,r8
1332	mtspr	SPRN_SRR1,r9
1333	RFI			/* return to caller */
1334
1335	.globl	machine_check_in_rtas
1336machine_check_in_rtas:
1337	twi	31,0,0
1338	/* XXX load up BATs and panic */
1339
1340#endif /* CONFIG_PPC_RTAS */
1341