xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision a17922de)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ptrace.h>
35#include <asm/export.h>
36
37/*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
39 */
40#if MSR_KERNEL >= 0x10000
41#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
42#else
43#define LOAD_MSR_KERNEL(r, x)	li r,(x)
44#endif
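/*
 * li can only materialise a 16-bit signed immediate, so values with bits set
 * above 0xffff need the two-instruction lis/ori form.  For example (an
 * illustrative value, not one used here), LOAD_MSR_KERNEL(r10, 0x00021200)
 * expands to "lis r10,0x2; ori r10,r10,0x1200".
 */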
45
46/*
47 * Align to 4k (one 4096-byte page) so that all functions modifying
48 * srr0/srr1 fit into a single page and no TLB miss can occur between
49 * the modification of srr0/srr1 and the associated rfi.
50 */
51	.align	12
52
53#ifdef CONFIG_BOOKE
54	.globl	mcheck_transfer_to_handler
55mcheck_transfer_to_handler:
56	mfspr	r0,SPRN_DSRR0
57	stw	r0,_DSRR0(r11)
58	mfspr	r0,SPRN_DSRR1
59	stw	r0,_DSRR1(r11)
60	/* fall through */
61
62	.globl	debug_transfer_to_handler
63debug_transfer_to_handler:
64	mfspr	r0,SPRN_CSRR0
65	stw	r0,_CSRR0(r11)
66	mfspr	r0,SPRN_CSRR1
67	stw	r0,_CSRR1(r11)
68	/* fall through */
69
70	.globl	crit_transfer_to_handler
71crit_transfer_to_handler:
72#ifdef CONFIG_PPC_BOOK3E_MMU
73	mfspr	r0,SPRN_MAS0
74	stw	r0,MAS0(r11)
75	mfspr	r0,SPRN_MAS1
76	stw	r0,MAS1(r11)
77	mfspr	r0,SPRN_MAS2
78	stw	r0,MAS2(r11)
79	mfspr	r0,SPRN_MAS3
80	stw	r0,MAS3(r11)
81	mfspr	r0,SPRN_MAS6
82	stw	r0,MAS6(r11)
83#ifdef CONFIG_PHYS_64BIT
84	mfspr	r0,SPRN_MAS7
85	stw	r0,MAS7(r11)
86#endif /* CONFIG_PHYS_64BIT */
87#endif /* CONFIG_PPC_BOOK3E_MMU */
88#ifdef CONFIG_44x
89	mfspr	r0,SPRN_MMUCR
90	stw	r0,MMUCR(r11)
91#endif
92	mfspr	r0,SPRN_SRR0
93	stw	r0,_SRR0(r11)
94	mfspr	r0,SPRN_SRR1
95	stw	r0,_SRR1(r11)
96
97	/* save the old stack limit, then set a new limit based on the
98	 * current stack so that the thread_info struct at the base of
99	 * the stack stays protected
100	 */
101	mfspr	r8,SPRN_SPRG_THREAD
102	lwz	r0,KSP_LIMIT(r8)
103	stw	r0,SAVED_KSP_LIMIT(r11)
104	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
105	stw	r0,KSP_LIMIT(r8)
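	/*
	 * The rlwimi above keeps the low THREAD_SHIFT bits of the saved
	 * limit and takes the upper bits from r1, i.e. roughly:
	 *   new_limit = (r1 & ~(THREAD_SIZE-1)) | (old_limit & (THREAD_SIZE-1))
	 * so the same thread_info guard offset is applied to the stack we
	 * are actually running on.
	 */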
106	/* fall through */
107#endif
108
109#ifdef CONFIG_40x
110	.globl	crit_transfer_to_handler
111crit_transfer_to_handler:
112	lwz	r0,crit_r10@l(0)
113	stw	r0,GPR10(r11)
114	lwz	r0,crit_r11@l(0)
115	stw	r0,GPR11(r11)
116	mfspr	r0,SPRN_SRR0
117	stw	r0,crit_srr0@l(0)
118	mfspr	r0,SPRN_SRR1
119	stw	r0,crit_srr1@l(0)
120
121	/* save the old stack limit, then set a new limit based on the
122	 * current stack so that the thread_info struct at the base of
123	 * the stack stays protected
124	 */
125	mfspr	r8,SPRN_SPRG_THREAD
126	lwz	r0,KSP_LIMIT(r8)
127	stw	r0,saved_ksp_limit@l(0)
128	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
129	stw	r0,KSP_LIMIT(r8)
130	/* fall through */
131#endif
132
133/*
134 * This code finishes saving the registers to the exception frame
135 * and jumps to the appropriate handler for the exception, turning
136 * on address translation.
137 * Note that we rely on the caller having set cr0.eq iff the exception
138 * occurred in kernel mode (i.e. MSR:PR = 0).
139 */
140	.globl	transfer_to_handler_full
141transfer_to_handler_full:
142	SAVE_NVGPRS(r11)
143	/* fall through */
144
145	.globl	transfer_to_handler
146transfer_to_handler:
147	stw	r2,GPR2(r11)
148	stw	r12,_NIP(r11)
149	stw	r9,_MSR(r11)
150	andi.	r2,r9,MSR_PR
151	mfctr	r12
152	mfspr	r2,SPRN_XER
153	stw	r12,_CTR(r11)
154	stw	r2,_XER(r11)
155	mfspr	r12,SPRN_SPRG_THREAD
156	addi	r2,r12,-THREAD
157	tovirt(r2,r2)			/* set r2 to current */
158	beq	2f			/* if from user, fix up THREAD.regs */
159	addi	r11,r1,STACK_FRAME_OVERHEAD
160	stw	r11,PT_REGS(r12)
161#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
162	/* Check to see if the dbcr0 register is set up to debug.  Use the
163	   internal debug mode bit to do this. */
164	lwz	r12,THREAD_DBCR0(r12)
165	andis.	r12,r12,DBCR0_IDM@h
166	beq+	3f
167	/* From user and task is ptraced - load up global dbcr0 */
168	li	r12,-1			/* clear all pending debug events */
169	mtspr	SPRN_DBSR,r12
170	lis	r11,global_dbcr0@ha
171	tophys(r11,r11)
172	addi	r11,r11,global_dbcr0@l
173#ifdef CONFIG_SMP
174	CURRENT_THREAD_INFO(r9, r1)
175	lwz	r9,TI_CPU(r9)
176	slwi	r9,r9,3
177	add	r11,r11,r9
178#endif
179	lwz	r12,0(r11)
180	mtspr	SPRN_DBCR0,r12
181	lwz	r12,4(r11)
182	addi	r12,r12,-1
183	stw	r12,4(r11)
184#endif
185#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
186	CURRENT_THREAD_INFO(r9, r1)
187	tophys(r9, r9)
188	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
189#endif
190
191	b	3f
192
1932:	/* if from kernel, check interrupted DOZE/NAP mode and
194         * check for stack overflow
195         */
196	lwz	r9,KSP_LIMIT(r12)
197	cmplw	r1,r9			/* if r1 <= ksp_limit */
198	ble-	stack_ovf		/* then the kernel stack overflowed */
1995:
200#if defined(CONFIG_6xx) || defined(CONFIG_E500)
201	CURRENT_THREAD_INFO(r9, r1)
202	tophys(r9,r9)			/* check local flags */
203	lwz	r12,TI_LOCAL_FLAGS(r9)
204	mtcrf	0x01,r12
205	bt-	31-TLF_NAPPING,4f
206	bt-	31-TLF_SLEEPING,7f
207#endif /* CONFIG_6xx || CONFIG_E500 */
208	.globl transfer_to_handler_cont
209transfer_to_handler_cont:
2103:
211	mflr	r9
212	lwz	r11,0(r9)		/* virtual address of handler */
213	lwz	r9,4(r9)		/* where to go when done */
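	/*
	 * The exception prologs reach us via "bl transfer_to_handler" (or one
	 * of its aliases) immediately followed by two literal words: the
	 * handler address and the address to return to when the handler is
	 * done (see the bl transfer_to_handler_full near the end of this
	 * file for an example).  LR therefore points at that pair, which is
	 * what the two loads above pick up.
	 */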
214#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
215	mtspr	SPRN_NRI, r0
216#endif
217#ifdef CONFIG_TRACE_IRQFLAGS
218	lis	r12,reenable_mmu@h
219	ori	r12,r12,reenable_mmu@l
220	mtspr	SPRN_SRR0,r12
221	mtspr	SPRN_SRR1,r10
222	SYNC
223	RFI
224reenable_mmu:				/* re-enable mmu so we can */
225	mfmsr	r10
226	lwz	r12,_MSR(r1)
227	xor	r10,r10,r12
228	andi.	r10,r10,MSR_EE		/* Did EE change? */
229	beq	1f
230
231	/*
232	 * trace_hardirqs_off uses CALLER_ADDR0 and CALLER_ADDR1.  If we came
233	 * from user mode there is only one stack frame on the stack, and
234	 * accessing CALLER_ADDR1 would oops, so we need to create a dummy
235	 * stack frame to keep trace_hardirqs_off happy.
236	 *
237	 * This is handy because we also need to save a bunch of GPRs:
238	 * r3 can be different from GPR3(r1) at this point, r9 and r11
239	 * contain the old MSR and the handler address respectively, and
240	 * r4 & r5 can contain page fault arguments that need to be passed
241	 * along as well.  r12, CCR, CTR, XER etc. are left clobbered as
242	 * they aren't useful past this point (they aren't syscall
243	 * arguments); the rest is restored from the exception frame.
244	 */
245	stwu	r1,-32(r1)
246	stw	r9,8(r1)
247	stw	r11,12(r1)
248	stw	r3,16(r1)
249	stw	r4,20(r1)
250	stw	r5,24(r1)
251	bl	trace_hardirqs_off
252	lwz	r5,24(r1)
253	lwz	r4,20(r1)
254	lwz	r3,16(r1)
255	lwz	r11,12(r1)
256	lwz	r9,8(r1)
257	addi	r1,r1,32
258	lwz	r0,GPR0(r1)
259	lwz	r6,GPR6(r1)
260	lwz	r7,GPR7(r1)
261	lwz	r8,GPR8(r1)
2621:	mtctr	r11
263	mtlr	r9
264	bctr				/* jump to handler */
265#else /* CONFIG_TRACE_IRQFLAGS */
266	mtspr	SPRN_SRR0,r11
267	mtspr	SPRN_SRR1,r10
268	mtlr	r9
269	SYNC
270	RFI				/* jump to handler, enable MMU */
271#endif /* CONFIG_TRACE_IRQFLAGS */
272
273#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2744:	rlwinm	r12,r12,0,~_TLF_NAPPING
275	stw	r12,TI_LOCAL_FLAGS(r9)
276	b	power_save_ppc32_restore
277
2787:	rlwinm	r12,r12,0,~_TLF_SLEEPING
279	stw	r12,TI_LOCAL_FLAGS(r9)
280	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
281	rlwinm	r9,r9,0,~MSR_EE
282	lwz	r12,_LINK(r11)		/* and return to address in LR */
283	b	fast_exception_return
284#endif
285
286/*
287 * On kernel stack overflow, load up an initial stack pointer
288 * and call StackOverflow(regs), which should not return.
289 */
290stack_ovf:
291	/* sometimes we use a statically-allocated stack, which is OK. */
292	lis	r12,_end@h
293	ori	r12,r12,_end@l
294	cmplw	r1,r12
295	ble	5b			/* r1 <= &_end is OK */
296	SAVE_NVGPRS(r11)
297	addi	r3,r1,STACK_FRAME_OVERHEAD
298	lis	r1,init_thread_union@ha
299	addi	r1,r1,init_thread_union@l
300	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
301	lis	r9,StackOverflow@ha
302	addi	r9,r9,StackOverflow@l
303	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
304#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
305	mtspr	SPRN_NRI, r0
306#endif
307	mtspr	SPRN_SRR0,r9
308	mtspr	SPRN_SRR1,r10
309	SYNC
310	RFI
311
312/*
313 * Handle a system call.
314 */
315	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
316	.stabs	"entry_32.S",N_SO,0,0,0f
3170:
318
319_GLOBAL(DoSyscall)
320	stw	r3,ORIG_GPR3(r1)
321	li	r12,0
322	stw	r12,RESULT(r1)
323	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
324	rlwinm	r11,r11,0,4,2
325	stw	r11,_CCR(r1)
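	/*
	 * The rlwinm above uses a wrapped mask (MB=4, ME=2), which keeps
	 * every bit of the saved CR image except bit 3, i.e. CR0[SO].
	 * The syscall exit path sets SO again to report an error, so it
	 * must start out clear here.
	 */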
326#ifdef CONFIG_TRACE_IRQFLAGS
327	/* Returning from a syscall can (and generally will) hard-enable
328	 * interrupts.  You aren't supposed to call a syscall with
329	 * interrupts disabled in the first place; however, to make sure
330	 * we get it right vs. lockdep if that happens, we force the
331	 * hard enable here, with appropriate tracing, if we see that
332	 * we have been called with interrupts off.
333	 */
334	mfmsr	r11
335	andi.	r12,r11,MSR_EE
336	bne+	1f
337	/* We came in with interrupts disabled, we enable them now */
338	bl	trace_hardirqs_on
339	mfmsr	r11
340	lwz	r0,GPR0(r1)
341	lwz	r3,GPR3(r1)
342	lwz	r4,GPR4(r1)
343	ori	r11,r11,MSR_EE
344	lwz	r5,GPR5(r1)
345	lwz	r6,GPR6(r1)
346	lwz	r7,GPR7(r1)
347	lwz	r8,GPR8(r1)
348	mtmsr	r11
3491:
350#endif /* CONFIG_TRACE_IRQFLAGS */
351	CURRENT_THREAD_INFO(r10, r1)
352	lwz	r11,TI_FLAGS(r10)
353	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
354	bne-	syscall_dotrace
355syscall_dotrace_cont:
356	cmplwi	0,r0,NR_syscalls
357	lis	r10,sys_call_table@h
358	ori	r10,r10,sys_call_table@l
359	slwi	r0,r0,2
360	bge-	66f
361	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
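	/*
	 * sys_call_table is an array of 32-bit function pointers, so the
	 * slwi above turned the syscall number into a byte offset for this
	 * indexed load.
	 */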
362	mtlr	r10
363	addi	r9,r1,STACK_FRAME_OVERHEAD
364	PPC440EP_ERR42
365	blrl			/* Call handler */
366	.globl	ret_from_syscall
367ret_from_syscall:
368#ifdef CONFIG_DEBUG_RSEQ
369	/* Check whether the syscall is issued inside a restartable sequence */
370	stw	r3,GPR3(r1)
371	addi    r3,r1,STACK_FRAME_OVERHEAD
372	bl      rseq_syscall
373	lwz	r3,GPR3(r1)
374#endif
375	mr	r6,r3
376	CURRENT_THREAD_INFO(r12, r1)
377	/* disable interrupts so current_thread_info()->flags can't change */
378	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
379	/* Note: We don't bother telling lockdep about it */
380	SYNC
381	MTMSRD(r10)
382	lwz	r9,TI_FLAGS(r12)
383	li	r8,-MAX_ERRNO
384	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
385	bne-	syscall_exit_work
386	cmplw	0,r3,r8
387	blt+	syscall_exit_cont
388	lwz	r11,_CCR(r1)			/* Load CR */
389	neg	r3,r3
390	oris	r11,r11,0x1000	/* Set SO bit in CR */
391	stw	r11,_CCR(r1)
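	/*
	 * Return values in [-MAX_ERRNO, -1] denote failure: the value is
	 * negated into a positive errno and CR0[SO] (0x10000000 in the saved
	 * CR image) is set, which is how user space detects a failed syscall
	 * on powerpc.
	 */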
392syscall_exit_cont:
393	lwz	r8,_MSR(r1)
394#ifdef CONFIG_TRACE_IRQFLAGS
395	/* If we are going to return from the syscall with interrupts
396	 * off, we trace that here.  It shouldn't happen, but we want
397	 * to catch it if it does.
398	 */
399	andi.	r10,r8,MSR_EE
400	bne+	1f
401	stw	r3,GPR3(r1)
402	bl      trace_hardirqs_off
403	lwz	r3,GPR3(r1)
4041:
405#endif /* CONFIG_TRACE_IRQFLAGS */
406#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
407	/* If the process has its own DBCR0 value, load it up.  The internal
408	   debug mode bit tells us that dbcr0 should be loaded. */
409	lwz	r0,THREAD+THREAD_DBCR0(r2)
410	andis.	r10,r0,DBCR0_IDM@h
411	bnel-	load_dbcr0
412#endif
413#ifdef CONFIG_44x
414BEGIN_MMU_FTR_SECTION
415	lis	r4,icache_44x_need_flush@ha
416	lwz	r5,icache_44x_need_flush@l(r4)
417	cmplwi	cr0,r5,0
418	bne-	2f
4191:
420END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
421#endif /* CONFIG_44x */
422BEGIN_FTR_SECTION
423	lwarx	r7,0,r1
424END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
425	stwcx.	r0,0,r1			/* to clear the reservation */
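	/*
	 * The dummy stwcx. above clears any lwarx reservation the interrupted
	 * context may still hold, so it can't succeed spuriously after we
	 * return.  CPUs flagged with CPU_FTR_NEED_PAIRED_STWCX want that
	 * stwcx. preceded by a matching lwarx, hence the feature section.
	 */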
426#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
427	andi.	r4,r8,MSR_PR
428	beq	3f
429	CURRENT_THREAD_INFO(r4, r1)
430	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4313:
432#endif
433	lwz	r4,_LINK(r1)
434	lwz	r5,_CCR(r1)
435	mtlr	r4
436	mtcr	r5
437	lwz	r7,_NIP(r1)
438	lwz	r2,GPR2(r1)
439	lwz	r1,GPR1(r1)
440#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
441	mtspr	SPRN_NRI, r0
442#endif
443	mtspr	SPRN_SRR0,r7
444	mtspr	SPRN_SRR1,r8
445	SYNC
446	RFI
447#ifdef CONFIG_44x
4482:	li	r7,0
449	iccci	r0,r0
450	stw	r7,icache_44x_need_flush@l(r4)
451	b	1b
452#endif  /* CONFIG_44x */
453
45466:	li	r3,-ENOSYS
455	b	ret_from_syscall
456
457	.globl	ret_from_fork
458ret_from_fork:
459	REST_NVGPRS(r1)
460	bl	schedule_tail
461	li	r3,0
462	b	ret_from_syscall
463
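/*
 * For a kernel thread, copy_thread() leaves the thread function in r14 and
 * its argument in r15; both are non-volatile, so they survive _switch().
 * We call the function with r3 = arg and, if it ever returns, leave through
 * the normal syscall return path with a result of 0.
 */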
464	.globl	ret_from_kernel_thread
465ret_from_kernel_thread:
466	REST_NVGPRS(r1)
467	bl	schedule_tail
468	mtlr	r14
469	mr	r3,r15
470	PPC440EP_ERR42
471	blrl
472	li	r3,0
473	b	ret_from_syscall
474
475/* Traced system call support */
476syscall_dotrace:
477	SAVE_NVGPRS(r1)
478	li	r0,0xc00
479	stw	r0,_TRAP(r1)
480	addi	r3,r1,STACK_FRAME_OVERHEAD
481	bl	do_syscall_trace_enter
482	/*
483	 * Restore argument registers possibly just changed.
484	 * We use the return value of do_syscall_trace_enter
485	 * for call number to look up in the table (r0).
486	 */
487	mr	r0,r3
488	lwz	r3,GPR3(r1)
489	lwz	r4,GPR4(r1)
490	lwz	r5,GPR5(r1)
491	lwz	r6,GPR6(r1)
492	lwz	r7,GPR7(r1)
493	lwz	r8,GPR8(r1)
494	REST_NVGPRS(r1)
495
496	cmplwi	r0,NR_syscalls
497	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
498	bge-	ret_from_syscall
499	b	syscall_dotrace_cont
500
501syscall_exit_work:
502	andi.	r0,r9,_TIF_RESTOREALL
503	beq+	0f
504	REST_NVGPRS(r1)
505	b	2f
5060:	cmplw	0,r3,r8
507	blt+	1f
508	andi.	r0,r9,_TIF_NOERROR
509	bne-	1f
510	lwz	r11,_CCR(r1)			/* Load CR */
511	neg	r3,r3
512	oris	r11,r11,0x1000	/* Set SO bit in CR */
513	stw	r11,_CCR(r1)
514
5151:	stw	r6,RESULT(r1)	/* Save result */
516	stw	r3,GPR3(r1)	/* Update return value */
5172:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
518	beq	4f
519
520	/* Clear per-syscall TIF flags if any are set.  */
521
522	li	r11,_TIF_PERSYSCALL_MASK
523	addi	r12,r12,TI_FLAGS
5243:	lwarx	r8,0,r12
525	andc	r8,r8,r11
526#ifdef CONFIG_IBM405_ERR77
527	dcbt	0,r12
528#endif
529	stwcx.	r8,0,r12
530	bne-	3b
531	subi	r12,r12,TI_FLAGS
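	/* The lwarx/stwcx. loop above clears the per-syscall flags atomically
	 * so a concurrent update to TI_FLAGS (e.g. from an interrupt) is not
	 * lost; the dcbt is only the IBM405 erratum 77 workaround.
	 */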
532
5334:	/* Anything which requires enabling interrupts? */
534	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
535	beq	ret_from_except
536
537	/* Re-enable interrupts. There is no need to trace that with
538	 * lockdep as we are supposed to have IRQs on at this point
539	 */
540	ori	r10,r10,MSR_EE
541	SYNC
542	MTMSRD(r10)
543
544	/* Save NVGPRS if they're not saved already */
545	lwz	r4,_TRAP(r1)
546	andi.	r4,r4,1
547	beq	5f
548	SAVE_NVGPRS(r1)
549	li	r4,0xc00
550	stw	r4,_TRAP(r1)
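	/* An odd value in _TRAP means the exception prolog saved only the
	 * volatile registers; having just saved the non-volatiles too, we
	 * record the full set by storing an even trap value (0xc00).
	 */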
5515:
552	addi	r3,r1,STACK_FRAME_OVERHEAD
553	bl	do_syscall_trace_leave
554	b	ret_from_except_full
555
556/*
557 * The fork/clone functions need to copy the full register set into
558 * the child process. Therefore we need to save all the nonvolatile
559 * registers (r13 - r31) before calling the C code.
560 */
561	.globl	ppc_fork
562ppc_fork:
563	SAVE_NVGPRS(r1)
564	lwz	r0,_TRAP(r1)
565	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
566	stw	r0,_TRAP(r1)		/* register set saved */
567	b	sys_fork
568
569	.globl	ppc_vfork
570ppc_vfork:
571	SAVE_NVGPRS(r1)
572	lwz	r0,_TRAP(r1)
573	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
574	stw	r0,_TRAP(r1)		/* register set saved */
575	b	sys_vfork
576
577	.globl	ppc_clone
578ppc_clone:
579	SAVE_NVGPRS(r1)
580	lwz	r0,_TRAP(r1)
581	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
582	stw	r0,_TRAP(r1)		/* register set saved */
583	b	sys_clone
584
585	.globl	ppc_swapcontext
586ppc_swapcontext:
587	SAVE_NVGPRS(r1)
588	lwz	r0,_TRAP(r1)
589	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
590	stw	r0,_TRAP(r1)		/* register set saved */
591	b	sys_swapcontext
592
593/*
594 * Top-level page fault handling.
595 * This is in assembler because if do_page_fault tells us that
596 * it is a bad kernel page fault, we want to save the non-volatile
597 * registers before calling bad_page_fault.
598 */
599	.globl	handle_page_fault
600handle_page_fault:
601	stw	r4,_DAR(r1)
602	addi	r3,r1,STACK_FRAME_OVERHEAD
603#ifdef CONFIG_6xx
604	andis.  r0,r5,DSISR_DABRMATCH@h
605	bne-    handle_dabr_fault
606#endif
607	bl	do_page_fault
608	cmpwi	r3,0
609	beq+	ret_from_except
610	SAVE_NVGPRS(r1)
611	lwz	r0,_TRAP(r1)
612	clrrwi	r0,r0,1
613	stw	r0,_TRAP(r1)
614	mr	r5,r3
615	addi	r3,r1,STACK_FRAME_OVERHEAD
616	lwz	r4,_DAR(r1)
617	bl	bad_page_fault
618	b	ret_from_except_full
619
620#ifdef CONFIG_6xx
621	/* We have a data breakpoint exception - handle it */
622handle_dabr_fault:
623	SAVE_NVGPRS(r1)
624	lwz	r0,_TRAP(r1)
625	clrrwi	r0,r0,1
626	stw	r0,_TRAP(r1)
627	bl      do_break
628	b	ret_from_except_full
629#endif
630
631/*
632 * This routine switches between two different tasks.  The process
633 * state of one is saved on its kernel stack.  Then the state
634 * of the other is restored from its kernel stack.  The memory
635 * management hardware is updated to the second process's state.
636 * Finally, we can return to the second process.
637 * On entry, r3 points to the THREAD for the current task, r4
638 * points to the THREAD for the new task.
639 *
640 * This routine is always called with interrupts disabled.
641 *
642 * Note: there are two ways to get to the "going out" portion
643 * of this code; either by coming in via the entry (_switch)
644 * or via "fork" which must set up an environment equivalent
645 * to the "_switch" path.  If you change this, you'll have to
646 * change the fork code also.
647 *
648 * The code which creates the new task context is in 'copy_thread'
649 * in arch/powerpc/kernel/process.c
650 */
651_GLOBAL(_switch)
652	stwu	r1,-INT_FRAME_SIZE(r1)
653	mflr	r0
654	stw	r0,INT_FRAME_SIZE+4(r1)
655	/* r3-r12 are caller saved -- Cort */
656	SAVE_NVGPRS(r1)
657	stw	r0,_NIP(r1)	/* Return to switch caller */
658	mfmsr	r11
659	li	r0,MSR_FP	/* Disable floating-point */
660#ifdef CONFIG_ALTIVEC
661BEGIN_FTR_SECTION
662	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
663	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
664	stw	r12,THREAD+THREAD_VRSAVE(r2)
665END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
666#endif /* CONFIG_ALTIVEC */
667#ifdef CONFIG_SPE
668BEGIN_FTR_SECTION
669	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
670	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
671	stw	r12,THREAD+THREAD_SPEFSCR(r2)
672END_FTR_SECTION_IFSET(CPU_FTR_SPE)
673#endif /* CONFIG_SPE */
674	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
675	beq+	1f
676	andc	r11,r11,r0
677	MTMSRD(r11)
678	isync
6791:	stw	r11,_MSR(r1)
680	mfcr	r10
681	stw	r10,_CCR(r1)
682	stw	r1,KSP(r3)	/* Set old stack pointer */
683
684#ifdef CONFIG_SMP
685	/* We need a sync somewhere here to make sure that if the
686	 * previous task gets rescheduled on another CPU, it sees all
687	 * stores it has performed on this one.
688	 */
689	sync
690#endif /* CONFIG_SMP */
691
692	tophys(r0,r4)
693	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
694	lwz	r1,KSP(r4)	/* Load new stack pointer */
695
696	/* save the old current 'last' for return value */
697	mr	r3,r2
698	addi	r2,r4,-THREAD	/* Update current */
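	/* On 32-bit, r2 holds the current task pointer and THREAD is the
	 * offset of the thread_struct within task_struct, so the new task
	 * is its THREAD pointer minus THREAD.  The old task goes back in
	 * r3 as the 'last' return value of _switch().
	 */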
699
700#ifdef CONFIG_ALTIVEC
701BEGIN_FTR_SECTION
702	lwz	r0,THREAD+THREAD_VRSAVE(r2)
703	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
704END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
705#endif /* CONFIG_ALTIVEC */
706#ifdef CONFIG_SPE
707BEGIN_FTR_SECTION
708	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
709	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
710END_FTR_SECTION_IFSET(CPU_FTR_SPE)
711#endif /* CONFIG_SPE */
712
713	lwz	r0,_CCR(r1)
714	mtcrf	0xFF,r0
715	/* r3-r12 are destroyed -- Cort */
716	REST_NVGPRS(r1)
717
718	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
719	mtlr	r4
720	addi	r1,r1,INT_FRAME_SIZE
721	blr
722
723	.globl	fast_exception_return
724fast_exception_return:
725#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
726	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
727	beq	1f			/* if not, we've got problems */
728#endif
729
7302:	REST_4GPRS(3, r11)
731	lwz	r10,_CCR(r11)
732	REST_GPR(1, r11)
733	mtcr	r10
734	lwz	r10,_LINK(r11)
735	mtlr	r10
736	REST_GPR(10, r11)
737#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
738	mtspr	SPRN_NRI, r0
739#endif
740	mtspr	SPRN_SRR1,r9
741	mtspr	SPRN_SRR0,r12
742	REST_GPR(9, r11)
743	REST_GPR(12, r11)
744	lwz	r11,GPR11(r11)
745	SYNC
746	RFI
747
748#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
749/* check if the exception happened in a restartable section */
7501:	lis	r3,exc_exit_restart_end@ha
751	addi	r3,r3,exc_exit_restart_end@l
752	cmplw	r12,r3
753	bge	3f
754	lis	r4,exc_exit_restart@ha
755	addi	r4,r4,exc_exit_restart@l
756	cmplw	r12,r4
757	blt	3f
758	lis	r3,fee_restarts@ha
759	tophys(r3,r3)
760	lwz	r5,fee_restarts@l(r3)
761	addi	r5,r5,1
762	stw	r5,fee_restarts@l(r3)
763	mr	r12,r4		/* restart at exc_exit_restart */
764	b	2b
765
766	.section .bss
767	.align	2
768fee_restarts:
769	.space	4
770	.previous
771
772/* aargh, a nonrecoverable interrupt, panic */
773/* aargh, we don't know which trap this is */
774/* but the 601 doesn't implement the RI bit, so assume it's OK */
7753:
776BEGIN_FTR_SECTION
777	b	2b
778END_FTR_SECTION_IFSET(CPU_FTR_601)
779	li	r10,-1
780	stw	r10,_TRAP(r11)
781	addi	r3,r1,STACK_FRAME_OVERHEAD
782	lis	r10,MSR_KERNEL@h
783	ori	r10,r10,MSR_KERNEL@l
784	bl	transfer_to_handler_full
785	.long	nonrecoverable_exception
786	.long	ret_from_except
787#endif
788
789	.globl	ret_from_except_full
790ret_from_except_full:
791	REST_NVGPRS(r1)
792	/* fall through */
793
794	.globl	ret_from_except
795ret_from_except:
796	/* Hard-disable interrupts so that current_thread_info()->flags
797	 * can't change between when we test it and when we return
798	 * from the interrupt. */
799	/* Note: We don't bother telling lockdep about it */
800	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
801	SYNC			/* Some chip revs have problems here... */
802	MTMSRD(r10)		/* disable interrupts */
803
804	lwz	r3,_MSR(r1)	/* Returning to user mode? */
805	andi.	r0,r3,MSR_PR
806	beq	resume_kernel
807
808user_exc_return:		/* r10 contains MSR_KERNEL here */
809	/* Check current_thread_info()->flags */
810	CURRENT_THREAD_INFO(r9, r1)
811	lwz	r9,TI_FLAGS(r9)
812	andi.	r0,r9,_TIF_USER_WORK_MASK
813	bne	do_work
814
815restore_user:
816#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
817	/* Check whether this process has its own DBCR0 value.  The internal
818	   debug mode bit tells us that dbcr0 should be loaded. */
819	lwz	r0,THREAD+THREAD_DBCR0(r2)
820	andis.	r10,r0,DBCR0_IDM@h
821	bnel-	load_dbcr0
822#endif
823#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
824	CURRENT_THREAD_INFO(r9, r1)
825	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
826#endif
827
828	b	restore
829
830/* N.B. the only way to get here is from the beq following ret_from_except. */
831resume_kernel:
832	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
833	CURRENT_THREAD_INFO(r9, r1)
834	lwz	r8,TI_FLAGS(r9)
835	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
836	beq+	1f
837
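	/* An emulated "stwu r1,-N(r1)" (e.g. while single-stepping a kprobed
	 * instruction) could not be completed earlier because its store
	 * target would fall inside the exception frame we are running on.
	 * Relocate our frame down to the new stack pointer recorded in GPR1,
	 * then perform the pending back-chain store by hand.
	 */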
838	addi	r8,r1,INT_FRAME_SIZE	/* r8 = old r1, the value the emulated stwu still has to store */
839
840	lwz	r3,GPR1(r1)
841	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
842	mr	r4,r1			/* src:  current exception frame */
843	mr	r1,r3			/* Reroute the trampoline frame to r1 */
844
845	/* Copy from the original to the trampoline. */
846	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
847	li	r6,0			/* start offset: 0 */
848	mtctr	r5
8492:	lwzx	r0,r6,r4
850	stwx	r0,r6,r3
851	addi	r6,r6,4
852	bdnz	2b
853
854	/* Do real store operation to complete stwu */
855	lwz	r5,GPR1(r1)
856	stw	r8,0(r5)
857
858	/* Clear _TIF_EMULATE_STACK_STORE flag */
859	lis	r11,_TIF_EMULATE_STACK_STORE@h
860	addi	r5,r9,TI_FLAGS
8610:	lwarx	r8,0,r5
862	andc	r8,r8,r11
863#ifdef CONFIG_IBM405_ERR77
864	dcbt	0,r5
865#endif
866	stwcx.	r8,0,r5
867	bne-	0b
8681:
869
870#ifdef CONFIG_PREEMPT
871	/* check current_thread_info->preempt_count */
872	lwz	r0,TI_PREEMPT(r9)
873	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
874	bne	restore
875	andi.	r8,r8,_TIF_NEED_RESCHED
876	beq+	restore
877	lwz	r3,_MSR(r1)
878	andi.	r0,r3,MSR_EE	/* interrupts off? */
879	beq	restore		/* don't schedule if so */
880#ifdef CONFIG_TRACE_IRQFLAGS
881	/* Lockdep thinks irqs are enabled, we need to call
882	 * preempt_schedule_irq with IRQs off, so we inform lockdep
883	 * now that we -did- turn them off already
884	 */
885	bl	trace_hardirqs_off
886#endif
8871:	bl	preempt_schedule_irq
888	CURRENT_THREAD_INFO(r9, r1)
889	lwz	r3,TI_FLAGS(r9)
890	andi.	r0,r3,_TIF_NEED_RESCHED
891	bne-	1b
892#ifdef CONFIG_TRACE_IRQFLAGS
893	/* And now, to properly rebalance the above, we tell lockdep they
894	 * are being turned back on, which will happen when we return
895	 */
896	bl	trace_hardirqs_on
897#endif
898#endif /* CONFIG_PREEMPT */
899
900	/* interrupts are hard-disabled at this point */
901restore:
902#ifdef CONFIG_44x
903BEGIN_MMU_FTR_SECTION
904	b	1f
905END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
906	lis	r4,icache_44x_need_flush@ha
907	lwz	r5,icache_44x_need_flush@l(r4)
908	cmplwi	cr0,r5,0
909	beq+	1f
910	li	r6,0
911	iccci	r0,r0
912	stw	r6,icache_44x_need_flush@l(r4)
9131:
914#endif  /* CONFIG_44x */
915
916	lwz	r9,_MSR(r1)
917#ifdef CONFIG_TRACE_IRQFLAGS
918	/* Lockdep doesn't know that IRQs are temporarily turned off in this
919	 * assembly code while we peek at TI_FLAGS and such.  However, we do
920	 * need to inform it if the exception turned interrupts off and we
921	 * are about to turn them back on.
922	 *
923	 * The problem, sadly, is that we don't know whether this exception
924	 * turned interrupts off or not, so we always tell lockdep that they
925	 * are being turned on here when we go back to wherever we came from
926	 * with EE on, even if that means tracking some redundant calls.
927	 * Maybe later we could encode what the exception did somewhere, or
928	 * test the exception type in pt_regs, but that sounds like overkill.
929	 */
930	andi.	r10,r9,MSR_EE
931	beq	1f
932	/*
933	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
934	 * which is the stack frame here, we need to force a stack frame
935	 * in case we came from user space.
936	 */
937	stwu	r1,-32(r1)
938	mflr	r0
939	stw	r0,4(r1)
940	stwu	r1,-32(r1)
941	bl	trace_hardirqs_on
942	lwz	r1,0(r1)
943	lwz	r1,0(r1)
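	/* Each stwu above saved the previous r1 at offset 0 of its new
	 * frame, so the two loads through the back chain unwind both
	 * dummy frames.
	 */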
944	lwz	r9,_MSR(r1)
9451:
946#endif /* CONFIG_TRACE_IRQFLAGS */
947
948	lwz	r0,GPR0(r1)
949	lwz	r2,GPR2(r1)
950	REST_4GPRS(3, r1)
951	REST_2GPRS(7, r1)
952
953	lwz	r10,_XER(r1)
954	lwz	r11,_CTR(r1)
955	mtspr	SPRN_XER,r10
956	mtctr	r11
957
958	PPC405_ERR77(0,r1)
959BEGIN_FTR_SECTION
960	lwarx	r11,0,r1
961END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
962	stwcx.	r0,0,r1			/* to clear the reservation */
963
964#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
965	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
966	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
967
968	lwz	r10,_CCR(r1)
969	lwz	r11,_LINK(r1)
970	mtcrf	0xFF,r10
971	mtlr	r11
972
973	/*
974	 * Once we put values in SRR0 and SRR1, we are in a state
975	 * where exceptions are not recoverable, since taking an
976	 * exception will trash SRR0 and SRR1.  Therefore we clear the
977	 * MSR:RI bit to indicate this.  If we do take an exception,
978	 * we can't return to the point of the exception but we
979	 * can restart the exception exit path at the label
980	 * exc_exit_restart below.  -- paulus
981	 */
982	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
983	SYNC
984	MTMSRD(r10)		/* clear the RI bit */
985	.globl exc_exit_restart
986exc_exit_restart:
987	lwz	r12,_NIP(r1)
988#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
989	mtspr	SPRN_NRI, r0
990#endif
991	mtspr	SPRN_SRR0,r12
992	mtspr	SPRN_SRR1,r9
993	REST_4GPRS(9, r1)
994	lwz	r1,GPR1(r1)
995	.globl exc_exit_restart_end
996exc_exit_restart_end:
997	SYNC
998	RFI
999
1000#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1001	/*
1002	 * This is a bit different on 4xx/Book-E because it doesn't have
1003	 * the RI bit in the MSR.
1004	 * Ideally the TLB miss handler would check whether we have
1005	 * interrupted the exception exit path and restart it if so
1006	 * (well, maybe one day it will... :).
1007	 */
1008	lwz	r11,_LINK(r1)
1009	mtlr	r11
1010	lwz	r10,_CCR(r1)
1011	mtcrf	0xff,r10
1012	REST_2GPRS(9, r1)
1013	.globl exc_exit_restart
1014exc_exit_restart:
1015	lwz	r11,_NIP(r1)
1016	lwz	r12,_MSR(r1)
1017exc_exit_start:
1018	mtspr	SPRN_SRR0,r11
1019	mtspr	SPRN_SRR1,r12
1020	REST_2GPRS(11, r1)
1021	lwz	r1,GPR1(r1)
1022	.globl exc_exit_restart_end
1023exc_exit_restart_end:
1024	PPC405_ERR77_SYNC
1025	rfi
1026	b	.			/* prevent prefetch past rfi */
1027
1028/*
1029 * Returning from a critical interrupt in user mode doesn't need
1030 * to be any different from a normal exception.  For a critical
1031 * interrupt in the kernel, we just return (without checking for
1032 * preemption) since the interrupt may have happened at some crucial
1033 * place (e.g. inside the TLB miss handler), and because we will be
1034 * running with r1 pointing into critical_stack, not the current
1035 * process's kernel stack (and therefore current_thread_info() will
1036 * give the wrong answer).
1037 * We have to restore various SPRs that may have been in use at the
1038 * time of the critical interrupt.
1039 *
1040 */
1041#ifdef CONFIG_40x
1042#define PPC_40x_TURN_OFF_MSR_DR						    \
1043	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
1044	 * assume the instructions here are mapped by a pinned TLB entry */ \
1045	li	r10,MSR_IR;						    \
1046	mtmsr	r10;							    \
1047	isync;								    \
1048	tophys(r1, r1);
1049#else
1050#define PPC_40x_TURN_OFF_MSR_DR
1051#endif
1052
1053#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1054	REST_NVGPRS(r1);						\
1055	lwz	r3,_MSR(r1);						\
1056	andi.	r3,r3,MSR_PR;						\
1057	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1058	bne	user_exc_return;					\
1059	lwz	r0,GPR0(r1);						\
1060	lwz	r2,GPR2(r1);						\
1061	REST_4GPRS(3, r1);						\
1062	REST_2GPRS(7, r1);						\
1063	lwz	r10,_XER(r1);						\
1064	lwz	r11,_CTR(r1);						\
1065	mtspr	SPRN_XER,r10;						\
1066	mtctr	r11;							\
1067	PPC405_ERR77(0,r1);						\
1068	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1069	lwz	r11,_LINK(r1);						\
1070	mtlr	r11;							\
1071	lwz	r10,_CCR(r1);						\
1072	mtcrf	0xff,r10;						\
1073	PPC_40x_TURN_OFF_MSR_DR;					\
1074	lwz	r9,_DEAR(r1);						\
1075	lwz	r10,_ESR(r1);						\
1076	mtspr	SPRN_DEAR,r9;						\
1077	mtspr	SPRN_ESR,r10;						\
1078	lwz	r11,_NIP(r1);						\
1079	lwz	r12,_MSR(r1);						\
1080	mtspr	exc_lvl_srr0,r11;					\
1081	mtspr	exc_lvl_srr1,r12;					\
1082	lwz	r9,GPR9(r1);						\
1083	lwz	r12,GPR12(r1);						\
1084	lwz	r10,GPR10(r1);						\
1085	lwz	r11,GPR11(r1);						\
1086	lwz	r1,GPR1(r1);						\
1087	PPC405_ERR77_SYNC;						\
1088	exc_lvl_rfi;							\
1089	b	.;		/* prevent prefetch past exc_lvl_rfi */
1090
1091#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1092	lwz	r9,_##exc_lvl_srr0(r1);					\
1093	lwz	r10,_##exc_lvl_srr1(r1);				\
1094	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1095	mtspr	SPRN_##exc_lvl_srr1,r10;
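/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) expands (via ## pasting) to loads
 * of _CSRR0/_CSRR1 from the exception frame followed by mtspr to
 * SPRN_CSRR0/SPRN_CSRR1, undoing the saves made on entry to the
 * corresponding *_transfer_to_handler path.
 */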
1096
1097#if defined(CONFIG_PPC_BOOK3E_MMU)
1098#ifdef CONFIG_PHYS_64BIT
1099#define	RESTORE_MAS7							\
1100	lwz	r11,MAS7(r1);						\
1101	mtspr	SPRN_MAS7,r11;
1102#else
1103#define	RESTORE_MAS7
1104#endif /* CONFIG_PHYS_64BIT */
1105#define RESTORE_MMU_REGS						\
1106	lwz	r9,MAS0(r1);						\
1107	lwz	r10,MAS1(r1);						\
1108	lwz	r11,MAS2(r1);						\
1109	mtspr	SPRN_MAS0,r9;						\
1110	lwz	r9,MAS3(r1);						\
1111	mtspr	SPRN_MAS1,r10;						\
1112	lwz	r10,MAS6(r1);						\
1113	mtspr	SPRN_MAS2,r11;						\
1114	mtspr	SPRN_MAS3,r9;						\
1115	mtspr	SPRN_MAS6,r10;						\
1116	RESTORE_MAS7;
1117#elif defined(CONFIG_44x)
1118#define RESTORE_MMU_REGS						\
1119	lwz	r9,MMUCR(r1);						\
1120	mtspr	SPRN_MMUCR,r9;
1121#else
1122#define RESTORE_MMU_REGS
1123#endif
1124
1125#ifdef CONFIG_40x
1126	.globl	ret_from_crit_exc
1127ret_from_crit_exc:
1128	mfspr	r9,SPRN_SPRG_THREAD
1129	lis	r10,saved_ksp_limit@ha;
1130	lwz	r10,saved_ksp_limit@l(r10);
1131	tovirt(r9,r9);
1132	stw	r10,KSP_LIMIT(r9)
1133	lis	r9,crit_srr0@ha;
1134	lwz	r9,crit_srr0@l(r9);
1135	lis	r10,crit_srr1@ha;
1136	lwz	r10,crit_srr1@l(r10);
1137	mtspr	SPRN_SRR0,r9;
1138	mtspr	SPRN_SRR1,r10;
1139	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1140#endif /* CONFIG_40x */
1141
1142#ifdef CONFIG_BOOKE
1143	.globl	ret_from_crit_exc
1144ret_from_crit_exc:
1145	mfspr	r9,SPRN_SPRG_THREAD
1146	lwz	r10,SAVED_KSP_LIMIT(r1)
1147	stw	r10,KSP_LIMIT(r9)
1148	RESTORE_xSRR(SRR0,SRR1);
1149	RESTORE_MMU_REGS;
1150	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1151
1152	.globl	ret_from_debug_exc
1153ret_from_debug_exc:
1154	mfspr	r9,SPRN_SPRG_THREAD
1155	lwz	r10,SAVED_KSP_LIMIT(r1)
1156	stw	r10,KSP_LIMIT(r9)
1157	lwz	r9,THREAD_INFO-THREAD(r9)
1158	CURRENT_THREAD_INFO(r10, r1)
1159	lwz	r10,TI_PREEMPT(r10)
1160	stw	r10,TI_PREEMPT(r9)
1161	RESTORE_xSRR(SRR0,SRR1);
1162	RESTORE_xSRR(CSRR0,CSRR1);
1163	RESTORE_MMU_REGS;
1164	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1165
1166	.globl	ret_from_mcheck_exc
1167ret_from_mcheck_exc:
1168	mfspr	r9,SPRN_SPRG_THREAD
1169	lwz	r10,SAVED_KSP_LIMIT(r1)
1170	stw	r10,KSP_LIMIT(r9)
1171	RESTORE_xSRR(SRR0,SRR1);
1172	RESTORE_xSRR(CSRR0,CSRR1);
1173	RESTORE_xSRR(DSRR0,DSRR1);
1174	RESTORE_MMU_REGS;
1175	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1176#endif /* CONFIG_BOOKE */
1177
1178/*
1179 * Load the DBCR0 value for a task that is being ptraced,
1180 * having first saved the current DBCR0 value in global_dbcr0.
1181 * Note that r0 holds the dbcr0 value to set on entry to this.
1182 */
1183load_dbcr0:
1184	mfmsr	r10		/* first disable debug exceptions */
1185	rlwinm	r10,r10,0,~MSR_DE
1186	mtmsr	r10
1187	isync
1188	mfspr	r10,SPRN_DBCR0
1189	lis	r11,global_dbcr0@ha
1190	addi	r11,r11,global_dbcr0@l
1191#ifdef CONFIG_SMP
1192	CURRENT_THREAD_INFO(r9, r1)
1193	lwz	r9,TI_CPU(r9)
1194	slwi	r9,r9,3
1195	add	r11,r11,r9
1196#endif
1197	stw	r10,0(r11)
1198	mtspr	SPRN_DBCR0,r0
1199	lwz	r10,4(r11)
1200	addi	r10,r10,1
1201	stw	r10,4(r11)
1202	li	r11,-1
1203	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1204	blr
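	/* global_dbcr0 (in .bss below) holds two words per CPU: the saved
	 * kernel DBCR0 value at offset 0 and a use count at offset 4,
	 * indexed by cpu * 8 via the slwi above.  transfer_to_handler
	 * restores DBCR0 from offset 0 and decrements the count when the
	 * ptraced task re-enters the kernel.
	 */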
1205
1206	.section .bss
1207	.align	4
1208global_dbcr0:
1209	.space	8*NR_CPUS
1210	.previous
1211#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1212
1213do_work:			/* r10 contains MSR_KERNEL here */
1214	andi.	r0,r9,_TIF_NEED_RESCHED
1215	beq	do_user_signal
1216
1217do_resched:			/* r10 contains MSR_KERNEL here */
1218	/* Note: We don't need to inform lockdep that we are enabling
1219	 * interrupts here. As far as it knows, they are already enabled
1220	 */
1221	ori	r10,r10,MSR_EE
1222	SYNC
1223	MTMSRD(r10)		/* hard-enable interrupts */
1224	bl	schedule
1225recheck:
1226	/* Note: And we don't tell lockdep that we are disabling them
1227	 * again, either.  These disable/enable cycles used to peek at
1228	 * TI_FLAGS aren't advertised.
1229	 */
1230	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1231	SYNC
1232	MTMSRD(r10)		/* disable interrupts */
1233	CURRENT_THREAD_INFO(r9, r1)
1234	lwz	r9,TI_FLAGS(r9)
1235	andi.	r0,r9,_TIF_NEED_RESCHED
1236	bne-	do_resched
1237	andi.	r0,r9,_TIF_USER_WORK_MASK
1238	beq	restore_user
1239do_user_signal:			/* r10 contains MSR_KERNEL here */
1240	ori	r10,r10,MSR_EE
1241	SYNC
1242	MTMSRD(r10)		/* hard-enable interrupts */
1243	/* save r13-r31 in the exception frame, if not already done */
1244	lwz	r3,_TRAP(r1)
1245	andi.	r0,r3,1
1246	beq	2f
1247	SAVE_NVGPRS(r1)
1248	rlwinm	r3,r3,0,0,30
1249	stw	r3,_TRAP(r1)
12502:	addi	r3,r1,STACK_FRAME_OVERHEAD
1251	mr	r4,r9
1252	bl	do_notify_resume
1253	REST_NVGPRS(r1)
1254	b	recheck
1255
1256/*
1257 * We come here when we are at the end of handling an exception
1258 * that occurred at a place where taking an exception will lose
1259 * state information, such as the contents of SRR0 and SRR1.
1260 */
1261nonrecoverable:
1262	lis	r10,exc_exit_restart_end@ha
1263	addi	r10,r10,exc_exit_restart_end@l
1264	cmplw	r12,r10
1265	bge	3f
1266	lis	r11,exc_exit_restart@ha
1267	addi	r11,r11,exc_exit_restart@l
1268	cmplw	r12,r11
1269	blt	3f
1270	lis	r10,ee_restarts@ha
1271	lwz	r12,ee_restarts@l(r10)
1272	addi	r12,r12,1
1273	stw	r12,ee_restarts@l(r10)
1274	mr	r12,r11		/* restart at exc_exit_restart */
1275	blr
12763:	/* OK, we can't recover, kill this process */
1277	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1278BEGIN_FTR_SECTION
1279	blr
1280END_FTR_SECTION_IFSET(CPU_FTR_601)
1281	lwz	r3,_TRAP(r1)
1282	andi.	r0,r3,1
1283	beq	4f
1284	SAVE_NVGPRS(r1)
1285	rlwinm	r3,r3,0,0,30
1286	stw	r3,_TRAP(r1)
12874:	addi	r3,r1,STACK_FRAME_OVERHEAD
1288	bl	nonrecoverable_exception
1289	/* shouldn't return */
1290	b	4b
1291
1292	.section .bss
1293	.align	2
1294ee_restarts:
1295	.space	4
1296	.previous
1297
1298/*
1299 * PROM code for specific machines follows.  Put it
1300 * here so it's easy to add arch-specific sections later.
1301 * -- Cort
1302 */
1303#ifdef CONFIG_PPC_RTAS
1304/*
1305 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1306 * called with the MMU off.
1307 */
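/*
 * Rough flow: save LR and the caller's MSR on the stack, compute physical
 * addresses for the return label (1:) and for r1 (kept in SPRN_SPRG_RTAS
 * while RTAS runs, which lets the machine check path tell that we are
 * inside RTAS), then rfi into the RTAS entry point with translation
 * (MSR_IR/MSR_DR) turned off.  The code at 1: runs untranslated and uses
 * a second rfi to restore the saved MSR and return address.
 */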
1308_GLOBAL(enter_rtas)
1309	stwu	r1,-INT_FRAME_SIZE(r1)
1310	mflr	r0
1311	stw	r0,INT_FRAME_SIZE+4(r1)
1312	LOAD_REG_ADDR(r4, rtas)
1313	lis	r6,1f@ha	/* physical return address for rtas */
1314	addi	r6,r6,1f@l
1315	tophys(r6,r6)
1316	tophys(r7,r1)
1317	lwz	r8,RTASENTRY(r4)
1318	lwz	r4,RTASBASE(r4)
1319	mfmsr	r9
1320	stw	r9,8(r1)
1321	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1322	SYNC			/* disable interrupts so SRR0/1 */
1323	MTMSRD(r0)		/* don't get trashed */
1324	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1325	mtlr	r6
1326	mtspr	SPRN_SPRG_RTAS,r7
1327	mtspr	SPRN_SRR0,r8
1328	mtspr	SPRN_SRR1,r9
1329	RFI
13301:	tophys(r9,r1)
1331	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1332	lwz	r9,8(r9)	/* original msr value */
1333	addi	r1,r1,INT_FRAME_SIZE
1334	li	r0,0
1335	mtspr	SPRN_SPRG_RTAS,r0
1336	mtspr	SPRN_SRR0,r8
1337	mtspr	SPRN_SRR1,r9
1338	RFI			/* return to caller */
1339
1340	.globl	machine_check_in_rtas
1341machine_check_in_rtas:
1342	twi	31,0,0
1343	/* XXX load up BATs and panic */
1344
1345#endif /* CONFIG_PPC_RTAS */
1346