1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34#include <asm/ftrace.h>
35#include <asm/ptrace.h>
36#include <asm/export.h>
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
45#endif
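/*
 * li only takes a 16-bit signed immediate, so once MSR_KERNEL needs bits
 * above the low 16 (4xx/Book-E add MSR_CE) the constant has to be built
 * in two steps.  For an illustrative value of 0x00021000 the macro
 * expands to roughly:
 *	lis	r10,0x0002
 *	ori	r10,r10,0x1000
 */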
46
47#ifdef CONFIG_BOOKE
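/*
 * Book-E has extra exception "levels" (critical, debug, machine check),
 * each with its own save/restore SPR pair (CSRR0/1, DSRR0/1, MCSRR0/1).
 * Because a handler running at one level can clobber the lower levels'
 * save/restore registers, each prologue below stashes those SPRs (plus
 * some MMU scratch state) in the exception frame; the matching restores
 * are done by ret_from_crit_exc and friends near the end of this file.
 */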
48	.globl	mcheck_transfer_to_handler
49mcheck_transfer_to_handler:
50	mfspr	r0,SPRN_DSRR0
51	stw	r0,_DSRR0(r11)
52	mfspr	r0,SPRN_DSRR1
53	stw	r0,_DSRR1(r11)
54	/* fall through */
55
56	.globl	debug_transfer_to_handler
57debug_transfer_to_handler:
58	mfspr	r0,SPRN_CSRR0
59	stw	r0,_CSRR0(r11)
60	mfspr	r0,SPRN_CSRR1
61	stw	r0,_CSRR1(r11)
62	/* fall through */
63
64	.globl	crit_transfer_to_handler
65crit_transfer_to_handler:
66#ifdef CONFIG_PPC_BOOK3E_MMU
67	mfspr	r0,SPRN_MAS0
68	stw	r0,MAS0(r11)
69	mfspr	r0,SPRN_MAS1
70	stw	r0,MAS1(r11)
71	mfspr	r0,SPRN_MAS2
72	stw	r0,MAS2(r11)
73	mfspr	r0,SPRN_MAS3
74	stw	r0,MAS3(r11)
75	mfspr	r0,SPRN_MAS6
76	stw	r0,MAS6(r11)
77#ifdef CONFIG_PHYS_64BIT
78	mfspr	r0,SPRN_MAS7
79	stw	r0,MAS7(r11)
80#endif /* CONFIG_PHYS_64BIT */
81#endif /* CONFIG_PPC_BOOK3E_MMU */
82#ifdef CONFIG_44x
83	mfspr	r0,SPRN_MMUCR
84	stw	r0,MMUCR(r11)
85#endif
86	mfspr	r0,SPRN_SRR0
87	stw	r0,_SRR0(r11)
88	mfspr	r0,SPRN_SRR1
89	stw	r0,_SRR1(r11)
90
91	/* save the old stack limit and then set the limit
92	 * for the current stack so that the thread_info
93	 * struct at its base is protected
94	 */
95	mfspr	r8,SPRN_SPRG_THREAD
96	lwz	r0,KSP_LIMIT(r8)
97	stw	r0,SAVED_KSP_LIMIT(r11)
98	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
99	stw	r0,KSP_LIMIT(r8)
100	/* fall through */
101#endif
102
103#ifdef CONFIG_40x
104	.globl	crit_transfer_to_handler
105crit_transfer_to_handler:
106	lwz	r0,crit_r10@l(0)
107	stw	r0,GPR10(r11)
108	lwz	r0,crit_r11@l(0)
109	stw	r0,GPR11(r11)
110	mfspr	r0,SPRN_SRR0
111	stw	r0,crit_srr0@l(0)
112	mfspr	r0,SPRN_SRR1
113	stw	r0,crit_srr1@l(0)
114
115	/* save the old stack limit and then set the limit
116	 * for the current stack so that the thread_info
117	 * struct at its base is protected
118	 */
119	mfspr	r8,SPRN_SPRG_THREAD
120	lwz	r0,KSP_LIMIT(r8)
121	stw	r0,saved_ksp_limit@l(0)
122	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
123	stw	r0,KSP_LIMIT(r8)
124	/* fall through */
125#endif
126
127/*
128 * This code finishes saving the registers to the exception frame
129 * and jumps to the appropriate handler for the exception, turning
130 * on address translation.
131 * Note that we rely on the caller having set cr0.eq iff the exception
132 * occurred in kernel mode (i.e. MSR:PR = 0).
133 */
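/*
 * Register conventions on entry here (set up by the exception prologues
 * in the head_*.S files): r11 points to the exception frame, r12 holds
 * the interrupted NIP (SRR0), r9 the interrupted MSR (SRR1), and r10 the
 * MSR value the handler should run with.  LR points at the pair of words
 * the vector code lays down right after its bl, roughly:
 *	bl	transfer_to_handler
 *	.long	handler_address
 *	.long	address_to_return_to_when_done
 * which is why the code at 3: below loads the handler and return
 * addresses from 0(LR) and 4(LR).
 */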
134	.globl	transfer_to_handler_full
135transfer_to_handler_full:
136	SAVE_NVGPRS(r11)
137	/* fall through */
138
139	.globl	transfer_to_handler
140transfer_to_handler:
141	stw	r2,GPR2(r11)
142	stw	r12,_NIP(r11)
143	stw	r9,_MSR(r11)
144	andi.	r2,r9,MSR_PR
145	mfctr	r12
146	mfspr	r2,SPRN_XER
147	stw	r12,_CTR(r11)
148	stw	r2,_XER(r11)
149	mfspr	r12,SPRN_SPRG_THREAD
150	addi	r2,r12,-THREAD
151	tovirt(r2,r2)			/* set r2 to current */
152	beq	2f			/* if from user, fix up THREAD.regs */
153	addi	r11,r1,STACK_FRAME_OVERHEAD
154	stw	r11,PT_REGS(r12)
155#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
156	/* Check to see if the dbcr0 register is set up to debug.  Use the
157	   internal debug mode bit to do this. */
158	lwz	r12,THREAD_DBCR0(r12)
159	andis.	r12,r12,DBCR0_IDM@h
160	beq+	3f
161	/* From user and task is ptraced - load up global dbcr0 */
162	li	r12,-1			/* clear all pending debug events */
163	mtspr	SPRN_DBSR,r12
164	lis	r11,global_dbcr0@ha
165	tophys(r11,r11)
166	addi	r11,r11,global_dbcr0@l
167#ifdef CONFIG_SMP
168	CURRENT_THREAD_INFO(r9, r1)
169	lwz	r9,TI_CPU(r9)
170	slwi	r9,r9,3
171	add	r11,r11,r9
172#endif
173	lwz	r12,0(r11)
174	mtspr	SPRN_DBCR0,r12
175	lwz	r12,4(r11)
176	addi	r12,r12,-1
177	stw	r12,4(r11)
178#endif
179#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
180	CURRENT_THREAD_INFO(r9, r1)
181	tophys(r9, r9)
182	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
183#endif
184
185	b	3f
186
1872:	/* if from kernel, check interrupted DOZE/NAP mode and
188         * check for stack overflow
189         */
190	lwz	r9,KSP_LIMIT(r12)
191	cmplw	r1,r9			/* if r1 <= ksp_limit */
192	ble-	stack_ovf		/* then the kernel stack overflowed */
1935:
194#if defined(CONFIG_6xx) || defined(CONFIG_E500)
195	CURRENT_THREAD_INFO(r9, r1)
196	tophys(r9,r9)			/* check local flags */
197	lwz	r12,TI_LOCAL_FLAGS(r9)
198	mtcrf	0x01,r12
199	bt-	31-TLF_NAPPING,4f
200	bt-	31-TLF_SLEEPING,7f
201#endif /* CONFIG_6xx || CONFIG_E500 */
202	.globl transfer_to_handler_cont
203transfer_to_handler_cont:
2043:
205	mflr	r9
206	lwz	r11,0(r9)		/* virtual address of handler */
207	lwz	r9,4(r9)		/* where to go when done */
208#ifdef CONFIG_TRACE_IRQFLAGS
209	lis	r12,reenable_mmu@h
210	ori	r12,r12,reenable_mmu@l
211	mtspr	SPRN_SRR0,r12
212	mtspr	SPRN_SRR1,r10
213	SYNC
214	RFI
215reenable_mmu:				/* re-enable mmu so we can */
216	mfmsr	r10
217	lwz	r12,_MSR(r1)
218	xor	r10,r10,r12
219	andi.	r10,r10,MSR_EE		/* Did EE change? */
220	beq	1f
221
222	/*
223	 * trace_hardirqs_off() uses CALLER_ADDR0 and CALLER_ADDR1.
224	 * If we came from user mode there is only one stack frame on the
225	 * stack, and accessing CALLER_ADDR1 would oops. So we need to create
226	 * a dummy stack frame to keep trace_hardirqs_off() happy.
227	 *
228	 * This is handy because we also need to save a bunch of GPRs:
229	 * r3 can be different from GPR3(r1) at this point, r9 and r11
230	 * contain the return address and handler address respectively,
231	 * and r4 & r5 can contain page fault arguments that need to be
232	 * passed along as well. r12, CCR, CTR, XER etc. are left clobbered
233	 * as they aren't useful past this point (they aren't syscall
234	 * arguments); the rest is restored from the exception frame.
235	 */
236	stwu	r1,-32(r1)
237	stw	r9,8(r1)
238	stw	r11,12(r1)
239	stw	r3,16(r1)
240	stw	r4,20(r1)
241	stw	r5,24(r1)
242	bl	trace_hardirqs_off
243	lwz	r5,24(r1)
244	lwz	r4,20(r1)
245	lwz	r3,16(r1)
246	lwz	r11,12(r1)
247	lwz	r9,8(r1)
248	addi	r1,r1,32
249	lwz	r0,GPR0(r1)
250	lwz	r6,GPR6(r1)
251	lwz	r7,GPR7(r1)
252	lwz	r8,GPR8(r1)
2531:	mtctr	r11
254	mtlr	r9
255	bctr				/* jump to handler */
256#else /* CONFIG_TRACE_IRQFLAGS */
257	mtspr	SPRN_SRR0,r11
258	mtspr	SPRN_SRR1,r10
259	mtlr	r9
260	SYNC
261	RFI				/* jump to handler, enable MMU */
262#endif /* CONFIG_TRACE_IRQFLAGS */
263
264#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2654:	rlwinm	r12,r12,0,~_TLF_NAPPING
266	stw	r12,TI_LOCAL_FLAGS(r9)
267	b	power_save_ppc32_restore
268
2697:	rlwinm	r12,r12,0,~_TLF_SLEEPING
270	stw	r12,TI_LOCAL_FLAGS(r9)
271	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
272	rlwinm	r9,r9,0,~MSR_EE
273	lwz	r12,_LINK(r11)		/* and return to address in LR */
274	b	fast_exception_return
275#endif
276
277/*
278 * On kernel stack overflow, load up an initial stack pointer
279 * and call StackOverflow(regs), which should not return.
280 */
281stack_ovf:
282	/* sometimes we use a statically-allocated stack, which is OK. */
283	lis	r12,_end@h
284	ori	r12,r12,_end@l
285	cmplw	r1,r12
286	ble	5b			/* r1 <= &_end is OK */
287	SAVE_NVGPRS(r11)
288	addi	r3,r1,STACK_FRAME_OVERHEAD
289	lis	r1,init_thread_union@ha
290	addi	r1,r1,init_thread_union@l
291	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
292	lis	r9,StackOverflow@ha
293	addi	r9,r9,StackOverflow@l
294	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
295	FIX_SRR1(r10,r12)
296	mtspr	SPRN_SRR0,r9
297	mtspr	SPRN_SRR1,r10
298	SYNC
299	RFI
300
301/*
302 * Handle a system call.
303 */
304	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
305	.stabs	"entry_32.S",N_SO,0,0,0f
3060:
307
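/*
 * Syscall ABI as seen here: the vector code has already built the frame,
 * r0 holds the syscall number and r3-r8 the arguments (also saved in the
 * frame as GPR3..GPR8).  On return to user space the error is signalled
 * via CR0.SO, with r3 carrying either the result or the positive errno.
 * An illustrative (not from this file) user-space sequence:
 *	li	r0,__NR_getpid
 *	sc
 *	bns+	1f		# no error: r3 is the result
 *	neg	r3,r3		# error: kernel returned positive errno in r3
 * 1:
 */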
308_GLOBAL(DoSyscall)
309	stw	r3,ORIG_GPR3(r1)
310	li	r12,0
311	stw	r12,RESULT(r1)
312	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
313	rlwinm	r11,r11,0,4,2
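	/* rlwinm with MB=4, ME=2 is a wrap-around mask covering every bit
	 * except bit 3, i.e. it clears only CR0[SO] in the saved CCR */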
314	stw	r11,_CCR(r1)
315#ifdef CONFIG_TRACE_IRQFLAGS
316	/* Returning from a syscall can (and generally will) hard-enable
317	 * interrupts. You aren't supposed to make a syscall with
318	 * interrupts disabled in the first place. However, to keep the
319	 * lockdep state consistent if it does happen, we force that hard
320	 * enable here, with appropriate tracing, if we see that we were
321	 * called with interrupts off.
322	 */
323	mfmsr	r11
324	andi.	r12,r11,MSR_EE
325	bne+	1f
326	/* We came in with interrupts disabled, we enable them now */
327	bl	trace_hardirqs_on
328	mfmsr	r11
329	lwz	r0,GPR0(r1)
330	lwz	r3,GPR3(r1)
331	lwz	r4,GPR4(r1)
332	ori	r11,r11,MSR_EE
333	lwz	r5,GPR5(r1)
334	lwz	r6,GPR6(r1)
335	lwz	r7,GPR7(r1)
336	lwz	r8,GPR8(r1)
337	mtmsr	r11
3381:
339#endif /* CONFIG_TRACE_IRQFLAGS */
340	CURRENT_THREAD_INFO(r10, r1)
341	lwz	r11,TI_FLAGS(r10)
342	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
343	bne-	syscall_dotrace
344syscall_dotrace_cont:
345	cmplwi	0,r0,NR_syscalls
346	lis	r10,sys_call_table@h
347	ori	r10,r10,sys_call_table@l
348	slwi	r0,r0,2
349	bge-	66f
350	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
351	mtlr	r10
352	addi	r9,r1,STACK_FRAME_OVERHEAD
353	PPC440EP_ERR42
354	blrl			/* Call handler */
355	.globl	ret_from_syscall
356ret_from_syscall:
357	mr	r6,r3
358	CURRENT_THREAD_INFO(r12, r1)
359	/* disable interrupts so current_thread_info()->flags can't change */
360	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
361	/* Note: We don't bother telling lockdep about it */
362	SYNC
363	MTMSRD(r10)
364	lwz	r9,TI_FLAGS(r12)
365	li	r8,-MAX_ERRNO
366	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
367	bne-	syscall_exit_work
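	/*
	 * Error convention: a return value in the (unsigned) range
	 * -MAX_ERRNO..-1 is an error, so set CR0.SO for user space and
	 * hand back the positive errno in r3; anything else is returned
	 * unchanged.
	 */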
368	cmplw	0,r3,r8
369	blt+	syscall_exit_cont
370	lwz	r11,_CCR(r1)			/* Load CR */
371	neg	r3,r3
372	oris	r11,r11,0x1000	/* Set SO bit in CR */
373	stw	r11,_CCR(r1)
374syscall_exit_cont:
375	lwz	r8,_MSR(r1)
376#ifdef CONFIG_TRACE_IRQFLAGS
377	/* If we are going to return from the syscall with interrupts
378	 * off, we trace that here. It shouldn't happen, but we want
379	 * to catch it if it does.
380	 */
381	andi.	r10,r8,MSR_EE
382	bne+	1f
383	stw	r3,GPR3(r1)
384	bl      trace_hardirqs_off
385	lwz	r3,GPR3(r1)
3861:
387#endif /* CONFIG_TRACE_IRQFLAGS */
388#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
389	/* If the process has its own DBCR0 value, load it up.  The internal
390	   debug mode bit tells us that dbcr0 should be loaded. */
391	lwz	r0,THREAD+THREAD_DBCR0(r2)
392	andis.	r10,r0,DBCR0_IDM@h
393	bnel-	load_dbcr0
394#endif
395#ifdef CONFIG_44x
396BEGIN_MMU_FTR_SECTION
397	lis	r4,icache_44x_need_flush@ha
398	lwz	r5,icache_44x_need_flush@l(r4)
399	cmplwi	cr0,r5,0
400	bne-	2f
4011:
402END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
403#endif /* CONFIG_44x */
404BEGIN_FTR_SECTION
405	lwarx	r7,0,r1
406END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
407	stwcx.	r0,0,r1			/* to clear the reservation */
408#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
409	andi.	r4,r8,MSR_PR
410	beq	3f
411	CURRENT_THREAD_INFO(r4, r1)
412	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4133:
414#endif
415	lwz	r4,_LINK(r1)
416	lwz	r5,_CCR(r1)
417	mtlr	r4
418	mtcr	r5
419	lwz	r7,_NIP(r1)
420	FIX_SRR1(r8, r0)
421	lwz	r2,GPR2(r1)
422	lwz	r1,GPR1(r1)
423	mtspr	SPRN_SRR0,r7
424	mtspr	SPRN_SRR1,r8
425	SYNC
426	RFI
427#ifdef CONFIG_44x
4282:	li	r7,0
429	iccci	r0,r0
430	stw	r7,icache_44x_need_flush@l(r4)
431	b	1b
432#endif  /* CONFIG_44x */
433
43466:	li	r3,-ENOSYS
435	b	ret_from_syscall
436
437	.globl	ret_from_fork
438ret_from_fork:
439	REST_NVGPRS(r1)
440	bl	schedule_tail
441	li	r3,0
442	b	ret_from_syscall
443
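/*
 * For kernel threads, copy_thread() parks the thread function in r14 and
 * its argument in r15 (both nonvolatile, so they survive _switch); call
 * fn(arg) here and, if it ever returns, drop into the syscall exit path.
 */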
444	.globl	ret_from_kernel_thread
445ret_from_kernel_thread:
446	REST_NVGPRS(r1)
447	bl	schedule_tail
448	mtlr	r14
449	mr	r3,r15
450	PPC440EP_ERR42
451	blrl
452	li	r3,0
453	b	ret_from_syscall
454
455/* Traced system call support */
456syscall_dotrace:
457	SAVE_NVGPRS(r1)
458	li	r0,0xc00
459	stw	r0,_TRAP(r1)
460	addi	r3,r1,STACK_FRAME_OVERHEAD
461	bl	do_syscall_trace_enter
462	/*
463	 * Restore the argument registers, which may just have been changed.
464	 * We use the return value of do_syscall_trace_enter() as the
465	 * syscall number to look up in the table (r0).
466	 */
467	mr	r0,r3
468	lwz	r3,GPR3(r1)
469	lwz	r4,GPR4(r1)
470	lwz	r5,GPR5(r1)
471	lwz	r6,GPR6(r1)
472	lwz	r7,GPR7(r1)
473	lwz	r8,GPR8(r1)
474	REST_NVGPRS(r1)
475
476	cmplwi	r0,NR_syscalls
477	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
478	bge-	ret_from_syscall
479	b	syscall_dotrace_cont
480
481syscall_exit_work:
482	andi.	r0,r9,_TIF_RESTOREALL
483	beq+	0f
484	REST_NVGPRS(r1)
485	b	2f
4860:	cmplw	0,r3,r8
487	blt+	1f
488	andi.	r0,r9,_TIF_NOERROR
489	bne-	1f
490	lwz	r11,_CCR(r1)			/* Load CR */
491	neg	r3,r3
492	oris	r11,r11,0x1000	/* Set SO bit in CR */
493	stw	r11,_CCR(r1)
494
4951:	stw	r6,RESULT(r1)	/* Save result */
496	stw	r3,GPR3(r1)	/* Update return value */
4972:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
498	beq	4f
499
500	/* Clear per-syscall TIF flags if any are set.  */
501
502	li	r11,_TIF_PERSYSCALL_MASK
503	addi	r12,r12,TI_FLAGS
5043:	lwarx	r8,0,r12
505	andc	r8,r8,r11
506#ifdef CONFIG_IBM405_ERR77
507	dcbt	0,r12
508#endif
509	stwcx.	r8,0,r12
510	bne-	3b
511	subi	r12,r12,TI_FLAGS
512
5134:	/* Anything which requires enabling interrupts? */
514	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
515	beq	ret_from_except
516
517	/* Re-enable interrupts. There is no need to trace that with
518	 * lockdep as we are supposed to have IRQs on at this point
519	 */
520	ori	r10,r10,MSR_EE
521	SYNC
522	MTMSRD(r10)
523
524	/* Save NVGPRS if they're not saved already */
525	lwz	r4,_TRAP(r1)
526	andi.	r4,r4,1
527	beq	5f
528	SAVE_NVGPRS(r1)
529	li	r4,0xc00
530	stw	r4,_TRAP(r1)
5315:
532	addi	r3,r1,STACK_FRAME_OVERHEAD
533	bl	do_syscall_trace_leave
534	b	ret_from_except_full
535
536/*
537 * The fork/clone functions need to copy the full register set into
538 * the child process. Therefore we need to save all the nonvolatile
539 * registers (r13 - r31) before calling the C code.
540 */
541	.globl	ppc_fork
542ppc_fork:
543	SAVE_NVGPRS(r1)
544	lwz	r0,_TRAP(r1)
545	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
546	stw	r0,_TRAP(r1)		/* register set saved */
547	b	sys_fork
548
549	.globl	ppc_vfork
550ppc_vfork:
551	SAVE_NVGPRS(r1)
552	lwz	r0,_TRAP(r1)
553	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
554	stw	r0,_TRAP(r1)		/* register set saved */
555	b	sys_vfork
556
557	.globl	ppc_clone
558ppc_clone:
559	SAVE_NVGPRS(r1)
560	lwz	r0,_TRAP(r1)
561	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
562	stw	r0,_TRAP(r1)		/* register set saved */
563	b	sys_clone
564
565	.globl	ppc_swapcontext
566ppc_swapcontext:
567	SAVE_NVGPRS(r1)
568	lwz	r0,_TRAP(r1)
569	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
570	stw	r0,_TRAP(r1)		/* register set saved */
571	b	sys_swapcontext
572
573/*
574 * Top-level page fault handling.
575 * This is in assembler because if do_page_fault tells us that
576 * it is a bad kernel page fault, we want to save the non-volatile
577 * registers before calling bad_page_fault.
578 */
579	.globl	handle_page_fault
580handle_page_fault:
581	stw	r4,_DAR(r1)
582	addi	r3,r1,STACK_FRAME_OVERHEAD
583	bl	do_page_fault
584	cmpwi	r3,0
585	beq+	ret_from_except
586	SAVE_NVGPRS(r1)
587	lwz	r0,_TRAP(r1)
588	clrrwi	r0,r0,1
589	stw	r0,_TRAP(r1)
590	mr	r5,r3
591	addi	r3,r1,STACK_FRAME_OVERHEAD
592	lwz	r4,_DAR(r1)
593	bl	bad_page_fault
594	b	ret_from_except_full
595
596/*
597 * This routine switches between two different tasks.  The process
598 * state of one is saved on its kernel stack.  Then the state
599 * of the other is restored from its kernel stack.  The memory
600 * management hardware is updated to the second process's state.
601 * Finally, we can return to the second process.
602 * On entry, r3 points to the THREAD for the current task, r4
603 * points to the THREAD for the new task.
604 *
605 * This routine is always called with interrupts disabled.
606 *
607 * Note: there are two ways to get to the "going out" portion
608 * of this code; either by coming in via the entry (_switch)
609 * or via "fork" which must set up an environment equivalent
610 * to the "_switch" path.  If you change this, you'll have to
611 * change the fork code also.
612 *
613 * The code which creates the new task context is in 'copy_thread'
614 * in arch/powerpc/kernel/process.c
615 */
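/*
 * Seen from C (__switch_to() in process.c) this is roughly
 *	last = _switch(&prev->thread, &next->thread);
 * i.e. r3/r4 are thread_struct pointers and the previous task_struct
 * comes back in r3 so the scheduler knows which task it switched from.
 */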
616_GLOBAL(_switch)
617	stwu	r1,-INT_FRAME_SIZE(r1)
618	mflr	r0
619	stw	r0,INT_FRAME_SIZE+4(r1)
620	/* r3-r12 are caller saved -- Cort */
621	SAVE_NVGPRS(r1)
622	stw	r0,_NIP(r1)	/* Return to switch caller */
623	mfmsr	r11
624	li	r0,MSR_FP	/* Disable floating-point */
625#ifdef CONFIG_ALTIVEC
626BEGIN_FTR_SECTION
627	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
628	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
629	stw	r12,THREAD+THREAD_VRSAVE(r2)
630END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
631#endif /* CONFIG_ALTIVEC */
632#ifdef CONFIG_SPE
633BEGIN_FTR_SECTION
634	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
635	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
636	stw	r12,THREAD+THREAD_SPEFSCR(r2)
637END_FTR_SECTION_IFSET(CPU_FTR_SPE)
638#endif /* CONFIG_SPE */
639	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
640	beq+	1f
641	andc	r11,r11,r0
642	MTMSRD(r11)
643	isync
6441:	stw	r11,_MSR(r1)
645	mfcr	r10
646	stw	r10,_CCR(r1)
647	stw	r1,KSP(r3)	/* Set old stack pointer */
648
649#ifdef CONFIG_SMP
650	/* We need a sync somewhere here to make sure that if the
651	 * previous task gets rescheduled on another CPU, it sees all
652	 * stores it has performed on this one.
653	 */
654	sync
655#endif /* CONFIG_SMP */
656
657	tophys(r0,r4)
658	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
659	lwz	r1,KSP(r4)	/* Load new stack pointer */
660
661	/* save the old current 'last' for return value */
662	mr	r3,r2
663	addi	r2,r4,-THREAD	/* Update current */
664
665#ifdef CONFIG_ALTIVEC
666BEGIN_FTR_SECTION
667	lwz	r0,THREAD+THREAD_VRSAVE(r2)
668	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
669END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
670#endif /* CONFIG_ALTIVEC */
671#ifdef CONFIG_SPE
672BEGIN_FTR_SECTION
673	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
674	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
675END_FTR_SECTION_IFSET(CPU_FTR_SPE)
676#endif /* CONFIG_SPE */
677#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
678	lwz	r0,TSK_STACK_CANARY(r2)
679	lis	r4,__stack_chk_guard@ha
680	stw	r0,__stack_chk_guard@l(r4)
681#endif
682	lwz	r0,_CCR(r1)
683	mtcrf	0xFF,r0
684	/* r3-r12 are destroyed -- Cort */
685	REST_NVGPRS(r1)
686
687	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
688	mtlr	r4
689	addi	r1,r1,INT_FRAME_SIZE
690	blr
691
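/*
 * Lightweight exception exit: expects r11 = exception frame, r12 = NIP
 * and r9 = MSR to return to, and restores only r1, r3-r6, r9-r12, CR
 * and LR from the frame (no NVGPRs, no XER/CTR).
 */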
692	.globl	fast_exception_return
693fast_exception_return:
694#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
695	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
696	beq	1f			/* if not, we've got problems */
697#endif
698
6992:	REST_4GPRS(3, r11)
700	lwz	r10,_CCR(r11)
701	REST_GPR(1, r11)
702	mtcr	r10
703	lwz	r10,_LINK(r11)
704	mtlr	r10
705	REST_GPR(10, r11)
706	mtspr	SPRN_SRR1,r9
707	mtspr	SPRN_SRR0,r12
708	REST_GPR(9, r11)
709	REST_GPR(12, r11)
710	lwz	r11,GPR11(r11)
711	SYNC
712	RFI
713
714#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
715/* check if the exception happened in a restartable section */
7161:	lis	r3,exc_exit_restart_end@ha
717	addi	r3,r3,exc_exit_restart_end@l
718	cmplw	r12,r3
719	bge	3f
720	lis	r4,exc_exit_restart@ha
721	addi	r4,r4,exc_exit_restart@l
722	cmplw	r12,r4
723	blt	3f
724	lis	r3,fee_restarts@ha
725	tophys(r3,r3)
726	lwz	r5,fee_restarts@l(r3)
727	addi	r5,r5,1
728	stw	r5,fee_restarts@l(r3)
729	mr	r12,r4		/* restart at exc_exit_restart */
730	b	2b
731
732	.section .bss
733	.align	2
734fee_restarts:
735	.space	4
736	.previous
737
738/* Aargh, a nonrecoverable interrupt: panic. We don't know which
739 * trap this is, but the 601 doesn't implement the RI bit, so on
740 * that CPU assume it's OK and carry on. */
7413:
742BEGIN_FTR_SECTION
743	b	2b
744END_FTR_SECTION_IFSET(CPU_FTR_601)
745	li	r10,-1
746	stw	r10,_TRAP(r11)
747	addi	r3,r1,STACK_FRAME_OVERHEAD
748	lis	r10,MSR_KERNEL@h
749	ori	r10,r10,MSR_KERNEL@l
750	bl	transfer_to_handler_full
751	.long	nonrecoverable_exception
752	.long	ret_from_except
753#endif
754
755	.globl	ret_from_except_full
756ret_from_except_full:
757	REST_NVGPRS(r1)
758	/* fall through */
759
760	.globl	ret_from_except
761ret_from_except:
762	/* Hard-disable interrupts so that current_thread_info()->flags
763	 * can't change between when we test it and when we return
764	 * from the interrupt. */
765	/* Note: We don't bother telling lockdep about it */
766	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
767	SYNC			/* Some chip revs have problems here... */
768	MTMSRD(r10)		/* disable interrupts */
769
770	lwz	r3,_MSR(r1)	/* Returning to user mode? */
771	andi.	r0,r3,MSR_PR
772	beq	resume_kernel
773
774user_exc_return:		/* r10 contains MSR_KERNEL here */
775	/* Check current_thread_info()->flags */
776	CURRENT_THREAD_INFO(r9, r1)
777	lwz	r9,TI_FLAGS(r9)
778	andi.	r0,r9,_TIF_USER_WORK_MASK
779	bne	do_work
780
781restore_user:
782#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
783	/* Check whether this process has its own DBCR0 value.  The internal
784	   debug mode bit tells us that dbcr0 should be loaded. */
785	lwz	r0,THREAD+THREAD_DBCR0(r2)
786	andis.	r10,r0,DBCR0_IDM@h
787	bnel-	load_dbcr0
788#endif
789#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
790	CURRENT_THREAD_INFO(r9, r1)
791	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
792#endif
793
794	b	restore
795
796/* N.B. the only way to get here is from the beq following ret_from_except. */
797resume_kernel:
798	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
799	CURRENT_THREAD_INFO(r9, r1)
800	lwz	r8,TI_FLAGS(r9)
801	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
802	beq+	1f
803
804	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
805
806	lwz	r3,GPR1(r1)
807	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
808	mr	r4,r1			/* src:  current exception frame */
809	mr	r1,r3			/* Reroute the trampoline frame to r1 */
810
811	/* Copy from the original to the trampoline. */
812	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
813	li	r6,0			/* start offset: 0 */
814	mtctr	r5
8152:	lwzx	r0,r6,r4
816	stwx	r0,r6,r3
817	addi	r6,r6,4
818	bdnz	2b
819
820	/* Do real store operation to complete stwu */
821	lwz	r5,GPR1(r1)
822	stw	r8,0(r5)
823
824	/* Clear _TIF_EMULATE_STACK_STORE flag */
825	lis	r11,_TIF_EMULATE_STACK_STORE@h
826	addi	r5,r9,TI_FLAGS
8270:	lwarx	r8,0,r5
828	andc	r8,r8,r11
829#ifdef CONFIG_IBM405_ERR77
830	dcbt	0,r5
831#endif
832	stwcx.	r8,0,r5
833	bne-	0b
8341:
835
836#ifdef CONFIG_PREEMPT
837	/* check current_thread_info->preempt_count */
838	lwz	r0,TI_PREEMPT(r9)
839	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
840	bne	restore
841	andi.	r8,r8,_TIF_NEED_RESCHED
842	beq+	restore
843	lwz	r3,_MSR(r1)
844	andi.	r0,r3,MSR_EE	/* interrupts off? */
845	beq	restore		/* don't schedule if so */
846#ifdef CONFIG_TRACE_IRQFLAGS
847	/* Lockdep thinks irqs are enabled, but we need to call
848	 * preempt_schedule_irq with IRQs off, so we inform lockdep
849	 * now that we -did- turn them off already.
850	 */
851	bl	trace_hardirqs_off
852#endif
8531:	bl	preempt_schedule_irq
854	CURRENT_THREAD_INFO(r9, r1)
855	lwz	r3,TI_FLAGS(r9)
856	andi.	r0,r3,_TIF_NEED_RESCHED
857	bne-	1b
858#ifdef CONFIG_TRACE_IRQFLAGS
859	/* And now, to properly rebalance the above, we tell lockdep they
860	 * are being turned back on, which will happen when we return
861	 */
862	bl	trace_hardirqs_on
863#endif
864#endif /* CONFIG_PREEMPT */
865
866	/* interrupts are hard-disabled at this point */
867restore:
868#ifdef CONFIG_44x
869BEGIN_MMU_FTR_SECTION
870	b	1f
871END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
872	lis	r4,icache_44x_need_flush@ha
873	lwz	r5,icache_44x_need_flush@l(r4)
874	cmplwi	cr0,r5,0
875	beq+	1f
876	li	r6,0
877	iccci	r0,r0
878	stw	r6,icache_44x_need_flush@l(r4)
8791:
880#endif  /* CONFIG_44x */
881
882	lwz	r9,_MSR(r1)
883#ifdef CONFIG_TRACE_IRQFLAGS
884	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
885	 * off in this assembly code while peeking at TI_FLAGS() and such. However
886	 * we need to inform it if the exception turned interrupts off, and we
887	 * are about to turn them back on.
888	 *
889	 * The problem here sadly is that we don't know whether the exception was
890	 * one that turned interrupts off or not. So we always tell lockdep about
891	 * turning them on here when we go back to wherever we came from with EE
892	 * on, even if that may mean some redundant calls being tracked. Maybe later
893	 * we could encode what the exception did somewhere or test the exception
894	 * type in the pt_regs, but that sounds like overkill.
895	 */
896	andi.	r10,r9,MSR_EE
897	beq	1f
898	/*
899	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
900	 * which is the stack frame here, we need to force a stack frame
901	 * in case we came from user space.
902	 */
903	stwu	r1,-32(r1)
904	mflr	r0
905	stw	r0,4(r1)
906	stwu	r1,-32(r1)
907	bl	trace_hardirqs_on
908	lwz	r1,0(r1)
909	lwz	r1,0(r1)
910	lwz	r9,_MSR(r1)
9111:
912#endif /* CONFIG_TRACE_IRQFLAGS */
913
914	lwz	r0,GPR0(r1)
915	lwz	r2,GPR2(r1)
916	REST_4GPRS(3, r1)
917	REST_2GPRS(7, r1)
918
919	lwz	r10,_XER(r1)
920	lwz	r11,_CTR(r1)
921	mtspr	SPRN_XER,r10
922	mtctr	r11
923
924	PPC405_ERR77(0,r1)
925BEGIN_FTR_SECTION
926	lwarx	r11,0,r1
927END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
928	stwcx.	r0,0,r1			/* to clear the reservation */
929
930#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
931	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
932	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
933
934	lwz	r10,_CCR(r1)
935	lwz	r11,_LINK(r1)
936	mtcrf	0xFF,r10
937	mtlr	r11
938
939	/*
940	 * Once we put values in SRR0 and SRR1, we are in a state
941	 * where exceptions are not recoverable, since taking an
942	 * exception will trash SRR0 and SRR1.  Therefore we clear the
943	 * MSR:RI bit to indicate this.  If we do take an exception,
944	 * we can't return to the point of the exception but we
945	 * can restart the exception exit path at the label
946	 * exc_exit_restart below.  -- paulus
947	 */
948	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
949	SYNC
950	MTMSRD(r10)		/* clear the RI bit */
951	.globl exc_exit_restart
952exc_exit_restart:
953	lwz	r12,_NIP(r1)
954	FIX_SRR1(r9,r10)
955	mtspr	SPRN_SRR0,r12
956	mtspr	SPRN_SRR1,r9
957	REST_4GPRS(9, r1)
958	lwz	r1,GPR1(r1)
959	.globl exc_exit_restart_end
960exc_exit_restart_end:
961	SYNC
962	RFI
963
964#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
965	/*
966	 * This is a bit different on 4xx/Book-E because it doesn't have
967	 * the RI bit in the MSR.
968	 * The TLB miss handler checks if we have interrupted
969	 * the exception exit path and restarts it if so
970	 * (well maybe one day it will... :).
971	 */
972	lwz	r11,_LINK(r1)
973	mtlr	r11
974	lwz	r10,_CCR(r1)
975	mtcrf	0xff,r10
976	REST_2GPRS(9, r1)
977	.globl exc_exit_restart
978exc_exit_restart:
979	lwz	r11,_NIP(r1)
980	lwz	r12,_MSR(r1)
981exc_exit_start:
982	mtspr	SPRN_SRR0,r11
983	mtspr	SPRN_SRR1,r12
984	REST_2GPRS(11, r1)
985	lwz	r1,GPR1(r1)
986	.globl exc_exit_restart_end
987exc_exit_restart_end:
988	PPC405_ERR77_SYNC
989	rfi
990	b	.			/* prevent prefetch past rfi */
991
992/*
993 * Returning from a critical interrupt in user mode doesn't need
994 * to be any different from a normal exception.  For a critical
995 * interrupt in the kernel, we just return (without checking for
996 * preemption) since the interrupt may have happened at some crucial
997 * place (e.g. inside the TLB miss handler), and because we will be
998 * running with r1 pointing into critical_stack, not the current
999 * process's kernel stack (and therefore current_thread_info() will
1000 * give the wrong answer).
1001 * We have to restore various SPRs that may have been in use at the
1002 * time of the critical interrupt.
1003 *
1004 */
1005#ifdef CONFIG_40x
1006#define PPC_40x_TURN_OFF_MSR_DR						    \
1007	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1008	 * assume the instructions here are mapped by a pinned TLB entry */ \
1009	li	r10,MSR_IR;						    \
1010	mtmsr	r10;							    \
1011	isync;								    \
1012	tophys(r1, r1);
1013#else
1014#define PPC_40x_TURN_OFF_MSR_DR
1015#endif
1016
1017#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1018	REST_NVGPRS(r1);						\
1019	lwz	r3,_MSR(r1);						\
1020	andi.	r3,r3,MSR_PR;						\
1021	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1022	bne	user_exc_return;					\
1023	lwz	r0,GPR0(r1);						\
1024	lwz	r2,GPR2(r1);						\
1025	REST_4GPRS(3, r1);						\
1026	REST_2GPRS(7, r1);						\
1027	lwz	r10,_XER(r1);						\
1028	lwz	r11,_CTR(r1);						\
1029	mtspr	SPRN_XER,r10;						\
1030	mtctr	r11;							\
1031	PPC405_ERR77(0,r1);						\
1032	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1033	lwz	r11,_LINK(r1);						\
1034	mtlr	r11;							\
1035	lwz	r10,_CCR(r1);						\
1036	mtcrf	0xff,r10;						\
1037	PPC_40x_TURN_OFF_MSR_DR;					\
1038	lwz	r9,_DEAR(r1);						\
1039	lwz	r10,_ESR(r1);						\
1040	mtspr	SPRN_DEAR,r9;						\
1041	mtspr	SPRN_ESR,r10;						\
1042	lwz	r11,_NIP(r1);						\
1043	lwz	r12,_MSR(r1);						\
1044	mtspr	exc_lvl_srr0,r11;					\
1045	mtspr	exc_lvl_srr1,r12;					\
1046	lwz	r9,GPR9(r1);						\
1047	lwz	r12,GPR12(r1);						\
1048	lwz	r10,GPR10(r1);						\
1049	lwz	r11,GPR11(r1);						\
1050	lwz	r1,GPR1(r1);						\
1051	PPC405_ERR77_SYNC;						\
1052	exc_lvl_rfi;							\
1053	b	.;		/* prevent prefetch past exc_lvl_rfi */
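/*
 * RET_FROM_EXC_LEVEL is the common tail of ret_from_crit_exc,
 * ret_from_debug_exc and ret_from_mcheck_exc below: it bounces returns
 * to user mode through the normal user_exc_return path, otherwise it
 * restores the full register set plus DEAR/ESR and returns with the
 * level-specific rfi variant (rfci, rfdi or rfmci).
 */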
1054
1055#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1056	lwz	r9,_##exc_lvl_srr0(r1);					\
1057	lwz	r10,_##exc_lvl_srr1(r1);				\
1058	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1059	mtspr	SPRN_##exc_lvl_srr1,r10;
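/*
 * e.g. RESTORE_xSRR(CSRR0,CSRR1) reloads the _CSRR0/_CSRR1 slots of the
 * exception frame into SPRN_CSRR0/SPRN_CSRR1 via the ## token pasting
 * above.
 */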
1060
1061#if defined(CONFIG_PPC_BOOK3E_MMU)
1062#ifdef CONFIG_PHYS_64BIT
1063#define	RESTORE_MAS7							\
1064	lwz	r11,MAS7(r1);						\
1065	mtspr	SPRN_MAS7,r11;
1066#else
1067#define	RESTORE_MAS7
1068#endif /* CONFIG_PHYS_64BIT */
1069#define RESTORE_MMU_REGS						\
1070	lwz	r9,MAS0(r1);						\
1071	lwz	r10,MAS1(r1);						\
1072	lwz	r11,MAS2(r1);						\
1073	mtspr	SPRN_MAS0,r9;						\
1074	lwz	r9,MAS3(r1);						\
1075	mtspr	SPRN_MAS1,r10;						\
1076	lwz	r10,MAS6(r1);						\
1077	mtspr	SPRN_MAS2,r11;						\
1078	mtspr	SPRN_MAS3,r9;						\
1079	mtspr	SPRN_MAS6,r10;						\
1080	RESTORE_MAS7;
1081#elif defined(CONFIG_44x)
1082#define RESTORE_MMU_REGS						\
1083	lwz	r9,MMUCR(r1);						\
1084	mtspr	SPRN_MMUCR,r9;
1085#else
1086#define RESTORE_MMU_REGS
1087#endif
1088
1089#ifdef CONFIG_40x
1090	.globl	ret_from_crit_exc
1091ret_from_crit_exc:
1092	mfspr	r9,SPRN_SPRG_THREAD
1093	lis	r10,saved_ksp_limit@ha;
1094	lwz	r10,saved_ksp_limit@l(r10);
1095	tovirt(r9,r9);
1096	stw	r10,KSP_LIMIT(r9)
1097	lis	r9,crit_srr0@ha;
1098	lwz	r9,crit_srr0@l(r9);
1099	lis	r10,crit_srr1@ha;
1100	lwz	r10,crit_srr1@l(r10);
1101	mtspr	SPRN_SRR0,r9;
1102	mtspr	SPRN_SRR1,r10;
1103	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1104#endif /* CONFIG_40x */
1105
1106#ifdef CONFIG_BOOKE
1107	.globl	ret_from_crit_exc
1108ret_from_crit_exc:
1109	mfspr	r9,SPRN_SPRG_THREAD
1110	lwz	r10,SAVED_KSP_LIMIT(r1)
1111	stw	r10,KSP_LIMIT(r9)
1112	RESTORE_xSRR(SRR0,SRR1);
1113	RESTORE_MMU_REGS;
1114	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1115
1116	.globl	ret_from_debug_exc
1117ret_from_debug_exc:
1118	mfspr	r9,SPRN_SPRG_THREAD
1119	lwz	r10,SAVED_KSP_LIMIT(r1)
1120	stw	r10,KSP_LIMIT(r9)
1121	lwz	r9,THREAD_INFO-THREAD(r9)
1122	CURRENT_THREAD_INFO(r10, r1)
1123	lwz	r10,TI_PREEMPT(r10)
1124	stw	r10,TI_PREEMPT(r9)
1125	RESTORE_xSRR(SRR0,SRR1);
1126	RESTORE_xSRR(CSRR0,CSRR1);
1127	RESTORE_MMU_REGS;
1128	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1129
1130	.globl	ret_from_mcheck_exc
1131ret_from_mcheck_exc:
1132	mfspr	r9,SPRN_SPRG_THREAD
1133	lwz	r10,SAVED_KSP_LIMIT(r1)
1134	stw	r10,KSP_LIMIT(r9)
1135	RESTORE_xSRR(SRR0,SRR1);
1136	RESTORE_xSRR(CSRR0,CSRR1);
1137	RESTORE_xSRR(DSRR0,DSRR1);
1138	RESTORE_MMU_REGS;
1139	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1140#endif /* CONFIG_BOOKE */
1141
1142/*
1143 * Load the DBCR0 value for a task that is being ptraced,
1144 * having first saved away the global DBCR0.  Note that r0
1145 * has the dbcr0 value to set upon entry to this.
1146 */
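/*
 * global_dbcr0 is 8 bytes per CPU: word 0 holds the DBCR0 value saved
 * here, word 1 a count that load_dbcr0 increments and
 * transfer_to_handler decrements when it puts the saved value back on
 * kernel entry.
 */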
1147load_dbcr0:
1148	mfmsr	r10		/* first disable debug exceptions */
1149	rlwinm	r10,r10,0,~MSR_DE
1150	mtmsr	r10
1151	isync
1152	mfspr	r10,SPRN_DBCR0
1153	lis	r11,global_dbcr0@ha
1154	addi	r11,r11,global_dbcr0@l
1155#ifdef CONFIG_SMP
1156	CURRENT_THREAD_INFO(r9, r1)
1157	lwz	r9,TI_CPU(r9)
1158	slwi	r9,r9,3
1159	add	r11,r11,r9
1160#endif
1161	stw	r10,0(r11)
1162	mtspr	SPRN_DBCR0,r0
1163	lwz	r10,4(r11)
1164	addi	r10,r10,1
1165	stw	r10,4(r11)
1166	li	r11,-1
1167	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1168	blr
1169
1170	.section .bss
1171	.align	4
1172global_dbcr0:
1173	.space	8*NR_CPUS
1174	.previous
1175#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1176
1177do_work:			/* r10 contains MSR_KERNEL here */
1178	andi.	r0,r9,_TIF_NEED_RESCHED
1179	beq	do_user_signal
1180
1181do_resched:			/* r10 contains MSR_KERNEL here */
1182	/* Note: We don't need to inform lockdep that we are enabling
1183	 * interrupts here. As far as it knows, they are already enabled
1184	 */
1185	ori	r10,r10,MSR_EE
1186	SYNC
1187	MTMSRD(r10)		/* hard-enable interrupts */
1188	bl	schedule
1189recheck:
1190	/* Note: And we don't tell it we are disabling them again
1191	 * either. Those disable/enable cycles used to peek at
1192	 * TI_FLAGS aren't advertised.
1193	 */
1194	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1195	SYNC
1196	MTMSRD(r10)		/* disable interrupts */
1197	CURRENT_THREAD_INFO(r9, r1)
1198	lwz	r9,TI_FLAGS(r9)
1199	andi.	r0,r9,_TIF_NEED_RESCHED
1200	bne-	do_resched
1201	andi.	r0,r9,_TIF_USER_WORK_MASK
1202	beq	restore_user
1203do_user_signal:			/* r10 contains MSR_KERNEL here */
1204	ori	r10,r10,MSR_EE
1205	SYNC
1206	MTMSRD(r10)		/* hard-enable interrupts */
1207	/* save r13-r31 in the exception frame, if not already done */
1208	lwz	r3,_TRAP(r1)
1209	andi.	r0,r3,1
1210	beq	2f
1211	SAVE_NVGPRS(r1)
1212	rlwinm	r3,r3,0,0,30
1213	stw	r3,_TRAP(r1)
12142:	addi	r3,r1,STACK_FRAME_OVERHEAD
1215	mr	r4,r9
1216	bl	do_notify_resume
1217	REST_NVGPRS(r1)
1218	b	recheck
1219
1220/*
1221 * We come here when we are at the end of handling an exception
1222 * that occurred at a place where taking an exception will lose
1223 * state information, such as the contents of SRR0 and SRR1.
1224 */
1225nonrecoverable:
1226	lis	r10,exc_exit_restart_end@ha
1227	addi	r10,r10,exc_exit_restart_end@l
1228	cmplw	r12,r10
1229	bge	3f
1230	lis	r11,exc_exit_restart@ha
1231	addi	r11,r11,exc_exit_restart@l
1232	cmplw	r12,r11
1233	blt	3f
1234	lis	r10,ee_restarts@ha
1235	lwz	r12,ee_restarts@l(r10)
1236	addi	r12,r12,1
1237	stw	r12,ee_restarts@l(r10)
1238	mr	r12,r11		/* restart at exc_exit_restart */
1239	blr
12403:	/* OK, we can't recover, kill this process */
1241	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1242BEGIN_FTR_SECTION
1243	blr
1244END_FTR_SECTION_IFSET(CPU_FTR_601)
1245	lwz	r3,_TRAP(r1)
1246	andi.	r0,r3,1
1247	beq	4f
1248	SAVE_NVGPRS(r1)
1249	rlwinm	r3,r3,0,0,30
1250	stw	r3,_TRAP(r1)
12514:	addi	r3,r1,STACK_FRAME_OVERHEAD
1252	bl	nonrecoverable_exception
1253	/* shouldn't return */
1254	b	4b
1255
1256	.section .bss
1257	.align	2
1258ee_restarts:
1259	.space	4
1260	.previous
1261
1262/*
1263 * PROM code for specific machines follows.  Put it
1264 * here so it's easy to add arch-specific sections later.
1265 * -- Cort
1266 */
1267#ifdef CONFIG_PPC_RTAS
1268/*
1269 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1270 * called with the MMU off.
1271 */
1272_GLOBAL(enter_rtas)
1273	stwu	r1,-INT_FRAME_SIZE(r1)
1274	mflr	r0
1275	stw	r0,INT_FRAME_SIZE+4(r1)
1276	LOAD_REG_ADDR(r4, rtas)
1277	lis	r6,1f@ha	/* physical return address for rtas */
1278	addi	r6,r6,1f@l
1279	tophys(r6,r6)
1280	tophys(r7,r1)
1281	lwz	r8,RTASENTRY(r4)
1282	lwz	r4,RTASBASE(r4)
1283	mfmsr	r9
1284	stw	r9,8(r1)
1285	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1286	SYNC			/* disable interrupts so SRR0/1 */
1287	MTMSRD(r0)		/* don't get trashed */
1288	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1289	mtlr	r6
1290	mtspr	SPRN_SPRG_RTAS,r7
1291	mtspr	SPRN_SRR0,r8
1292	mtspr	SPRN_SRR1,r9
1293	RFI
12941:	tophys(r9,r1)
1295	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1296	lwz	r9,8(r9)	/* original msr value */
1297	FIX_SRR1(r9,r0)
1298	addi	r1,r1,INT_FRAME_SIZE
1299	li	r0,0
1300	mtspr	SPRN_SPRG_RTAS,r0
1301	mtspr	SPRN_SRR0,r8
1302	mtspr	SPRN_SRR1,r9
1303	RFI			/* return to caller */
1304
1305	.globl	machine_check_in_rtas
1306machine_check_in_rtas:
1307	twi	31,0,0
1308	/* XXX load up BATs and panic */
1309
1310#endif /* CONFIG_PPC_RTAS */
1311
1312#ifdef CONFIG_FUNCTION_TRACER
1313#ifdef CONFIG_DYNAMIC_FTRACE
1314_GLOBAL(mcount)
1315_GLOBAL(_mcount)
1316	/*
1317	 * _mcount on PPC32 is required to preserve the link
1318	 * register, but we have r0 to play with. We use r0 to
1319	 * move the return address (back into the caller of
1320	 * mcount) into the ctr register, restore the original
1321	 * link register from the stack and then jump back via ctr.
1322	 */
1323	mflr	r0
1324	mtctr	r0
1325	lwz	r0, 4(r1)
1326	mtlr	r0
1327	bctr
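/*
 * With DYNAMIC_FTRACE the calls the compiler plants at function entry
 * (roughly "mflr r0; stw r0,4(r1); bl _mcount" on PPC32) are patched to
 * nops at boot and only redirected to ftrace_caller below while tracing
 * is active, so _mcount itself just returns to its caller.
 */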
1328
1329_GLOBAL(ftrace_caller)
1330	MCOUNT_SAVE_FRAME
1331	/* r3 ends up with link register */
1332	subi	r3, r3, MCOUNT_INSN_SIZE
1333.globl ftrace_call
1334ftrace_call:
1335	bl	ftrace_stub
1336	nop
1337#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1338.globl ftrace_graph_call
1339ftrace_graph_call:
1340	b	ftrace_graph_stub
1341_GLOBAL(ftrace_graph_stub)
1342#endif
1343	MCOUNT_RESTORE_FRAME
1344	/* old link register ends up in ctr reg */
1345	bctr
1346#else
1347_GLOBAL(mcount)
1348_GLOBAL(_mcount)
1349
1350	MCOUNT_SAVE_FRAME
1351
1352	subi	r3, r3, MCOUNT_INSN_SIZE
1353	LOAD_REG_ADDR(r5, ftrace_trace_function)
1354	lwz	r5,0(r5)
1355
1356	mtctr	r5
1357	bctrl
1358	nop
1359
1360#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1361	b	ftrace_graph_caller
1362#endif
1363	MCOUNT_RESTORE_FRAME
1364	bctr
1365#endif
1366EXPORT_SYMBOL(_mcount)
1367
1368_GLOBAL(ftrace_stub)
1369	blr
1370
1371#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1372_GLOBAL(ftrace_graph_caller)
1373	/* load r4 with local address */
1374	lwz	r4, 44(r1)
1375	subi	r4, r4, MCOUNT_INSN_SIZE
1376
1377	/* Grab the LR out of the caller stack frame */
1378	lwz	r3,52(r1)
1379
1380	bl	prepare_ftrace_return
1381	nop
1382
1383        /*
1384         * prepare_ftrace_return gives us the address we divert to.
1385         * Change the LR in the caller's stack frame to this.
1386         */
1387	stw	r3,52(r1)
1388
1389	MCOUNT_RESTORE_FRAME
1390	/* old link register ends up in ctr reg */
1391	bctr
1392
1393_GLOBAL(return_to_handler)
1394	/* need to save return values */
1395	stwu	r1, -32(r1)
1396	stw	r3, 20(r1)
1397	stw	r4, 16(r1)
1398	stw	r31, 12(r1)
1399	mr	r31, r1
1400
1401	bl	ftrace_return_to_handler
1402	nop
1403
1404	/* return value has real return address */
1405	mtlr	r3
1406
1407	lwz	r3, 20(r1)
1408	lwz	r4, 16(r1)
1409	lwz	r31,12(r1)
1410	lwz	r1, 0(r1)
1411
1412	/* Jump back to real return address */
1413	blr
1414#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1415
1416#endif /* CONFIG_FUNCTION_TRACER */
1417