1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
39/*
40 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
41 */
42#if MSR_KERNEL >= 0x10000
43#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
44#else
45#define LOAD_MSR_KERNEL(r, x)	li r,(x)
46#endif
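/*
 * Illustrative sketch (comment only, nothing here is assembled): on
 * 4xx/Book-E, where MSR_KERNEL includes MSR_CE and no longer fits in a
 * signed 16-bit immediate, LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to
 * the two-instruction pair
 *
 *	lis	r10,MSR_KERNEL@h
 *	ori	r10,r10,MSR_KERNEL@l
 *
 * while on classic 32-bit parts it is just "li r10,MSR_KERNEL".
 */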
47
48#ifdef CONFIG_BOOKE
49	.globl	mcheck_transfer_to_handler
50mcheck_transfer_to_handler:
51	mfspr	r0,SPRN_DSRR0
52	stw	r0,_DSRR0(r11)
53	mfspr	r0,SPRN_DSRR1
54	stw	r0,_DSRR1(r11)
55	/* fall through */
56
57	.globl	debug_transfer_to_handler
58debug_transfer_to_handler:
59	mfspr	r0,SPRN_CSRR0
60	stw	r0,_CSRR0(r11)
61	mfspr	r0,SPRN_CSRR1
62	stw	r0,_CSRR1(r11)
63	/* fall through */
64
65	.globl	crit_transfer_to_handler
66crit_transfer_to_handler:
67#ifdef CONFIG_PPC_BOOK3E_MMU
68	mfspr	r0,SPRN_MAS0
69	stw	r0,MAS0(r11)
70	mfspr	r0,SPRN_MAS1
71	stw	r0,MAS1(r11)
72	mfspr	r0,SPRN_MAS2
73	stw	r0,MAS2(r11)
74	mfspr	r0,SPRN_MAS3
75	stw	r0,MAS3(r11)
76	mfspr	r0,SPRN_MAS6
77	stw	r0,MAS6(r11)
78#ifdef CONFIG_PHYS_64BIT
79	mfspr	r0,SPRN_MAS7
80	stw	r0,MAS7(r11)
81#endif /* CONFIG_PHYS_64BIT */
82#endif /* CONFIG_PPC_BOOK3E_MMU */
83#ifdef CONFIG_44x
84	mfspr	r0,SPRN_MMUCR
85	stw	r0,MMUCR(r11)
86#endif
87	mfspr	r0,SPRN_SRR0
88	stw	r0,_SRR0(r11)
89	mfspr	r0,SPRN_SRR1
90	stw	r0,_SRR1(r11)
91
92	/* update the stack limit to track the current stack,
93	 * keeping the thread_info struct at the base of the
94	 * stack protected from overflow
95	 */
96	mfspr	r8,SPRN_SPRG_THREAD
97	lwz	r0,KSP_LIMIT(r8)
98	stw	r0,SAVED_KSP_LIMIT(r11)
99	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
100	stw	r0,KSP_LIMIT(r8)
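	/*
	 * Hedged note (the 40x path below does the same): the rlwimi above
	 * merges the THREAD_SIZE-aligned base of the current stack (the
	 * high bits of r1) with the low bits of the saved limit, roughly
	 *
	 *	new_limit = (r1 & ~(THREAD_SIZE - 1)) |
	 *		    (old_limit & (THREAD_SIZE - 1));
	 *
	 * so stack-overflow checking keeps protecting the thread_info at
	 * the bottom of whichever stack we are now running on.
	 */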
101	/* fall through */
102#endif
103
104#ifdef CONFIG_40x
105	.globl	crit_transfer_to_handler
106crit_transfer_to_handler:
107	lwz	r0,crit_r10@l(0)
108	stw	r0,GPR10(r11)
109	lwz	r0,crit_r11@l(0)
110	stw	r0,GPR11(r11)
111	mfspr	r0,SPRN_SRR0
112	stw	r0,crit_srr0@l(0)
113	mfspr	r0,SPRN_SRR1
114	stw	r0,crit_srr1@l(0)
115
116	/* update the stack limit to track the current stack,
117	 * keeping the thread_info struct at the base of the
118	 * stack protected from overflow
119	 */
120	mfspr	r8,SPRN_SPRG_THREAD
121	lwz	r0,KSP_LIMIT(r8)
122	stw	r0,saved_ksp_limit@l(0)
123	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
124	stw	r0,KSP_LIMIT(r8)
125	/* fall through */
126#endif
127
128/*
129 * This code finishes saving the registers to the exception frame
130 * and jumps to the appropriate handler for the exception, turning
131 * on address translation.
132 * Note that we rely on the caller having set cr0.eq iff the exception
133 * occurred in kernel mode (i.e. MSR:PR = 0).
134 */
135	.globl	transfer_to_handler_full
136transfer_to_handler_full:
137	SAVE_NVGPRS(r11)
138	/* fall through */
139
140	.globl	transfer_to_handler
141transfer_to_handler:
142	stw	r2,GPR2(r11)
143	stw	r12,_NIP(r11)
144	stw	r9,_MSR(r11)
145	andi.	r2,r9,MSR_PR
146	mfctr	r12
147	mfspr	r2,SPRN_XER
148	stw	r12,_CTR(r11)
149	stw	r2,_XER(r11)
150	mfspr	r12,SPRN_SPRG_THREAD
151	addi	r2,r12,-THREAD
152	tovirt(r2,r2)			/* set r2 to current */
153	beq	2f			/* if from user, fix up THREAD.regs */
154	addi	r11,r1,STACK_FRAME_OVERHEAD
155	stw	r11,PT_REGS(r12)
156#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
157	/* Check to see if the dbcr0 register is set up to debug.  Use the
158	   internal debug mode bit to do this. */
159	lwz	r12,THREAD_DBCR0(r12)
160	andis.	r12,r12,DBCR0_IDM@h
161	beq+	3f
162	/* From user and task is ptraced - load up global dbcr0 */
163	li	r12,-1			/* clear all pending debug events */
164	mtspr	SPRN_DBSR,r12
165	lis	r11,global_dbcr0@ha
166	tophys(r11,r11)
167	addi	r11,r11,global_dbcr0@l
168#ifdef CONFIG_SMP
169	CURRENT_THREAD_INFO(r9, r1)
170	lwz	r9,TI_CPU(r9)
171	slwi	r9,r9,3
172	add	r11,r11,r9
173#endif
174	lwz	r12,0(r11)
175	mtspr	SPRN_DBCR0,r12
176	lwz	r12,4(r11)
177	addi	r12,r12,-1
178	stw	r12,4(r11)
179#endif
180	b	3f
181
1822:	/* if from kernel, check interrupted DOZE/NAP mode and
183         * check for stack overflow
184         */
185	lwz	r9,KSP_LIMIT(r12)
186	cmplw	r1,r9			/* if r1 <= ksp_limit */
187	ble-	stack_ovf		/* then the kernel stack overflowed */
1885:
189#if defined(CONFIG_6xx) || defined(CONFIG_E500)
190	CURRENT_THREAD_INFO(r9, r1)
191	tophys(r9,r9)			/* check local flags */
192	lwz	r12,TI_LOCAL_FLAGS(r9)
193	mtcrf	0x01,r12
194	bt-	31-TLF_NAPPING,4f
195	bt-	31-TLF_SLEEPING,7f
196#endif /* CONFIG_6xx || CONFIG_E500 */
197	.globl transfer_to_handler_cont
198transfer_to_handler_cont:
1993:
200	mflr	r9
201	lwz	r11,0(r9)		/* virtual address of handler */
202	lwz	r9,4(r9)		/* where to go when done */
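	/*
	 * Illustrative sketch of the calling convention used here: callers
	 * branch to transfer_to_handler with two literal words planted
	 * straight after the "bl", and the two loads above pick them up
	 * through LR.  The nonrecoverable-interrupt path later in this
	 * file uses exactly this layout:
	 *
	 *	bl	transfer_to_handler_full
	 *	.long	nonrecoverable_exception	(handler address)
	 *	.long	ret_from_except			(where to go when done)
	 */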
203#ifdef CONFIG_TRACE_IRQFLAGS
204	lis	r12,reenable_mmu@h
205	ori	r12,r12,reenable_mmu@l
206	mtspr	SPRN_SRR0,r12
207	mtspr	SPRN_SRR1,r10
208	SYNC
209	RFI
210reenable_mmu:				/* re-enable mmu so we can */
211	mfmsr	r10
212	lwz	r12,_MSR(r1)
213	xor	r10,r10,r12
214	andi.	r10,r10,MSR_EE		/* Did EE change? */
215	beq	1f
216
217	/*
218	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
219	 * If we came from user mode there is only one stack frame on the
220	 * stack, and accessing CALLER_ADDR1 will cause an oops. So we need
221	 * to create a dummy stack frame to make trace_hardirqs_off happy.
222	 *
223	 * This is handy because we also need to save a bunch of GPRs,
224	 * r3 can be different from GPR3(r1) at this point, r9 and r11
225	 * contain the old MSR and the handler address respectively,
226	 * r4 & r5 can contain page fault arguments that need to be passed
227	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
228	 * they aren't useful past this point (aren't syscall arguments),
229	 * the rest is restored from the exception frame.
230	 */
231	stwu	r1,-32(r1)
232	stw	r9,8(r1)
233	stw	r11,12(r1)
234	stw	r3,16(r1)
235	stw	r4,20(r1)
236	stw	r5,24(r1)
237	bl	trace_hardirqs_off
238	lwz	r5,24(r1)
239	lwz	r4,20(r1)
240	lwz	r3,16(r1)
241	lwz	r11,12(r1)
242	lwz	r9,8(r1)
243	addi	r1,r1,32
244	lwz	r0,GPR0(r1)
245	lwz	r6,GPR6(r1)
246	lwz	r7,GPR7(r1)
247	lwz	r8,GPR8(r1)
2481:	mtctr	r11
249	mtlr	r9
250	bctr				/* jump to handler */
251#else /* CONFIG_TRACE_IRQFLAGS */
252	mtspr	SPRN_SRR0,r11
253	mtspr	SPRN_SRR1,r10
254	mtlr	r9
255	SYNC
256	RFI				/* jump to handler, enable MMU */
257#endif /* CONFIG_TRACE_IRQFLAGS */
258
259#if defined(CONFIG_6xx) || defined(CONFIG_E500)
2604:	rlwinm	r12,r12,0,~_TLF_NAPPING
261	stw	r12,TI_LOCAL_FLAGS(r9)
262	b	power_save_ppc32_restore
263
2647:	rlwinm	r12,r12,0,~_TLF_SLEEPING
265	stw	r12,TI_LOCAL_FLAGS(r9)
266	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
267	rlwinm	r9,r9,0,~MSR_EE
268	lwz	r12,_LINK(r11)		/* and return to address in LR */
269	b	fast_exception_return
270#endif
271
272/*
273 * On kernel stack overflow, load up an initial stack pointer
274 * and call StackOverflow(regs), which should not return.
275 */
276stack_ovf:
277	/* sometimes we use a statically-allocated stack, which is OK. */
278	lis	r12,_end@h
279	ori	r12,r12,_end@l
280	cmplw	r1,r12
281	ble	5b			/* r1 <= &_end is OK */
282	SAVE_NVGPRS(r11)
283	addi	r3,r1,STACK_FRAME_OVERHEAD
284	lis	r1,init_thread_union@ha
285	addi	r1,r1,init_thread_union@l
286	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
287	lis	r9,StackOverflow@ha
288	addi	r9,r9,StackOverflow@l
289	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
290	FIX_SRR1(r10,r12)
291	mtspr	SPRN_SRR0,r9
292	mtspr	SPRN_SRR1,r10
293	SYNC
294	RFI
295
296/*
297 * Handle a system call.
298 */
299	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
300	.stabs	"entry_32.S",N_SO,0,0,0f
3010:
302
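/*
 * A brief orientation sketch, derived from the code below rather than a
 * separate spec: the syscall number arrives in r0 and is used to index
 * sys_call_table, the arguments are in r3-r8 (already saved in the
 * exception frame), and the result comes back in r3.  A handler reports
 * failure by returning -errno; ret_from_syscall then negates the value
 * and sets the SO bit in CR0 so user space can distinguish success from
 * error.
 */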
303_GLOBAL(DoSyscall)
304	stw	r3,ORIG_GPR3(r1)
305	li	r12,0
306	stw	r12,RESULT(r1)
307	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
308	rlwinm	r11,r11,0,4,2
309	stw	r11,_CCR(r1)
310#ifdef SHOW_SYSCALLS
311	bl	do_show_syscall
312#endif /* SHOW_SYSCALLS */
313#ifdef CONFIG_TRACE_IRQFLAGS
314	/* Returning from a syscall can (and generally will) hard-enable
315	 * interrupts. You aren't supposed to issue a syscall with
316	 * interrupts disabled in the first place. However, to ensure
317	 * that we get it right vs. lockdep if it happens, we force
318	 * the hard enable here with appropriate tracing if we see
319	 * that we have been called with interrupts off.
320	 */
321	mfmsr	r11
322	andi.	r12,r11,MSR_EE
323	bne+	1f
324	/* We came in with interrupts disabled, we enable them now */
325	bl	trace_hardirqs_on
326	mfmsr	r11
327	lwz	r0,GPR0(r1)
328	lwz	r3,GPR3(r1)
329	lwz	r4,GPR4(r1)
330	ori	r11,r11,MSR_EE
331	lwz	r5,GPR5(r1)
332	lwz	r6,GPR6(r1)
333	lwz	r7,GPR7(r1)
334	lwz	r8,GPR8(r1)
335	mtmsr	r11
3361:
337#endif /* CONFIG_TRACE_IRQFLAGS */
338	CURRENT_THREAD_INFO(r10, r1)
339	lwz	r11,TI_FLAGS(r10)
340	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
341	bne-	syscall_dotrace
342syscall_dotrace_cont:
343	cmplwi	0,r0,NR_syscalls
344	lis	r10,sys_call_table@h
345	ori	r10,r10,sys_call_table@l
346	slwi	r0,r0,2
347	bge-	66f
348	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
349	mtlr	r10
350	addi	r9,r1,STACK_FRAME_OVERHEAD
351	PPC440EP_ERR42
352	blrl			/* Call handler */
353	.globl	ret_from_syscall
354ret_from_syscall:
355#ifdef SHOW_SYSCALLS
356	bl	do_show_syscall_exit
357#endif
358	mr	r6,r3
359	CURRENT_THREAD_INFO(r12, r1)
360	/* disable interrupts so current_thread_info()->flags can't change */
361	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
362	/* Note: We don't bother telling lockdep about it */
363	SYNC
364	MTMSRD(r10)
365	lwz	r9,TI_FLAGS(r12)
366	li	r8,-_LAST_ERRNO
367	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
368	bne-	syscall_exit_work
369	cmplw	0,r3,r8
370	blt+	syscall_exit_cont
371	lwz	r11,_CCR(r1)			/* Load CR */
372	neg	r3,r3
373	oris	r11,r11,0x1000	/* Set SO bit in CR */
374	stw	r11,_CCR(r1)
375syscall_exit_cont:
376	lwz	r8,_MSR(r1)
377#ifdef CONFIG_TRACE_IRQFLAGS
378	/* If we are going to return from the syscall with interrupts
379	 * off, we trace that here. It shouldn't happen, but we
380	 * want to catch the bugger if it does, right?
381	 */
382	andi.	r10,r8,MSR_EE
383	bne+	1f
384	stw	r3,GPR3(r1)
385	bl      trace_hardirqs_off
386	lwz	r3,GPR3(r1)
3871:
388#endif /* CONFIG_TRACE_IRQFLAGS */
389#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
390	/* If the process has its own DBCR0 value, load it up.  The internal
391	   debug mode bit tells us that dbcr0 should be loaded. */
392	lwz	r0,THREAD+THREAD_DBCR0(r2)
393	andis.	r10,r0,DBCR0_IDM@h
394	bnel-	load_dbcr0
395#endif
396#ifdef CONFIG_44x
397BEGIN_MMU_FTR_SECTION
398	lis	r4,icache_44x_need_flush@ha
399	lwz	r5,icache_44x_need_flush@l(r4)
400	cmplwi	cr0,r5,0
401	bne-	2f
4021:
403END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
404#endif /* CONFIG_44x */
405BEGIN_FTR_SECTION
406	lwarx	r7,0,r1
407END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
408	stwcx.	r0,0,r1			/* to clear the reservation */
409	lwz	r4,_LINK(r1)
410	lwz	r5,_CCR(r1)
411	mtlr	r4
412	mtcr	r5
413	lwz	r7,_NIP(r1)
414	FIX_SRR1(r8, r0)
415	lwz	r2,GPR2(r1)
416	lwz	r1,GPR1(r1)
417	mtspr	SPRN_SRR0,r7
418	mtspr	SPRN_SRR1,r8
419	SYNC
420	RFI
421#ifdef CONFIG_44x
4222:	li	r7,0
423	iccci	r0,r0
424	stw	r7,icache_44x_need_flush@l(r4)
425	b	1b
426#endif  /* CONFIG_44x */
427
42866:	li	r3,-ENOSYS
429	b	ret_from_syscall
430
431	.globl	ret_from_fork
432ret_from_fork:
433	REST_NVGPRS(r1)
434	bl	schedule_tail
435	li	r3,0
436	b	ret_from_syscall
437
438/* Traced system call support */
439syscall_dotrace:
440	SAVE_NVGPRS(r1)
441	li	r0,0xc00
442	stw	r0,_TRAP(r1)
443	addi	r3,r1,STACK_FRAME_OVERHEAD
444	bl	do_syscall_trace_enter
445	/*
446	 * Restore argument registers possibly just changed.
447	 * We use the return value of do_syscall_trace_enter
448	 * for call number to look up in the table (r0).
449	 */
450	mr	r0,r3
451	lwz	r3,GPR3(r1)
452	lwz	r4,GPR4(r1)
453	lwz	r5,GPR5(r1)
454	lwz	r6,GPR6(r1)
455	lwz	r7,GPR7(r1)
456	lwz	r8,GPR8(r1)
457	REST_NVGPRS(r1)
458	b	syscall_dotrace_cont
459
460syscall_exit_work:
461	andi.	r0,r9,_TIF_RESTOREALL
462	beq+	0f
463	REST_NVGPRS(r1)
464	b	2f
4650:	cmplw	0,r3,r8
466	blt+	1f
467	andi.	r0,r9,_TIF_NOERROR
468	bne-	1f
469	lwz	r11,_CCR(r1)			/* Load CR */
470	neg	r3,r3
471	oris	r11,r11,0x1000	/* Set SO bit in CR */
472	stw	r11,_CCR(r1)
473
4741:	stw	r6,RESULT(r1)	/* Save result */
475	stw	r3,GPR3(r1)	/* Update return value */
4762:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
477	beq	4f
478
479	/* Clear per-syscall TIF flags if any are set.  */
480
481	li	r11,_TIF_PERSYSCALL_MASK
482	addi	r12,r12,TI_FLAGS
4833:	lwarx	r8,0,r12
484	andc	r8,r8,r11
485#ifdef CONFIG_IBM405_ERR77
486	dcbt	0,r12
487#endif
488	stwcx.	r8,0,r12
489	bne-	3b
490	subi	r12,r12,TI_FLAGS
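	/*
	 * Hedged note: the lwarx/stwcx. loop above is the standard
	 * load-reserve/store-conditional pattern; it atomically performs
	 * the equivalent of
	 *
	 *	ti->flags &= ~_TIF_PERSYSCALL_MASK;
	 *
	 * retrying if another update to TI_FLAGS raced with us.
	 */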
491
4924:	/* Anything which requires enabling interrupts? */
493	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
494	beq	ret_from_except
495
496	/* Re-enable interrupts. There is no need to trace that with
497	 * lockdep as we are supposed to have IRQs on at this point
498	 */
499	ori	r10,r10,MSR_EE
500	SYNC
501	MTMSRD(r10)
502
503	/* Save NVGPRS if they're not saved already */
504	lwz	r4,_TRAP(r1)
505	andi.	r4,r4,1
506	beq	5f
507	SAVE_NVGPRS(r1)
508	li	r4,0xc00
509	stw	r4,_TRAP(r1)
5105:
511	addi	r3,r1,STACK_FRAME_OVERHEAD
512	bl	do_syscall_trace_leave
513	b	ret_from_except_full
514
515#ifdef SHOW_SYSCALLS
516do_show_syscall:
517#ifdef SHOW_SYSCALLS_TASK
518	lis	r11,show_syscalls_task@ha
519	lwz	r11,show_syscalls_task@l(r11)
520	cmp	0,r2,r11
521	bnelr
522#endif
523	stw	r31,GPR31(r1)
524	mflr	r31
525	lis	r3,7f@ha
526	addi	r3,r3,7f@l
527	lwz	r4,GPR0(r1)
528	lwz	r5,GPR3(r1)
529	lwz	r6,GPR4(r1)
530	lwz	r7,GPR5(r1)
531	lwz	r8,GPR6(r1)
532	lwz	r9,GPR7(r1)
533	bl	printk
534	lis	r3,77f@ha
535	addi	r3,r3,77f@l
536	lwz	r4,GPR8(r1)
537	mr	r5,r2
538	bl	printk
539	lwz	r0,GPR0(r1)
540	lwz	r3,GPR3(r1)
541	lwz	r4,GPR4(r1)
542	lwz	r5,GPR5(r1)
543	lwz	r6,GPR6(r1)
544	lwz	r7,GPR7(r1)
545	lwz	r8,GPR8(r1)
546	mtlr	r31
547	lwz	r31,GPR31(r1)
548	blr
549
550do_show_syscall_exit:
551#ifdef SHOW_SYSCALLS_TASK
552	lis	r11,show_syscalls_task@ha
553	lwz	r11,show_syscalls_task@l(r11)
554	cmp	0,r2,r11
555	bnelr
556#endif
557	stw	r31,GPR31(r1)
558	mflr	r31
559	stw	r3,RESULT(r1)	/* Save result */
560	mr	r4,r3
561	lis	r3,79f@ha
562	addi	r3,r3,79f@l
563	bl	printk
564	lwz	r3,RESULT(r1)
565	mtlr	r31
566	lwz	r31,GPR31(r1)
567	blr
568
5697:	.string	"syscall %d(%x, %x, %x, %x, %x, "
57077:	.string	"%x), current=%p\n"
57179:	.string	" -> %x\n"
572	.align	2,0
573
574#ifdef SHOW_SYSCALLS_TASK
575	.data
576	.globl	show_syscalls_task
577show_syscalls_task:
578	.long	-1
579	.text
580#endif
581#endif /* SHOW_SYSCALLS */
582
583/*
584 * The fork/clone functions need to copy the full register set into
585 * the child process. Therefore we need to save all the nonvolatile
586 * registers (r13 - r31) before calling the C code.
587 */
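/*
 * Hedged note on the _TRAP low bit manipulated below: throughout this
 * file the least significant bit of the saved trap number flags that
 * only the volatile registers were saved.  Clearing it (the
 * "rlwinm rX,rX,0,0,30" / clrrwi patterns) records that SAVE_NVGPRS has
 * run and the exception frame now holds the full register set.
 */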
588	.globl	ppc_fork
589ppc_fork:
590	SAVE_NVGPRS(r1)
591	lwz	r0,_TRAP(r1)
592	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
593	stw	r0,_TRAP(r1)		/* register set saved */
594	b	sys_fork
595
596	.globl	ppc_vfork
597ppc_vfork:
598	SAVE_NVGPRS(r1)
599	lwz	r0,_TRAP(r1)
600	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
601	stw	r0,_TRAP(r1)		/* register set saved */
602	b	sys_vfork
603
604	.globl	ppc_clone
605ppc_clone:
606	SAVE_NVGPRS(r1)
607	lwz	r0,_TRAP(r1)
608	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
609	stw	r0,_TRAP(r1)		/* register set saved */
610	b	sys_clone
611
612	.globl	ppc_swapcontext
613ppc_swapcontext:
614	SAVE_NVGPRS(r1)
615	lwz	r0,_TRAP(r1)
616	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
617	stw	r0,_TRAP(r1)		/* register set saved */
618	b	sys_swapcontext
619
620/*
621 * Top-level page fault handling.
622 * This is in assembler because if do_page_fault tells us that
623 * it is a bad kernel page fault, we want to save the non-volatile
624 * registers before calling bad_page_fault.
625 */
626	.globl	handle_page_fault
627handle_page_fault:
628	stw	r4,_DAR(r1)
629	addi	r3,r1,STACK_FRAME_OVERHEAD
630	bl	do_page_fault
631	cmpwi	r3,0
632	beq+	ret_from_except
633	SAVE_NVGPRS(r1)
634	lwz	r0,_TRAP(r1)
635	clrrwi	r0,r0,1
636	stw	r0,_TRAP(r1)
637	mr	r5,r3
638	addi	r3,r1,STACK_FRAME_OVERHEAD
639	lwz	r4,_DAR(r1)
640	bl	bad_page_fault
641	b	ret_from_except_full
642
643/*
644 * This routine switches between two different tasks.  The process
645 * state of one is saved on its kernel stack.  Then the state
646 * of the other is restored from its kernel stack.  The memory
647 * management hardware is updated to the second process's state.
648 * Finally, we can return to the second process.
649 * On entry, r3 points to the THREAD for the current task, r4
650 * points to the THREAD for the new task.
651 *
652 * This routine is always called with interrupts disabled.
653 *
654 * Note: there are two ways to get to the "going out" portion
655 * of this code; either by coming in via the entry (_switch)
656 * or via "fork" which must set up an environment equivalent
657 * to the "_switch" path.  If you change this, you'll have to
658 * change the fork code also.
659 *
660 * The code which creates the new task context is in 'copy_thread'
661 * in arch/powerpc/kernel/process.c
662 */
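/*
 * Hedged sketch of the C-level view (see __switch_to() in process.c;
 * details may differ): the caller ends up doing roughly
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * and the value handed back in r3 below is the task we switched away
 * from, so code now running on the new task's stack still knows which
 * task was previously current.
 */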
663_GLOBAL(_switch)
664	stwu	r1,-INT_FRAME_SIZE(r1)
665	mflr	r0
666	stw	r0,INT_FRAME_SIZE+4(r1)
667	/* r3-r12 are caller saved -- Cort */
668	SAVE_NVGPRS(r1)
669	stw	r0,_NIP(r1)	/* Return to switch caller */
670	mfmsr	r11
671	li	r0,MSR_FP	/* Disable floating-point */
672#ifdef CONFIG_ALTIVEC
673BEGIN_FTR_SECTION
674	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
675	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
676	stw	r12,THREAD+THREAD_VRSAVE(r2)
677END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
678#endif /* CONFIG_ALTIVEC */
679#ifdef CONFIG_SPE
680BEGIN_FTR_SECTION
681	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
682	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
683	stw	r12,THREAD+THREAD_SPEFSCR(r2)
684END_FTR_SECTION_IFSET(CPU_FTR_SPE)
685#endif /* CONFIG_SPE */
686	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
687	beq+	1f
688	andc	r11,r11,r0
689	MTMSRD(r11)
690	isync
6911:	stw	r11,_MSR(r1)
692	mfcr	r10
693	stw	r10,_CCR(r1)
694	stw	r1,KSP(r3)	/* Set old stack pointer */
695
696#ifdef CONFIG_SMP
697	/* We need a sync somewhere here to make sure that if the
698	 * previous task gets rescheduled on another CPU, it sees all
699	 * stores it has performed on this one.
700	 */
701	sync
702#endif /* CONFIG_SMP */
703
704	tophys(r0,r4)
705	CLR_TOP32(r0)
706	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
707	lwz	r1,KSP(r4)	/* Load new stack pointer */
708
709	/* save the old current 'last' for return value */
710	mr	r3,r2
711	addi	r2,r4,-THREAD	/* Update current */
712
713#ifdef CONFIG_ALTIVEC
714BEGIN_FTR_SECTION
715	lwz	r0,THREAD+THREAD_VRSAVE(r2)
716	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
717END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
718#endif /* CONFIG_ALTIVEC */
719#ifdef CONFIG_SPE
720BEGIN_FTR_SECTION
721	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
722	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
723END_FTR_SECTION_IFSET(CPU_FTR_SPE)
724#endif /* CONFIG_SPE */
725
726	lwz	r0,_CCR(r1)
727	mtcrf	0xFF,r0
728	/* r3-r12 are destroyed -- Cort */
729	REST_NVGPRS(r1)
730
731	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
732	mtlr	r4
733	addi	r1,r1,INT_FRAME_SIZE
734	blr
735
736	.globl	fast_exception_return
737fast_exception_return:
738#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
739	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
740	beq	1f			/* if not, we've got problems */
741#endif
742
7432:	REST_4GPRS(3, r11)
744	lwz	r10,_CCR(r11)
745	REST_GPR(1, r11)
746	mtcr	r10
747	lwz	r10,_LINK(r11)
748	mtlr	r10
749	REST_GPR(10, r11)
750	mtspr	SPRN_SRR1,r9
751	mtspr	SPRN_SRR0,r12
752	REST_GPR(9, r11)
753	REST_GPR(12, r11)
754	lwz	r11,GPR11(r11)
755	SYNC
756	RFI
757
758#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
759/* check if the exception happened in a restartable section */
7601:	lis	r3,exc_exit_restart_end@ha
761	addi	r3,r3,exc_exit_restart_end@l
762	cmplw	r12,r3
763	bge	3f
764	lis	r4,exc_exit_restart@ha
765	addi	r4,r4,exc_exit_restart@l
766	cmplw	r12,r4
767	blt	3f
768	lis	r3,fee_restarts@ha
769	tophys(r3,r3)
770	lwz	r5,fee_restarts@l(r3)
771	addi	r5,r5,1
772	stw	r5,fee_restarts@l(r3)
773	mr	r12,r4		/* restart at exc_exit_restart */
774	b	2b
775
776	.section .bss
777	.align	2
778fee_restarts:
779	.space	4
780	.previous
781
782/* aargh, a nonrecoverable interrupt, panic */
783/* aargh, we don't know which trap this is */
784/* but the 601 doesn't implement the RI bit, so assume it's OK */
7853:
786BEGIN_FTR_SECTION
787	b	2b
788END_FTR_SECTION_IFSET(CPU_FTR_601)
789	li	r10,-1
790	stw	r10,_TRAP(r11)
791	addi	r3,r1,STACK_FRAME_OVERHEAD
792	lis	r10,MSR_KERNEL@h
793	ori	r10,r10,MSR_KERNEL@l
794	bl	transfer_to_handler_full
795	.long	nonrecoverable_exception
796	.long	ret_from_except
797#endif
798
799	.globl	ret_from_except_full
800ret_from_except_full:
801	REST_NVGPRS(r1)
802	/* fall through */
803
804	.globl	ret_from_except
805ret_from_except:
806	/* Hard-disable interrupts so that current_thread_info()->flags
807	 * can't change between when we test it and when we return
808	 * from the interrupt. */
809	/* Note: We don't bother telling lockdep about it */
810	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
811	SYNC			/* Some chip revs have problems here... */
812	MTMSRD(r10)		/* disable interrupts */
813
814	lwz	r3,_MSR(r1)	/* Returning to user mode? */
815	andi.	r0,r3,MSR_PR
816	beq	resume_kernel
817
818user_exc_return:		/* r10 contains MSR_KERNEL here */
819	/* Check current_thread_info()->flags */
820	CURRENT_THREAD_INFO(r9, r1)
821	lwz	r9,TI_FLAGS(r9)
822	andi.	r0,r9,_TIF_USER_WORK_MASK
823	bne	do_work
824
825restore_user:
826#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
827	/* Check whether this process has its own DBCR0 value.  The internal
828	   debug mode bit tells us that dbcr0 should be loaded. */
829	lwz	r0,THREAD+THREAD_DBCR0(r2)
830	andis.	r10,r0,DBCR0_IDM@h
831	bnel-	load_dbcr0
832#endif
833
834#ifdef CONFIG_PREEMPT
835	b	restore
836
837/* N.B. the only way to get here is from the beq following ret_from_except. */
838resume_kernel:
839	/* check current_thread_info->preempt_count */
840	CURRENT_THREAD_INFO(r9, r1)
841	lwz	r0,TI_PREEMPT(r9)
842	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
843	bne	restore
844	lwz	r0,TI_FLAGS(r9)
845	andi.	r0,r0,_TIF_NEED_RESCHED
846	beq+	restore
847	andi.	r0,r3,MSR_EE	/* interrupts off? */
848	beq	restore		/* don't schedule if so */
849#ifdef CONFIG_TRACE_IRQFLAGS
850	/* Lockdep thinks irqs are enabled, we need to call
851	 * preempt_schedule_irq with IRQs off, so we inform lockdep
852	 * now that we -did- turn them off already
853	 */
854	bl	trace_hardirqs_off
855#endif
8561:	bl	preempt_schedule_irq
857	CURRENT_THREAD_INFO(r9, r1)
858	lwz	r3,TI_FLAGS(r9)
859	andi.	r0,r3,_TIF_NEED_RESCHED
860	bne-	1b
861#ifdef CONFIG_TRACE_IRQFLAGS
862	/* And now, to properly rebalance the above, we tell lockdep they
863	 * are being turned back on, which will happen when we return
864	 */
865	bl	trace_hardirqs_on
866#endif
867#else
868resume_kernel:
869#endif /* CONFIG_PREEMPT */
870
871	/* interrupts are hard-disabled at this point */
872restore:
873#ifdef CONFIG_44x
874BEGIN_MMU_FTR_SECTION
875	b	1f
876END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
877	lis	r4,icache_44x_need_flush@ha
878	lwz	r5,icache_44x_need_flush@l(r4)
879	cmplwi	cr0,r5,0
880	beq+	1f
881	li	r6,0
882	iccci	r0,r0
883	stw	r6,icache_44x_need_flush@l(r4)
8841:
885#endif  /* CONFIG_44x */
886
887	lwz	r9,_MSR(r1)
888#ifdef CONFIG_TRACE_IRQFLAGS
889	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
890	 * off in this assembly code while peeking at TI_FLAGS() and such. However,
891	 * we need to inform it if the exception turned interrupts off, and we
892	 * are about to turn them back on.
893	 *
894	 * The problem here sadly is that we don't know whether the exception was
895	 * one that turned interrupts off or not. So we always tell lockdep about
896	 * turning them on here when we go back to wherever we came from with EE
897	 * on, even if that may mean some redundant calls being tracked. Maybe later
898	 * we could encode what the exception did somewhere or test the exception
899	 * type in the pt_regs, but that sounds like overkill.
900	 */
901	andi.	r10,r9,MSR_EE
902	beq	1f
903	/*
904	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
905	 * which is the stack frame here, we need to force a stack frame
906	 * in case we came from user space.
907	 */
908	stwu	r1,-32(r1)
909	mflr	r0
910	stw	r0,4(r1)
911	stwu	r1,-32(r1)
912	bl	trace_hardirqs_on
913	lwz	r1,0(r1)
914	lwz	r1,0(r1)
915	lwz	r9,_MSR(r1)
9161:
917#endif /* CONFIG_TRACE_IRQFLAGS */
918
919	lwz	r0,GPR0(r1)
920	lwz	r2,GPR2(r1)
921	REST_4GPRS(3, r1)
922	REST_2GPRS(7, r1)
923
924	lwz	r10,_XER(r1)
925	lwz	r11,_CTR(r1)
926	mtspr	SPRN_XER,r10
927	mtctr	r11
928
929	PPC405_ERR77(0,r1)
930BEGIN_FTR_SECTION
931	lwarx	r11,0,r1
932END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
933	stwcx.	r0,0,r1			/* to clear the reservation */
934
935#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
936	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
937	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
938
939	lwz	r10,_CCR(r1)
940	lwz	r11,_LINK(r1)
941	mtcrf	0xFF,r10
942	mtlr	r11
943
944	/*
945	 * Once we put values in SRR0 and SRR1, we are in a state
946	 * where exceptions are not recoverable, since taking an
947	 * exception will trash SRR0 and SRR1.  Therefore we clear the
948	 * MSR:RI bit to indicate this.  If we do take an exception,
949	 * we can't return to the point of the exception but we
950	 * can restart the exception exit path at the label
951	 * exc_exit_restart below.  -- paulus
952	 */
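	/*
	 * Hedged cross-reference: if an exception does land in the window
	 * between here and the RFI, the fixup code at "nonrecoverable"
	 * (and the fee_restarts path above) notices that the interrupted
	 * NIP falls inside exc_exit_restart..exc_exit_restart_end, bumps a
	 * restart counter and simply re-runs this exit sequence from
	 * exc_exit_restart, since everything it needs is still in the
	 * exception frame.
	 */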
953	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
954	SYNC
955	MTMSRD(r10)		/* clear the RI bit */
956	.globl exc_exit_restart
957exc_exit_restart:
958	lwz	r12,_NIP(r1)
959	FIX_SRR1(r9,r10)
960	mtspr	SPRN_SRR0,r12
961	mtspr	SPRN_SRR1,r9
962	REST_4GPRS(9, r1)
963	lwz	r1,GPR1(r1)
964	.globl exc_exit_restart_end
965exc_exit_restart_end:
966	SYNC
967	RFI
968
969#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
970	/*
971	 * This is a bit different on 4xx/Book-E because it doesn't have
972	 * the RI bit in the MSR.
973	 * The TLB miss handler checks if we have interrupted
974	 * the exception exit path and restarts it if so
975	 * (well maybe one day it will... :).
976	 */
977	lwz	r11,_LINK(r1)
978	mtlr	r11
979	lwz	r10,_CCR(r1)
980	mtcrf	0xff,r10
981	REST_2GPRS(9, r1)
982	.globl exc_exit_restart
983exc_exit_restart:
984	lwz	r11,_NIP(r1)
985	lwz	r12,_MSR(r1)
986exc_exit_start:
987	mtspr	SPRN_SRR0,r11
988	mtspr	SPRN_SRR1,r12
989	REST_2GPRS(11, r1)
990	lwz	r1,GPR1(r1)
991	.globl exc_exit_restart_end
992exc_exit_restart_end:
993	PPC405_ERR77_SYNC
994	rfi
995	b	.			/* prevent prefetch past rfi */
996
997/*
998 * Returning from a critical interrupt in user mode doesn't need
999 * to be any different from a normal exception.  For a critical
1000 * interrupt in the kernel, we just return (without checking for
1001 * preemption) since the interrupt may have happened at some crucial
1002 * place (e.g. inside the TLB miss handler), and because we will be
1003 * running with r1 pointing into critical_stack, not the current
1004 * process's kernel stack (and therefore current_thread_info() will
1005 * give the wrong answer).
1006 * We have to restore various SPRs that may have been in use at the
1007 * time of the critical interrupt.
1008 *
1009 */
1010#ifdef CONFIG_40x
1011#define PPC_40x_TURN_OFF_MSR_DR						    \
1012	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
1013	 * assume the instructions here are mapped by a pinned TLB entry */ \
1014	li	r10,MSR_IR;						    \
1015	mtmsr	r10;							    \
1016	isync;								    \
1017	tophys(r1, r1);
1018#else
1019#define PPC_40x_TURN_OFF_MSR_DR
1020#endif
1021
1022#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1023	REST_NVGPRS(r1);						\
1024	lwz	r3,_MSR(r1);						\
1025	andi.	r3,r3,MSR_PR;						\
1026	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1027	bne	user_exc_return;					\
1028	lwz	r0,GPR0(r1);						\
1029	lwz	r2,GPR2(r1);						\
1030	REST_4GPRS(3, r1);						\
1031	REST_2GPRS(7, r1);						\
1032	lwz	r10,_XER(r1);						\
1033	lwz	r11,_CTR(r1);						\
1034	mtspr	SPRN_XER,r10;						\
1035	mtctr	r11;							\
1036	PPC405_ERR77(0,r1);						\
1037	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1038	lwz	r11,_LINK(r1);						\
1039	mtlr	r11;							\
1040	lwz	r10,_CCR(r1);						\
1041	mtcrf	0xff,r10;						\
1042	PPC_40x_TURN_OFF_MSR_DR;					\
1043	lwz	r9,_DEAR(r1);						\
1044	lwz	r10,_ESR(r1);						\
1045	mtspr	SPRN_DEAR,r9;						\
1046	mtspr	SPRN_ESR,r10;						\
1047	lwz	r11,_NIP(r1);						\
1048	lwz	r12,_MSR(r1);						\
1049	mtspr	exc_lvl_srr0,r11;					\
1050	mtspr	exc_lvl_srr1,r12;					\
1051	lwz	r9,GPR9(r1);						\
1052	lwz	r12,GPR12(r1);						\
1053	lwz	r10,GPR10(r1);						\
1054	lwz	r11,GPR11(r1);						\
1055	lwz	r1,GPR1(r1);						\
1056	PPC405_ERR77_SYNC;						\
1057	exc_lvl_rfi;							\
1058	b	.;		/* prevent prefetch past exc_lvl_rfi */
1059
1060#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1061	lwz	r9,_##exc_lvl_srr0(r1);					\
1062	lwz	r10,_##exc_lvl_srr1(r1);				\
1063	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1064	mtspr	SPRN_##exc_lvl_srr1,r10;
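/*
 * Illustrative expansion (comment only): with the token pasting above,
 * RESTORE_xSRR(CSRR0,CSRR1) becomes
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 *
 * i.e. the saved exception-level SRR pair is put back before the final
 * exception-level rfi.
 */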
1065
1066#if defined(CONFIG_PPC_BOOK3E_MMU)
1067#ifdef CONFIG_PHYS_64BIT
1068#define	RESTORE_MAS7							\
1069	lwz	r11,MAS7(r1);						\
1070	mtspr	SPRN_MAS7,r11;
1071#else
1072#define	RESTORE_MAS7
1073#endif /* CONFIG_PHYS_64BIT */
1074#define RESTORE_MMU_REGS						\
1075	lwz	r9,MAS0(r1);						\
1076	lwz	r10,MAS1(r1);						\
1077	lwz	r11,MAS2(r1);						\
1078	mtspr	SPRN_MAS0,r9;						\
1079	lwz	r9,MAS3(r1);						\
1080	mtspr	SPRN_MAS1,r10;						\
1081	lwz	r10,MAS6(r1);						\
1082	mtspr	SPRN_MAS2,r11;						\
1083	mtspr	SPRN_MAS3,r9;						\
1084	mtspr	SPRN_MAS6,r10;						\
1085	RESTORE_MAS7;
1086#elif defined(CONFIG_44x)
1087#define RESTORE_MMU_REGS						\
1088	lwz	r9,MMUCR(r1);						\
1089	mtspr	SPRN_MMUCR,r9;
1090#else
1091#define RESTORE_MMU_REGS
1092#endif
1093
1094#ifdef CONFIG_40x
1095	.globl	ret_from_crit_exc
1096ret_from_crit_exc:
1097	mfspr	r9,SPRN_SPRG_THREAD
1098	lis	r10,saved_ksp_limit@ha;
1099	lwz	r10,saved_ksp_limit@l(r10);
1100	tovirt(r9,r9);
1101	stw	r10,KSP_LIMIT(r9)
1102	lis	r9,crit_srr0@ha;
1103	lwz	r9,crit_srr0@l(r9);
1104	lis	r10,crit_srr1@ha;
1105	lwz	r10,crit_srr1@l(r10);
1106	mtspr	SPRN_SRR0,r9;
1107	mtspr	SPRN_SRR1,r10;
1108	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1109#endif /* CONFIG_40x */
1110
1111#ifdef CONFIG_BOOKE
1112	.globl	ret_from_crit_exc
1113ret_from_crit_exc:
1114	mfspr	r9,SPRN_SPRG_THREAD
1115	lwz	r10,SAVED_KSP_LIMIT(r1)
1116	stw	r10,KSP_LIMIT(r9)
1117	RESTORE_xSRR(SRR0,SRR1);
1118	RESTORE_MMU_REGS;
1119	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1120
1121	.globl	ret_from_debug_exc
1122ret_from_debug_exc:
1123	mfspr	r9,SPRN_SPRG_THREAD
1124	lwz	r10,SAVED_KSP_LIMIT(r1)
1125	stw	r10,KSP_LIMIT(r9)
1126	lwz	r9,THREAD_INFO-THREAD(r9)
1127	CURRENT_THREAD_INFO(r10, r1)
1128	lwz	r10,TI_PREEMPT(r10)
1129	stw	r10,TI_PREEMPT(r9)
1130	RESTORE_xSRR(SRR0,SRR1);
1131	RESTORE_xSRR(CSRR0,CSRR1);
1132	RESTORE_MMU_REGS;
1133	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1134
1135	.globl	ret_from_mcheck_exc
1136ret_from_mcheck_exc:
1137	mfspr	r9,SPRN_SPRG_THREAD
1138	lwz	r10,SAVED_KSP_LIMIT(r1)
1139	stw	r10,KSP_LIMIT(r9)
1140	RESTORE_xSRR(SRR0,SRR1);
1141	RESTORE_xSRR(CSRR0,CSRR1);
1142	RESTORE_xSRR(DSRR0,DSRR1);
1143	RESTORE_MMU_REGS;
1144	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1145#endif /* CONFIG_BOOKE */
1146
1147/*
1148 * Load the DBCR0 value for a task that is being ptraced,
1149 * having first saved away the global DBCR0.  Note that r0
1150 * has the dbcr0 value to set upon entry to this.
1151 * has the dbcr0 value to set on entry to this routine.
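/*
 * Hedged layout note: global_dbcr0 (in .bss below) provides 8 bytes per
 * CPU -- word 0 holds the saved global DBCR0 value and word 4 a counter
 * that this routine increments and the exception entry path decrements
 * -- which is why the SMP case computes its slot as cpu << 3.
 */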
1152load_dbcr0:
1153	mfmsr	r10		/* first disable debug exceptions */
1154	rlwinm	r10,r10,0,~MSR_DE
1155	mtmsr	r10
1156	isync
1157	mfspr	r10,SPRN_DBCR0
1158	lis	r11,global_dbcr0@ha
1159	addi	r11,r11,global_dbcr0@l
1160#ifdef CONFIG_SMP
1161	CURRENT_THREAD_INFO(r9, r1)
1162	lwz	r9,TI_CPU(r9)
1163	slwi	r9,r9,3
1164	add	r11,r11,r9
1165#endif
1166	stw	r10,0(r11)
1167	mtspr	SPRN_DBCR0,r0
1168	lwz	r10,4(r11)
1169	addi	r10,r10,1
1170	stw	r10,4(r11)
1171	li	r11,-1
1172	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1173	blr
1174
1175	.section .bss
1176	.align	4
1177global_dbcr0:
1178	.space	8*NR_CPUS
1179	.previous
1180#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1181
1182do_work:			/* r10 contains MSR_KERNEL here */
1183	andi.	r0,r9,_TIF_NEED_RESCHED
1184	beq	do_user_signal
1185
1186do_resched:			/* r10 contains MSR_KERNEL here */
1187	/* Note: We don't need to inform lockdep that we are enabling
1188	 * interrupts here. As far as it knows, they are already enabled
1189	 */
1190	ori	r10,r10,MSR_EE
1191	SYNC
1192	MTMSRD(r10)		/* hard-enable interrupts */
1193	bl	schedule
1194recheck:
1195	/* Note: And we don't tell it we are disabling them again
1196	 * neither. Those disable/enable cycles used to peek at
1197	 * TI_FLAGS aren't advertised.
1198	 */
1199	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1200	SYNC
1201	MTMSRD(r10)		/* disable interrupts */
1202	CURRENT_THREAD_INFO(r9, r1)
1203	lwz	r9,TI_FLAGS(r9)
1204	andi.	r0,r9,_TIF_NEED_RESCHED
1205	bne-	do_resched
1206	andi.	r0,r9,_TIF_USER_WORK_MASK
1207	beq	restore_user
1208do_user_signal:			/* r10 contains MSR_KERNEL here */
1209	ori	r10,r10,MSR_EE
1210	SYNC
1211	MTMSRD(r10)		/* hard-enable interrupts */
1212	/* save r13-r31 in the exception frame, if not already done */
1213	lwz	r3,_TRAP(r1)
1214	andi.	r0,r3,1
1215	beq	2f
1216	SAVE_NVGPRS(r1)
1217	rlwinm	r3,r3,0,0,30
1218	stw	r3,_TRAP(r1)
12192:	addi	r3,r1,STACK_FRAME_OVERHEAD
1220	mr	r4,r9
1221	bl	do_notify_resume
1222	REST_NVGPRS(r1)
1223	b	recheck
1224
1225/*
1226 * We come here when we are at the end of handling an exception
1227 * that occurred at a place where taking an exception will lose
1228 * state information, such as the contents of SRR0 and SRR1.
1229 */
1230nonrecoverable:
1231	lis	r10,exc_exit_restart_end@ha
1232	addi	r10,r10,exc_exit_restart_end@l
1233	cmplw	r12,r10
1234	bge	3f
1235	lis	r11,exc_exit_restart@ha
1236	addi	r11,r11,exc_exit_restart@l
1237	cmplw	r12,r11
1238	blt	3f
1239	lis	r10,ee_restarts@ha
1240	lwz	r12,ee_restarts@l(r10)
1241	addi	r12,r12,1
1242	stw	r12,ee_restarts@l(r10)
1243	mr	r12,r11		/* restart at exc_exit_restart */
1244	blr
12453:	/* OK, we can't recover, kill this process */
1246	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1247BEGIN_FTR_SECTION
1248	blr
1249END_FTR_SECTION_IFSET(CPU_FTR_601)
1250	lwz	r3,_TRAP(r1)
1251	andi.	r0,r3,1
1252	beq	4f
1253	SAVE_NVGPRS(r1)
1254	rlwinm	r3,r3,0,0,30
1255	stw	r3,_TRAP(r1)
12564:	addi	r3,r1,STACK_FRAME_OVERHEAD
1257	bl	nonrecoverable_exception
1258	/* shouldn't return */
1259	b	4b
1260
1261	.section .bss
1262	.align	2
1263ee_restarts:
1264	.space	4
1265	.previous
1266
1267/*
1268 * PROM code for specific machines follows.  Put it
1269 * here so it's easy to add arch-specific sections later.
1270 * -- Cort
1271 */
1272#ifdef CONFIG_PPC_RTAS
1273/*
1274 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1275 * called with the MMU off.
1276 */
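/*
 * Hedged note: the C caller (rtas_call() via enter_rtas()) is expected
 * to pass the physical address of the rtas_args block in r3; that value
 * is left untouched below and handed straight to the RTAS entry point,
 * which is invoked with the MMU (MSR_IR/MSR_DR) turned off.
 */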
1277_GLOBAL(enter_rtas)
1278	stwu	r1,-INT_FRAME_SIZE(r1)
1279	mflr	r0
1280	stw	r0,INT_FRAME_SIZE+4(r1)
1281	LOAD_REG_ADDR(r4, rtas)
1282	lis	r6,1f@ha	/* physical return address for rtas */
1283	addi	r6,r6,1f@l
1284	tophys(r6,r6)
1285	tophys(r7,r1)
1286	lwz	r8,RTASENTRY(r4)
1287	lwz	r4,RTASBASE(r4)
1288	mfmsr	r9
1289	stw	r9,8(r1)
1290	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1291	SYNC			/* disable interrupts so SRR0/1 */
1292	MTMSRD(r0)		/* don't get trashed */
1293	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1294	mtlr	r6
1295	mtspr	SPRN_SPRG_RTAS,r7
1296	mtspr	SPRN_SRR0,r8
1297	mtspr	SPRN_SRR1,r9
1298	RFI
12991:	tophys(r9,r1)
1300	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1301	lwz	r9,8(r9)	/* original msr value */
1302	FIX_SRR1(r9,r0)
1303	addi	r1,r1,INT_FRAME_SIZE
1304	li	r0,0
1305	mtspr	SPRN_SPRG_RTAS,r0
1306	mtspr	SPRN_SRR0,r8
1307	mtspr	SPRN_SRR1,r9
1308	RFI			/* return to caller */
1309
1310	.globl	machine_check_in_rtas
1311machine_check_in_rtas:
1312	twi	31,0,0
1313	/* XXX load up BATs and panic */
1314
1315#endif /* CONFIG_PPC_RTAS */
1316
1317#ifdef CONFIG_FUNCTION_TRACER
1318#ifdef CONFIG_DYNAMIC_FTRACE
1319_GLOBAL(mcount)
1320_GLOBAL(_mcount)
1321	/*
1322	 * _mcount on PPC32 is required to preserve the link
1323	 * register. But we have r0 to play with. We use r0 to move
1324	 * the return address into the ctr register, restore the
1325	 * original link register from the stack and then jump back
1326	 * to the caller of mcount via the ctr register.
1327	 */
1328	mflr	r0
1329	mtctr	r0
1330	lwz	r0, 4(r1)
1331	mtlr	r0
1332	bctr
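	/*
	 * Hedged sketch of the call site this relies on: with -pg, the
	 * ppc32 compiler emits a profiling prologue along the lines of
	 *
	 *	mflr	r0
	 *	stw	r0,4(r1)
	 *	bl	_mcount
	 *
	 * so the "lwz r0, 4(r1)" above recovers the caller's original LR
	 * from its standard LR save slot.
	 */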
1333
1334_GLOBAL(ftrace_caller)
1335	MCOUNT_SAVE_FRAME
1336	/* r3 ends up with link register */
1337	subi	r3, r3, MCOUNT_INSN_SIZE
1338.globl ftrace_call
1339ftrace_call:
1340	bl	ftrace_stub
1341	nop
1342#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1343.globl ftrace_graph_call
1344ftrace_graph_call:
1345	b	ftrace_graph_stub
1346_GLOBAL(ftrace_graph_stub)
1347#endif
1348	MCOUNT_RESTORE_FRAME
1349	/* old link register ends up in ctr reg */
1350	bctr
1351#else
1352_GLOBAL(mcount)
1353_GLOBAL(_mcount)
1354
1355	MCOUNT_SAVE_FRAME
1356
1357	subi	r3, r3, MCOUNT_INSN_SIZE
1358	LOAD_REG_ADDR(r5, ftrace_trace_function)
1359	lwz	r5,0(r5)
1360
1361	mtctr	r5
1362	bctrl
1363	nop
1364
1365#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1366	b	ftrace_graph_caller
1367#endif
1368	MCOUNT_RESTORE_FRAME
1369	bctr
1370#endif
1371
1372_GLOBAL(ftrace_stub)
1373	blr
1374
1375#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1376_GLOBAL(ftrace_graph_caller)
1377	/* load r4 with local address */
1378	lwz	r4, 44(r1)
1379	subi	r4, r4, MCOUNT_INSN_SIZE
1380
1381	/* get the parent address */
1382	addi	r3, r1, 52
1383
1384	bl	prepare_ftrace_return
1385	nop
1386
1387	MCOUNT_RESTORE_FRAME
1388	/* old link register ends up in ctr reg */
1389	bctr
1390
1391_GLOBAL(return_to_handler)
1392	/* need to save return values */
1393	stwu	r1, -32(r1)
1394	stw	r3, 20(r1)
1395	stw	r4, 16(r1)
1396	stw	r31, 12(r1)
1397	mr	r31, r1
1398
1399	bl	ftrace_return_to_handler
1400	nop
1401
1402	/* return value has real return address */
1403	mtlr	r3
1404
1405	lwz	r3, 20(r1)
1406	lwz	r4, 16(r1)
1407	lwz	r31,12(r1)
1408	lwz	r1, 0(r1)
1409
1410	/* Jump back to real return address */
1411	blr
1412#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1413
1414#endif /* CONFIG_FUNCTION_TRACER */
1415