xref: /openbmc/linux/arch/powerpc/kernel/entry_32.S (revision b9ccfda2)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
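
/*
 * Illustration added for clarity (not in the original source): li can
 * only materialize a 16-bit sign-extended immediate, so when MSR_KERNEL
 * has bits set above bit 15 (e.g. MSR_CE on 4xx/Book-E) the constant is
 * built in two steps:
 *
 *	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
 *	  -> lis	r10,(MSR_KERNEL)@h
 *	     ori	r10,r10,(MSR_KERNEL)@l
 *
 * whereas smaller values use a single "li r10,(MSR_KERNEL)".
 */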

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	CURRENT_THREAD_INFO(r0, r1)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	CURRENT_THREAD_INFO(r0, r1)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
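	/*
	 * Note added for clarity (derived from the code below): the TLF_*
	 * flags live in the low bits of TI_LOCAL_FLAGS, and mtcrf 0x01
	 * copies bits 28-31 of r12 into CR field 7.  "bt- 31-TLF_NAPPING,4f"
	 * therefore tests flag bit TLF_NAPPING (bit 31 in big-endian bit
	 * numbering, i.e. the least significant bit of the flags word).
	 */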
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops.  So we need to create
	 * a dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and the handler address respectively,
	 * and r4 & r5 can contain page fault arguments that need to be
	 * passed along as well.  r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point (they aren't
	 * syscall arguments); the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

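/*
 * Note added for clarity (derived from the code below): the syscall
 * number arrives in r0 and the arguments in r3..r8.  The handler pointer
 * is fetched from sys_call_table (4 bytes per entry, hence the
 * "slwi r0,r0,2"), the result comes back in r3, and CR0.SO in the saved
 * CCR tells userspace whether the call failed.
 */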
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
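	/*
	 * Note added for clarity (derived from the code below): return
	 * values in [-_LAST_ERRNO, -1] denote failure; for those we negate
	 * r3 to get a positive errno and set CR0.SO (the 0x10000000 bit of
	 * the saved CCR) so the C library can distinguish an error from a
	 * large successful return value.
	 */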
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  That shouldn't happen, but we
	 * want to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
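	/*
	 * Note added for clarity: the stwcx. above exists only to clear any
	 * leftover lwarx reservation, so that no reservation survives the
	 * return and a later stwcx. cannot succeed spuriously.  CPUs flagged
	 * with CPU_FTR_NEED_PAIRED_STWCX want every stwcx. paired with a
	 * lwarx, hence the dummy lwarx in the feature section.
	 */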
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
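	/*
	 * Note added for clarity: the 3: loop above is a standard
	 * lwarx/stwcx. read-modify-write, roughly
	 * "atomically: ti->flags &= ~_TIF_PERSYSCALL_MASK" in C.  The dcbt
	 * under CONFIG_IBM405_ERR77 is an erratum workaround, not a
	 * functional part of the sequence.
	 */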

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
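/*
 * Note added for clarity (derived from the code in this file): the low
 * bit of the saved _TRAP word tracks whether the non-volatile GPRs are
 * in the exception frame.  A set LSB means only the volatile registers
 * were saved; clearing it (the rlwinm below) records that SAVE_NVGPRS
 * has run and the frame holds the full register set.
 */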
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
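/*
 * Note added for clarity (derived from the code below): the old task's
 * 'current' (r2) is moved into r3 before r2 is switched over, so _switch
 * returns the previous task to its C caller as the "last" task expected
 * by the scheduler's switch_to() protocol.
 */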
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

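	/*
	 * Note added for clarity: r11 is the frame pointer for every load
	 * below, so GPR11 itself must be the very last register reloaded
	 * before the RFI.
	 */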
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

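/*
 * Note added for clarity: the macro above leaves only MSR_IR set, so data
 * relocation is off and r1 must be converted to a physical address
 * (tophys) before the exception frame is touched again.
 */
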
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
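
/*
 * Illustration added for clarity (not in the original source): the ##
 * token pasting means that, for example,
 *
 *	RESTORE_xSRR(CSRR0,CSRR1);
 *
 * expands to
 *
 *	lwz	r9,_CSRR0(r1);	lwz	r10,_CSRR1(r1);
 *	mtspr	SPRN_CSRR0,r9;	mtspr	SPRN_CSRR1,r10;
 */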

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * holds the DBCR0 value to set on entry to this routine.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
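
/*
 * Note added for clarity (derived from the code above): global_dbcr0 is
 * laid out as two words per CPU - the saved DBCR0 value at offset 0 and
 * a use count at offset 4 - which is why the CPU number is scaled by 8
 * (slwi r9,r9,3) when indexing it.
 */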

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either.  Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * On PPC32, _mcount must preserve the link register.  But we
	 * have r0 to play with: we use it to move the return address
	 * (the point in the caller that mcount returns to) into the ctr
	 * register, restore the link register from the stack, and then
	 * jump back via ctr.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */
1407