/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - 0x8fff : Initial (CPU0) segment table
 * 0x9000 -        : Early init and support code
 */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ; 					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;					\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;					\
0:

#define SYSCALL_PSERIES_2_RFID 					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r10, system_call_entry) ; 			\
	mtspr	SPRN_SRR0,r10 ; 				\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ; 				\
	rfid ; 							\
	b	. ;	/* prevent speculative execution */
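
/*
 * Note on the RFID path above: SRR0 is loaded with the address of
 * system_call_entry (built via LOAD_HANDLER from PACAKBASE) and SRR1 with
 * the kernel MSR from the PACA, so the rfid transfers to the common system
 * call code with the kernel MSR in one context-synchronising return. The
 * "b ." that follows is never reached; it only stops the CPU speculating
 * past the rfid.
 */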

#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly so we do it via the CTR which
	 * is volatile across system calls.
	 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r12, system_call_entry) ;			\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */	\
	li	r13,MSR_RI ;					\
	mtmsrd 	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_common ;
#endif
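
/*
 * In both variants above the "mtmsrd rX,1" uses the L=1 form of the
 * instruction, which updates only the RI and EE bits of the MSR. Setting
 * RI here marks the state as recoverable again while keeping external
 * interrupts disabled (EE stays 0).
 */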

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap/sleep/winkle.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
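	/*
	 * The rlwinm above rotates SRR1 left by 16 and keeps only bits
	 * 30:31, i.e. it extracts the SRR1[46:47] wakeup-reason field into
	 * the low two bits of r13 and sets CR0: zero means this was not a
	 * power-saving wakeup (branch to 9f below), larger values indicate
	 * progressively more state loss, as checked against 2 in cr3.
	 */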
	beq	9f

	cmpwi	cr3,r13,2

	/*
	 * Check if last bit of HSPRG0 is set. This indicates whether we are
	 * waking up from winkle.
	 */
	GET_PACA(r13)
	clrldi	r5,r13,63
	clrrdi	r13,r13,1
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi   cr2,r0,PNV_THREAD_NAP
	bgt     cr2,8f				/* Either sleep or Winkle */

	/* Waking up from nap should not cause hypervisor state loss */
	bgt	cr3,.

	/* Waking up from nap */
	li	r0,PNV_THREAD_RUNNING
	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	/* Return SRR1 from power7_nap() */
	mfspr	r3,SPRN_SRR1
	beq	cr3,2f
	b	power7_wakeup_noloss
2:	b	power7_wakeup_loss

	/* Fast Sleep wakeup on PowerNV */
8:	GET_PACA(r13)
	b 	power7_wakeup_tb_loss

9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	SET_SCRATCH0(r13)		/* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	beq	9f

	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal. Let's just stay stuck here */
	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
	bgt	cr1,.
9:
	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_pSeries_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
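
/*
 * LOAD_HANDLER() here (and in the other relocatable stubs below) builds the
 * absolute address of the target from PACAKBASE with an ori, which is why
 * all of these handlers must live in the first 64k of the kernel image (see
 * the comment above the common handlers further down).
 */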

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_PR)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, alignment)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, program_check)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)

	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	 /*
	  * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
	  * that support it) before changing to HMT_MEDIUM. That allows the KVM
	  * code to save that value into the guest state (it is the guest's PPR
	  * value). Otherwise just change to HMT_MEDIUM as userspace has
	  * already saved the PPR.
	  */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
	HMT_MEDIUM;
	std	r10,PACA_EXGEN+EX_R10(r13)
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#else
	HMT_MEDIUM;
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_data_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
hv_instr_storage_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
emulation_assist_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_early

	. = 0xe80
hv_doorbell_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
performance_monitor_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

	. = 0xf20
altivec_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

	. = 0xf40
vsx_unavailable_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

	. = 0xf60
facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_pSeries

	. = 0xf80
hv_facility_unavailable_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries_early:
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 are saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,4
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	ld	r12,PACAKBASE(r13)	/* get high part of &label */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.	/* prevent speculative execution */
2:
	/* Stack overflow. Stay on emergency stack and panic.
	 * Keep the ME bit off while panicking, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	ld	r12,PACAKBASE(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)
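
/*
 * The FMR macros above just double up: FMR32(0) expands to 32 "fmr n,n"
 * instructions covering f0-f31, so every FP register is passed through the
 * FPU once, which is what gets the denormalised values handled.
 */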

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - else we hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	cmpwi	r10,PACA_IRQ_HMI;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.
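
/*
 * A note on the EE-clearing trick in MASKED_INTERRUPT: rotating (H)SRR1
 * left by 48 brings MSR_EE (bit 48 in IBM numbering) up to bit 0, the
 * rldicl mask (MB=1) then clears that bit, and the following rotldi by 16
 * completes a full 64-bit rotation so every other bit ends up back where it
 * started. The net effect is simply (H)SRR1 &= ~MSR_EE.
 */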

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr
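
/*
 * __replay_interrupt only fills in the registers the common handlers
 * actually consume (see the register convention above data_access_common
 * below): r9 holds CR, r11 the return address (the caller's LR) and r12
 * the MSR with EE forced on, mimicking what the real exception prolog
 * would have taken from SRR0/SRR1.
 */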

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual, which
 * doesn't currently happen, but will again once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif
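
/*
 * The two "skip" stubs above simply advance (H)SRR0 past the interrupted
 * instruction (PowerPC instructions are 4 bytes) before returning, so
 * execution resumes at the following instruction in the cases where KVM
 * wants the faulting instruction skipped.
 */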

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
	STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */
	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
	. = 0x4380
	.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
emulation_assist_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
h_doorbell_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

	. = 0x4f00
performance_monitor_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

	. = 0x4f20
altivec_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

	. = 0x4f40
vsx_unavailable_relon_pseries_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

	. = 0x4f60
facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_pSeries

	. = 0x4f80
hv_facility_unavailable_relon_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hv_facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

	/* Other future vectors */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	__ppc64_runlatch_on

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	do_hash_page		/* Try to handle as hpte fault */

	.align  7
	.globl  h_data_storage_common
h_data_storage_common:
	mfspr   r10,SPRN_HDAR
	std     r10,PACA_EXGEN+EX_DAR(r13)
	mfspr   r10,SPRN_HDSISR
	stw     r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl      save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      unknown_exception
	b       ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	do_hash_page		/* Try to handle as hpte fault */
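
/*
 * For instruction faults there is no DAR/DSISR: the faulting address is the
 * NIP, and the andis. above picks out the SRR1 status bits (mask
 * 0x58200000) describing the access error, which do_hash_page then treats
 * much like a DSISR value in r4.
 */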

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)

/*
 * Here is the common SLB miss handler that is used when going to virtual
 * mode for SLB misses; it is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:

	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_except

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except

	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

	.globl hmi_exception_early
hmi_exception_early:
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	std	r11,_NIP(r1)		/* save HSRR0 in stackframe	*/
	mfspr	r12,SPRN_HSRR1		/* Save SRR1 */
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv


#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)
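
/*
 * Note that the windup clears MSR_RI before it loads SRR0/SRR1: once those
 * registers hold the return context, a further exception would overwrite
 * them, so RI=0 advertises that the state is no longer recoverable until
 * the rfid completes.
 */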

	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
	.align	7
	.globl machine_check_handle_early
machine_check_handle_early:
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss or
	 *    supervisor state loss
	 *
	 * Go back to nap again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
	bne	3f
	/* Supervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
3:	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
	li	r3,PNV_THREAD_NAP
	b	power7_enter_nap_mode
4:
#endif
	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure what context we came from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there is a chance that we might get hit by
	 * another MCE during the panic path and end up in an unstable state
	 * with no way out. Hence, turn the ME bit off while going down, so
	 * that when another MCE is hit during the panic path, the system
	 * will checkstop and the hypervisor will get restarted cleanly by SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from the error;
	 * if not then stay on the emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

unrecover_mce:
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
slb_miss_realmode:
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

/*
 * Hash table stuff
 */
	.align	7
do_hash_page:
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 msr
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      do_break
12:	b       ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b