1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
 * We lay out physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 -        : Early init and support code
29 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
31#define SYSCALL_PSERIES_1 					\
32BEGIN_FTR_SECTION						\
33	cmpdi	r0,0x1ebe ; 					\
34	beq-	1f ;						\
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
36	mr	r9,r13 ;					\
37	GET_PACA(r13) ;						\
38	mfspr	r11,SPRN_SRR0 ;					\
390:
40
41#define SYSCALL_PSERIES_2_RFID 					\
42	mfspr	r12,SPRN_SRR1 ;					\
43	ld	r10,PACAKBASE(r13) ; 				\
44	LOAD_HANDLER(r10, system_call_entry) ; 			\
45	mtspr	SPRN_SRR0,r10 ; 				\
46	ld	r10,PACAKMSR(r13) ;				\
47	mtspr	SPRN_SRR1,r10 ; 				\
48	rfid ; 							\
49	b	. ;	/* prevent speculative execution */
50
51#define SYSCALL_PSERIES_3					\
52	/* Fast LE/BE switch system call */			\
531:	mfspr	r12,SPRN_SRR1 ;					\
54	xori	r12,r12,MSR_LE ;				\
55	mtspr	SPRN_SRR1,r12 ;					\
56	rfid ;		/* return to userspace */		\
57	b	. ;						\
582:	mfspr	r12,SPRN_SRR1 ;					\
59	andi.	r12,r12,MSR_PR ;				\
60	bne	0b ;						\
61	mtspr	SPRN_SRR0,r3 ;					\
62	mtspr	SPRN_SRR1,r4 ;					\
63	mtspr	SPRN_SDR1,r5 ;					\
64	rfid ;							\
65	b	. ;	/* prevent speculative execution */
66
67#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly because the kernel can end up
	 * anywhere, so we go via the CTR.  LR is saved in r10 since
	 * system_call_entry_direct restores LR from r10.
	 */
73#define SYSCALL_PSERIES_2_DIRECT				\
74	mflr	r10 ;						\
75	ld	r12,PACAKBASE(r13) ; 				\
76	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
77	mtctr	r12 ;						\
78	mfspr	r12,SPRN_SRR1 ;					\
79	/* Re-use of r13... No spare regs to do this */	\
80	li	r13,MSR_RI ;					\
81	mtmsrd 	r13,1 ;						\
82	GET_PACA(r13) ;	/* get r13 back */			\
83	bctr ;
84#else
85	/* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT				\
87	mfspr	r12,SPRN_SRR1 ;					\
88	li	r10,MSR_RI ;					\
89	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
90	b	system_call_entry_direct ;
91#endif
92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101	. = 0x100
102	.globl __start_interrupts
103__start_interrupts:
104
105	.globl system_reset_pSeries;
106system_reset_pSeries:
107	HMT_MEDIUM_PPR_DISCARD
108	SET_SCRATCH0(r13)
109#ifdef CONFIG_PPC_P7_NAP
110BEGIN_FTR_SECTION
111	/* Running native on arch 2.06 or later, check if we are
112	 * waking up from nap. We only handle no state loss and
113	 * supervisor state loss. We do -not- handle hypervisor
114	 * state loss at this time.
115	 */
116	mfspr	r13,SPRN_SRR1
117	rlwinm.	r13,r13,47-31,30,31
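	/* r13 = SRR1[46:47], the power saving state loss field:
	 * 0 = not a nap wakeup, 1 = no state loss,
	 * 2 = supervisor state lost, 3 = hypervisor state lost
	 */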
118	beq	9f
119
120	/* waking up from powersave (nap) state */
121	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal; we could try to use the
123	 * PIR to locate a PACA, then use an emergency stack etc...
124	 * but for now, let's just stay stuck here
125	 */
126	bgt	cr1,.
127	GET_PACA(r13)
128
129#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
130	li	r0,KVM_HWTHREAD_IN_KERNEL
131	stb	r0,HSTATE_HWTHREAD_STATE(r13)
132	/* Order setting hwthread_state vs. testing hwthread_req */
133	sync
134	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
135	cmpwi	r0,0
136	beq	1f
137	b	kvm_start_guest
1381:
139#endif
140
141	beq	cr1,2f
142	b	.power7_wakeup_noloss
1432:	b	.power7_wakeup_loss
1449:
145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146#endif /* CONFIG_PPC_P7_NAP */
147	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148				 NOTEST, 0x100)
149
150	. = 0x200
151machine_check_pSeries_1:
152	/* This is moved out of line as it can be patched by FW, but
153	 * some code path might still want to branch into the original
154	 * vector
155	 */
156	HMT_MEDIUM_PPR_DISCARD
157	SET_SCRATCH0(r13)		/* save r13 */
158#ifdef CONFIG_PPC_P7_NAP
159BEGIN_FTR_SECTION
160	/* Running native on arch 2.06 or later, check if we are
161	 * waking up from nap. We only handle no state loss and
162	 * supervisor state loss. We do -not- handle hypervisor
163	 * state loss at this time.
164	 */
165	mfspr	r13,SPRN_SRR1
166	rlwinm.	r13,r13,47-31,30,31
167	beq	9f
168
169	/* waking up from powersave (nap) state */
170	cmpwi	cr1,r13,2
171	/* Total loss of HV state is fatal. let's just stay stuck here */
172	bgt	cr1,.
1739:
174END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
175#endif /* CONFIG_PPC_P7_NAP */
176	EXCEPTION_PROLOG_0(PACA_EXMC)
177BEGIN_FTR_SECTION
178	b	machine_check_pSeries_early
179FTR_SECTION_ELSE
180	b	machine_check_pSeries_0
181ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
182
183	. = 0x300
184	.globl data_access_pSeries
185data_access_pSeries:
186	HMT_MEDIUM_PPR_DISCARD
187	SET_SCRATCH0(r13)
188BEGIN_FTR_SECTION
189	b	data_access_check_stab
190data_access_not_stab:
191END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
192	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
193				 KVMTEST, 0x300)
194
195	. = 0x380
196	.globl data_access_slb_pSeries
197data_access_slb_pSeries:
198	HMT_MEDIUM_PPR_DISCARD
199	SET_SCRATCH0(r13)
200	EXCEPTION_PROLOG_0(PACA_EXSLB)
201	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
202	std	r3,PACA_EXSLB+EX_R3(r13)
203	mfspr	r3,SPRN_DAR
204#ifdef __DISABLED__
205	/* Keep that around for when we re-implement dynamic VSIDs */
206	cmpdi	r3,0
207	bge	slb_miss_user_pseries
208#endif /* __DISABLED__ */
209	mfspr	r12,SPRN_SRR1
210#ifndef CONFIG_RELOCATABLE
211	b	.slb_miss_realmode
212#else
213	/*
214	 * We can't just use a direct branch to .slb_miss_realmode
215	 * because the distance from here to there depends on where
216	 * the kernel ends up being put.
217	 */
218	mfctr	r11
219	ld	r10,PACAKBASE(r13)
220	LOAD_HANDLER(r10, .slb_miss_realmode)
221	mtctr	r10
222	bctr
223#endif
224
225	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
226
227	. = 0x480
228	.globl instruction_access_slb_pSeries
229instruction_access_slb_pSeries:
230	HMT_MEDIUM_PPR_DISCARD
231	SET_SCRATCH0(r13)
232	EXCEPTION_PROLOG_0(PACA_EXSLB)
233	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
234	std	r3,PACA_EXSLB+EX_R3(r13)
235	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
236#ifdef __DISABLED__
237	/* Keep that around for when we re-implement dynamic VSIDs */
238	cmpdi	r3,0
239	bge	slb_miss_user_pseries
240#endif /* __DISABLED__ */
241	mfspr	r12,SPRN_SRR1
242#ifndef CONFIG_RELOCATABLE
243	b	.slb_miss_realmode
244#else
245	mfctr	r11
246	ld	r10,PACAKBASE(r13)
247	LOAD_HANDLER(r10, .slb_miss_realmode)
248	mtctr	r10
249	bctr
250#endif
251
	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
255	. = 0x500;
256	.globl hardware_interrupt_pSeries;
257	.globl hardware_interrupt_hv;
258hardware_interrupt_pSeries:
259hardware_interrupt_hv:
260	HMT_MEDIUM_PPR_DISCARD
261	BEGIN_FTR_SECTION
262		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
263					    EXC_HV, SOFTEN_TEST_HV)
264		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
265	FTR_SECTION_ELSE
266		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
267					    EXC_STD, SOFTEN_TEST_HV_201)
268		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
269	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
270
271	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
272	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
273
274	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
275	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
276
277	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
278	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
279
280	. = 0x900
281	.globl decrementer_pSeries
282decrementer_pSeries:
283	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
284
285	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
286
287	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
288	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
289
290	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
291	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
292
293	. = 0xc00
294	.globl	system_call_pSeries
295system_call_pSeries:
296	HMT_MEDIUM
297#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
298	SET_SCRATCH0(r13)
299	GET_PACA(r13)
300	std	r9,PACA_EXGEN+EX_R9(r13)
301	std	r10,PACA_EXGEN+EX_R10(r13)
302	mfcr	r9
303	KVMTEST(0xc00)
304	GET_SCRATCH0(r13)
305#endif
306	SYSCALL_PSERIES_1
307	SYSCALL_PSERIES_2_RFID
308	SYSCALL_PSERIES_3
309	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
310
311	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
312	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
313
	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
	 * out of line to handle them
	 */
317	. = 0xe00
318hv_data_storage_trampoline:
319	SET_SCRATCH0(r13)
320	EXCEPTION_PROLOG_0(PACA_EXGEN)
321	b	h_data_storage_hv
322
323	. = 0xe20
324hv_instr_storage_trampoline:
325	SET_SCRATCH0(r13)
326	EXCEPTION_PROLOG_0(PACA_EXGEN)
327	b	h_instr_storage_hv
328
329	. = 0xe40
330emulation_assist_trampoline:
331	SET_SCRATCH0(r13)
332	EXCEPTION_PROLOG_0(PACA_EXGEN)
333	b	emulation_assist_hv
334
335	. = 0xe60
336hv_exception_trampoline:
337	SET_SCRATCH0(r13)
338	EXCEPTION_PROLOG_0(PACA_EXGEN)
339	b	hmi_exception_hv
340
341	. = 0xe80
342hv_doorbell_trampoline:
343	SET_SCRATCH0(r13)
344	EXCEPTION_PROLOG_0(PACA_EXGEN)
345	b	h_doorbell_hv
346
	/* We need to deal with the Altivec unavailable exception
	 * here, which is at 0xf20 and thus in the middle of the
	 * prolog code of the Performance Monitor one, so a little
	 * trickery is necessary
	 */
352	. = 0xf00
353performance_monitor_pseries_trampoline:
354	SET_SCRATCH0(r13)
355	EXCEPTION_PROLOG_0(PACA_EXGEN)
356	b	performance_monitor_pSeries
357
358	. = 0xf20
359altivec_unavailable_pseries_trampoline:
360	SET_SCRATCH0(r13)
361	EXCEPTION_PROLOG_0(PACA_EXGEN)
362	b	altivec_unavailable_pSeries
363
364	. = 0xf40
365vsx_unavailable_pseries_trampoline:
366	SET_SCRATCH0(r13)
367	EXCEPTION_PROLOG_0(PACA_EXGEN)
368	b	vsx_unavailable_pSeries
369
370	. = 0xf60
371facility_unavailable_trampoline:
372	SET_SCRATCH0(r13)
373	EXCEPTION_PROLOG_0(PACA_EXGEN)
374	b	facility_unavailable_pSeries
375
376	. = 0xf80
377hv_facility_unavailable_trampoline:
378	SET_SCRATCH0(r13)
379	EXCEPTION_PROLOG_0(PACA_EXGEN)
380	b	facility_unavailable_hv
381
382#ifdef CONFIG_CBE_RAS
383	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
384	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
385#endif /* CONFIG_CBE_RAS */
386
387	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
388	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
389
390	. = 0x1500
391	.global denorm_exception_hv
392denorm_exception_hv:
393	HMT_MEDIUM_PPR_DISCARD
394	mtspr	SPRN_SPRG_HSCRATCH0,r13
395	EXCEPTION_PROLOG_0(PACA_EXGEN)
396	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
397
398#ifdef CONFIG_PPC_DENORMALISATION
399	mfspr	r10,SPRN_HSRR1
400	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
401	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
402	addi	r11,r11,-4		/* HSRR0 is next instruction */
403	bne+	denorm_assist
404#endif
405
406	KVMTEST(0x1500)
407	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
408	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
409
410#ifdef CONFIG_CBE_RAS
411	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
412	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
413#endif /* CONFIG_CBE_RAS */
414
415	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
416	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
417
418#ifdef CONFIG_CBE_RAS
419	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
420	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
421#else
422	. = 0x1800
423#endif /* CONFIG_CBE_RAS */
424
425
426/*** Out of line interrupts support ***/
427
428	.align	7
429	/* moved from 0x200 */
430machine_check_pSeries_early:
431BEGIN_FTR_SECTION
432	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
433	/*
434	 * Register contents:
435	 * R13		= PACA
436	 * R9		= CR
	 * The original R9 to R13 are saved in PACA_EXMC
438	 *
439	 * Switch to mc_emergency stack and handle re-entrancy (though we
440	 * currently don't test for overflow). Save MCE registers srr1,
441	 * srr0, dar and dsisr and then set ME=1
442	 *
443	 * We use paca->in_mce to check whether this is the first entry or
444	 * nested machine check. We increment paca->in_mce to track nested
445	 * machine checks.
446	 *
447	 * If this is the first entry then set stack pointer to
448	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
449	 * stack frame on mc_emergency stack.
450	 *
451	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
452	 * checkstop if we get another machine check exception before we do
453	 * rfid with MSR_ME=1.
454	 */
455	mr	r11,r1			/* Save r1 */
456	lhz	r10,PACA_IN_MCE(r13)
457	cmpwi	r10,0			/* Are we in nested machine check */
458	bne	0f			/* Yes, we are. */
459	/* First machine check entry */
460	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
4610:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
462	addi	r10,r10,1		/* increment paca->in_mce */
463	sth	r10,PACA_IN_MCE(r13)
464	std	r11,GPR1(r1)		/* Save r1 on the stack. */
465	std	r11,0(r1)		/* make stack chain pointer */
466	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
467	std	r11,_NIP(r1)
468	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
469	std	r11,_MSR(r1)
470	mfspr	r11,SPRN_DAR		/* Save DAR */
471	std	r11,_DAR(r1)
472	mfspr	r11,SPRN_DSISR		/* Save DSISR */
473	std	r11,_DSISR(r1)
474	std	r9,_CCR(r1)		/* Save CR in stackframe */
475	/* Save r9 through r13 from EXMC save area to stack frame. */
476	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
477	mfmsr	r11			/* get MSR value */
478	ori	r11,r11,MSR_ME		/* turn on ME bit */
479	ori	r11,r11,MSR_RI		/* turn on RI bit */
480	ld	r12,PACAKBASE(r13)	/* get high part of &label */
481	LOAD_HANDLER(r12, machine_check_handle_early)
482	mtspr	SPRN_SRR0,r12
483	mtspr	SPRN_SRR1,r11
484	rfid
485	b	.	/* prevent speculative execution */
486END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
487
488machine_check_pSeries:
489	.globl machine_check_fwnmi
490machine_check_fwnmi:
491	HMT_MEDIUM_PPR_DISCARD
492	SET_SCRATCH0(r13)		/* save r13 */
493	EXCEPTION_PROLOG_0(PACA_EXMC)
494machine_check_pSeries_0:
495	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
496	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
497	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
498
499	/* moved from 0x300 */
500data_access_check_stab:
501	GET_PACA(r13)
502	std	r9,PACA_EXSLB+EX_R9(r13)
503	std	r10,PACA_EXSLB+EX_R10(r13)
504	mfspr	r10,SPRN_DAR
505	mfspr	r9,SPRN_DSISR
506	srdi	r10,r10,60
507	rlwimi	r10,r9,16,0x20
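	/* r10 = (DAR >> 60) | 0x20 if DSISR flagged a missing segment
	 * translation; 0x2c below means an STAB miss on a kernel
	 * (0xc...) address
	 */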
508#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
509	lbz	r9,HSTATE_IN_GUEST(r13)
510	rlwimi	r10,r9,8,0x300
511#endif
512	mfcr	r9
513	cmpwi	r10,0x2c
514	beq	do_stab_bolted_pSeries
515	mtcrf	0x80,r9
516	ld	r9,PACA_EXSLB+EX_R9(r13)
517	ld	r10,PACA_EXSLB+EX_R10(r13)
518	b	data_access_not_stab
519do_stab_bolted_pSeries:
520	std	r11,PACA_EXSLB+EX_R11(r13)
521	std	r12,PACA_EXSLB+EX_R12(r13)
522	GET_SCRATCH0(r10)
523	std	r10,PACA_EXSLB+EX_R13(r13)
524	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
525
526	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
527	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
528	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
529	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
530	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
531	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
532
533#ifdef CONFIG_PPC_DENORMALISATION
534denorm_assist:
535BEGIN_FTR_SECTION
536/*
537 * To denormalise we need to move a copy of the register to itself.
538 * For POWER6 do that here for all FP regs.
539 */
540	mfmsr	r10
541	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
542	xori	r10,r10,(MSR_FE0|MSR_FE1)
543	mtmsrd	r10
544	sync
545
546#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
547#define FMR4(n)  FMR2(n) ; FMR2(n+2)
548#define FMR8(n)  FMR4(n) ; FMR4(n+4)
549#define FMR16(n) FMR8(n) ; FMR8(n+8)
550#define FMR32(n) FMR16(n) ; FMR16(n+16)
551	FMR32(0)
552
553FTR_SECTION_ELSE
554/*
555 * To denormalise we need to move a copy of the register to itself.
556 * For POWER7 do that here for the first 32 VSX registers only.
557 */
558	mfmsr	r10
559	oris	r10,r10,MSR_VSX@h
560	mtmsrd	r10
561	sync
562
563#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
564#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
565#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
566#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
567#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
568	XVCPSGNDP32(0)
569
570ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
571
572BEGIN_FTR_SECTION
573	b	denorm_done
574END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
575/*
576 * To denormalise we need to move a copy of the register to itself.
577 * For POWER8 we need to do that for all 64 VSX registers
578 */
579	XVCPSGNDP32(32)
580denorm_done:
581	mtspr	SPRN_HSRR0,r11
582	mtcrf	0x80,r9
583	ld	r9,PACA_EXGEN+EX_R9(r13)
584	RESTORE_PPR_PACA(PACA_EXGEN, r10)
585BEGIN_FTR_SECTION
586	ld	r10,PACA_EXGEN+EX_CFAR(r13)
587	mtspr	SPRN_CFAR,r10
588END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
589	ld	r10,PACA_EXGEN+EX_R10(r13)
590	ld	r11,PACA_EXGEN+EX_R11(r13)
591	ld	r12,PACA_EXGEN+EX_R12(r13)
592	ld	r13,PACA_EXGEN+EX_R13(r13)
593	HRFID
594	b	.
595#endif
596
597	.align	7
598	/* moved from 0xe00 */
599	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
600	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
601	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
602	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
603	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
604	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
605	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
606	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
607	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
608	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
609
610	/* moved from 0xf00 */
611	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
612	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
613	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
614	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
615	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
616	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
617	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
618	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
619	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
620	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
621
622/*
623 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
625 * - If it was a doorbell we return immediately since doorbells are edge
626 *   triggered and won't automatically refire.
627 * - else we hard disable and return.
628 * This is called with r10 containing the value to OR to the paca field.
629 */
630#define MASKED_INTERRUPT(_H)				\
631masked_##_H##interrupt:					\
632	std	r11,PACA_EXGEN+EX_R11(r13);		\
633	lbz	r11,PACAIRQHAPPENED(r13);		\
634	or	r11,r11,r10;				\
635	stb	r11,PACAIRQHAPPENED(r13);		\
636	cmpwi	r10,PACA_IRQ_DEC;			\
637	bne	1f;					\
638	lis	r10,0x7fff;				\
639	ori	r10,r10,0xffff;				\
640	mtspr	SPRN_DEC,r10;				\
641	b	2f;					\
6421:	cmpwi	r10,PACA_IRQ_DBELL;			\
643	beq	2f;					\
644	mfspr	r10,SPRN_##_H##SRR1;			\
645	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
646	rotldi	r10,r10,16;				\
647	mtspr	SPRN_##_H##SRR1,r10;			\
6482:	mtcrf	0x80,r9;				\
649	ld	r9,PACA_EXGEN+EX_R9(r13);		\
650	ld	r10,PACA_EXGEN+EX_R10(r13);		\
651	ld	r11,PACA_EXGEN+EX_R11(r13);		\
652	GET_SCRATCH0(r13);				\
653	##_H##rfid;					\
654	b	.
655
656	MASKED_INTERRUPT()
657	MASKED_INTERRUPT(H)
658
659/*
660 * Called from arch_local_irq_enable when an interrupt needs
661 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
662 * which kind of interrupt. MSR:EE is already off. We generate a
 * stack frame as if a real interrupt had happened.
664 *
665 * Note: While MSR:EE is off, we need to make sure that _MSR
666 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
668 */
669_GLOBAL(__replay_interrupt)
670	/* We are going to jump to the exception common code which
671	 * will retrieve various register values from the PACA which
672	 * we don't give a damn about, so we don't bother storing them.
673	 */
674	mfmsr	r12
675	mflr	r11
676	mfcr	r9
677	ori	r12,r12,MSR_EE
678	cmpwi	r3,0x900
679	beq	decrementer_common
680	cmpwi	r3,0x500
681	beq	hardware_interrupt_common
682BEGIN_FTR_SECTION
683	cmpwi	r3,0xe80
684	beq	h_doorbell_common
685FTR_SECTION_ELSE
686	cmpwi	r3,0xa00
687	beq	doorbell_super_common
688ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
689	blr
690
691#ifdef CONFIG_PPC_PSERIES
692/*
693 * Vectors for the FWNMI option.  Share common code.
694 */
695	.globl system_reset_fwnmi
	.align	7
697system_reset_fwnmi:
698	HMT_MEDIUM_PPR_DISCARD
699	SET_SCRATCH0(r13)		/* save r13 */
700	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
701				 NOTEST, 0x100)
702
703#endif /* CONFIG_PPC_PSERIES */
704
705#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual, which
 * doesn't happen at the moment but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
711slb_miss_user_pseries:
712	std	r10,PACA_EXGEN+EX_R10(r13)
713	std	r11,PACA_EXGEN+EX_R11(r13)
714	std	r12,PACA_EXGEN+EX_R12(r13)
715	GET_SCRATCH0(r10)
716	ld	r11,PACA_EXSLB+EX_R9(r13)
717	ld	r12,PACA_EXSLB+EX_R3(r13)
718	std	r10,PACA_EXGEN+EX_R13(r13)
719	std	r11,PACA_EXGEN+EX_R9(r13)
720	std	r12,PACA_EXGEN+EX_R3(r13)
721	clrrdi	r12,r13,32
722	mfmsr	r10
	mfspr	r11,SPRN_SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1			/* and SRR1 */
	mtspr	SPRN_SRR1,r10
729	rfid
730	b	.				/* prevent spec. execution */
731#endif /* __DISABLED__ */
732
733#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
734kvmppc_skip_interrupt:
735	/*
736	 * Here all GPRs are unchanged from when the interrupt happened
737	 * except for r13, which is saved in SPRG_SCRATCH0.
738	 */
739	mfspr	r13, SPRN_SRR0
740	addi	r13, r13, 4
741	mtspr	SPRN_SRR0, r13
742	GET_SCRATCH0(r13)
743	rfid
744	b	.
745
746kvmppc_skip_Hinterrupt:
747	/*
748	 * Here all GPRs are unchanged from when the interrupt happened
749	 * except for r13, which is saved in SPRG_SCRATCH0.
750	 */
751	mfspr	r13, SPRN_HSRR0
752	addi	r13, r13, 4
753	mtspr	SPRN_HSRR0, r13
754	GET_SCRATCH0(r13)
755	hrfid
756	b	.
757#endif
758
759/*
760 * Code from here down to __end_handlers is invoked from the
761 * exception prologs above.  Because the prologs assemble the
762 * addresses of these handlers using the LOAD_HANDLER macro,
763 * which uses an ori instruction, these handlers must be in
764 * the first 64k of the kernel image.
765 */
766
767/*** Common interrupt handlers ***/
768
769	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
770
	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, .do_IRQ)
772	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
773	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
774#ifdef CONFIG_PPC_DOORBELL
775	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
776#else
777	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
778#endif
779	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
780	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
781	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
782	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
783	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
784#ifdef CONFIG_PPC_DOORBELL
785	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
786#else
787	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
788#endif
789	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
790	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
791	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
792#ifdef CONFIG_ALTIVEC
793	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
794#else
795	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
796#endif
797#ifdef CONFIG_CBE_RAS
798	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
799	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
800	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
801#endif /* CONFIG_CBE_RAS */
802
803	/*
804	 * Relocation-on interrupts: A subset of the interrupts can be delivered
805	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
806	 * it.  Addresses are the same as the original interrupt addresses, but
807	 * offset by 0xc000000000004000.
808	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
810	 * to HV=1 from HV=0 is delivered via real mode handlers.
811	 */
812
813	/*
814	 * This uses the standard macro, since the original 0x300 vector
815	 * only has extra guff for STAB-based processors -- which never
816	 * come here.
817	 */
818	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
819	. = 0x4380
820	.globl data_access_slb_relon_pSeries
821data_access_slb_relon_pSeries:
822	SET_SCRATCH0(r13)
823	EXCEPTION_PROLOG_0(PACA_EXSLB)
824	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
825	std	r3,PACA_EXSLB+EX_R3(r13)
826	mfspr	r3,SPRN_DAR
827	mfspr	r12,SPRN_SRR1
828#ifndef CONFIG_RELOCATABLE
829	b	.slb_miss_realmode
830#else
831	/*
832	 * We can't just use a direct branch to .slb_miss_realmode
833	 * because the distance from here to there depends on where
834	 * the kernel ends up being put.
835	 */
836	mfctr	r11
837	ld	r10,PACAKBASE(r13)
838	LOAD_HANDLER(r10, .slb_miss_realmode)
839	mtctr	r10
840	bctr
841#endif
842
843	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
844	. = 0x4480
845	.globl instruction_access_slb_relon_pSeries
846instruction_access_slb_relon_pSeries:
847	SET_SCRATCH0(r13)
848	EXCEPTION_PROLOG_0(PACA_EXSLB)
849	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
850	std	r3,PACA_EXSLB+EX_R3(r13)
851	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
852	mfspr	r12,SPRN_SRR1
853#ifndef CONFIG_RELOCATABLE
854	b	.slb_miss_realmode
855#else
856	mfctr	r11
857	ld	r10,PACAKBASE(r13)
858	LOAD_HANDLER(r10, .slb_miss_realmode)
859	mtctr	r10
860	bctr
861#endif
862
863	. = 0x4500
864	.globl hardware_interrupt_relon_pSeries;
865	.globl hardware_interrupt_relon_hv;
866hardware_interrupt_relon_pSeries:
867hardware_interrupt_relon_hv:
868	BEGIN_FTR_SECTION
869		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
870	FTR_SECTION_ELSE
871		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
872	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
873	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
874	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
875	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
876	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
877	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
878	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
879	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
880
881	. = 0x4c00
882	.globl system_call_relon_pSeries
883system_call_relon_pSeries:
884	HMT_MEDIUM
885	SYSCALL_PSERIES_1
886	SYSCALL_PSERIES_2_DIRECT
887	SYSCALL_PSERIES_3
888
889	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
890
891	. = 0x4e00
892	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
893
894	. = 0x4e20
895	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
896
897	. = 0x4e40
898emulation_assist_relon_trampoline:
899	SET_SCRATCH0(r13)
900	EXCEPTION_PROLOG_0(PACA_EXGEN)
901	b	emulation_assist_relon_hv
902
903	. = 0x4e60
904	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
905
906	. = 0x4e80
907h_doorbell_relon_trampoline:
908	SET_SCRATCH0(r13)
909	EXCEPTION_PROLOG_0(PACA_EXGEN)
910	b	h_doorbell_relon_hv
911
912	. = 0x4f00
913performance_monitor_relon_pseries_trampoline:
914	SET_SCRATCH0(r13)
915	EXCEPTION_PROLOG_0(PACA_EXGEN)
916	b	performance_monitor_relon_pSeries
917
918	. = 0x4f20
919altivec_unavailable_relon_pseries_trampoline:
920	SET_SCRATCH0(r13)
921	EXCEPTION_PROLOG_0(PACA_EXGEN)
922	b	altivec_unavailable_relon_pSeries
923
924	. = 0x4f40
925vsx_unavailable_relon_pseries_trampoline:
926	SET_SCRATCH0(r13)
927	EXCEPTION_PROLOG_0(PACA_EXGEN)
928	b	vsx_unavailable_relon_pSeries
929
930	. = 0x4f60
931facility_unavailable_relon_trampoline:
932	SET_SCRATCH0(r13)
933	EXCEPTION_PROLOG_0(PACA_EXGEN)
934	b	facility_unavailable_relon_pSeries
935
936	. = 0x4f80
937hv_facility_unavailable_relon_trampoline:
938	SET_SCRATCH0(r13)
939	EXCEPTION_PROLOG_0(PACA_EXGEN)
940	b	hv_facility_unavailable_relon_hv
941
942	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
943#ifdef CONFIG_PPC_DENORMALISATION
944	. = 0x5500
945	b	denorm_exception_hv
946#endif
947	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
948
949	/* Other future vectors */
950	.align	7
951	.globl	__end_interrupts
952__end_interrupts:
953
954	.align	7
955system_call_entry_direct:
956#if defined(CONFIG_RELOCATABLE)
957	/* The first level prologue may have used LR to get here, saving
958	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
959	 */
960	mtlr	r10
961#endif
962system_call_entry:
963	b	system_call_common
964
965ppc64_runlatch_on_trampoline:
966	b	.__ppc64_runlatch_on
967
968/*
969 * Here we have detected that the kernel stack pointer is bad.
970 * R9 contains the saved CR, r13 points to the paca,
971 * r10 contains the (bad) kernel stack pointer,
972 * r11 and r12 contain the saved SRR0 and SRR1.
973 * We switch to using an emergency stack, save the registers there,
974 * and call kernel_bad_stack(), which panics.
975 */
976bad_stack:
977	ld	r1,PACAEMERGSP(r13)
978	subi	r1,r1,64+INT_FRAME_SIZE
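	/* carve an exception frame plus 64 bytes of slack out of the
	 * emergency stack
	 */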
979	std	r9,_CCR(r1)
980	std	r10,GPR1(r1)
981	std	r11,_NIP(r1)
982	std	r12,_MSR(r1)
983	mfspr	r11,SPRN_DAR
984	mfspr	r12,SPRN_DSISR
985	std	r11,_DAR(r1)
986	std	r12,_DSISR(r1)
987	mflr	r10
988	mfctr	r11
989	mfxer	r12
990	std	r10,_LINK(r1)
991	std	r11,_CTR(r1)
992	std	r12,_XER(r1)
993	SAVE_GPR(0,r1)
994	SAVE_GPR(2,r1)
995	ld	r10,EX_R3(r3)
996	std	r10,GPR3(r1)
997	SAVE_GPR(4,r1)
998	SAVE_4GPRS(5,r1)
999	ld	r9,EX_R9(r3)
1000	ld	r10,EX_R10(r3)
1001	SAVE_2GPRS(9,r1)
1002	ld	r9,EX_R11(r3)
1003	ld	r10,EX_R12(r3)
1004	ld	r11,EX_R13(r3)
1005	std	r9,GPR11(r1)
1006	std	r10,GPR12(r1)
1007	std	r11,GPR13(r1)
1008BEGIN_FTR_SECTION
1009	ld	r10,EX_CFAR(r3)
1010	std	r10,ORIG_GPR3(r1)
1011END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1012	SAVE_8GPRS(14,r1)
1013	SAVE_10GPRS(22,r1)
1014	lhz	r12,PACA_TRAP_SAVE(r13)
1015	std	r12,_TRAP(r1)
1016	addi	r11,r1,INT_FRAME_SIZE
1017	std	r11,0(r1)
1018	li	r12,0
1019	std	r12,0(r11)
1020	ld	r2,PACATOC(r13)
1021	ld	r11,exception_marker@toc(r2)
1022	std	r12,RESULT(r1)
1023	std	r11,STACK_FRAME_OVERHEAD-16(r1)
10241:	addi	r3,r1,STACK_FRAME_OVERHEAD
1025	bl	.kernel_bad_stack
1026	b	1b
1027
1028/*
1029 * Here r13 points to the paca, r9 contains the saved CR,
1030 * SRR0 and SRR1 are saved in r11 and r12,
1031 * r9 - r13 are saved in paca->exgen.
1032 */
1033	.align	7
1034	.globl data_access_common
1035data_access_common:
1036	mfspr	r10,SPRN_DAR
1037	std	r10,PACA_EXGEN+EX_DAR(r13)
1038	mfspr	r10,SPRN_DSISR
1039	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1040	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
1041	DISABLE_INTS
1042	ld	r12,_MSR(r1)
1043	ld	r3,PACA_EXGEN+EX_DAR(r13)
1044	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1045	li	r5,0x300
1046	b	.do_hash_page		/* Try to handle as hpte fault */
1047
1048	.align  7
1049	.globl  h_data_storage_common
1050h_data_storage_common:
1051	mfspr   r10,SPRN_HDAR
1052	std     r10,PACA_EXGEN+EX_DAR(r13)
1053	mfspr   r10,SPRN_HDSISR
1054	stw     r10,PACA_EXGEN+EX_DSISR(r13)
1055	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1056	bl      .save_nvgprs
1057	DISABLE_INTS
1058	addi    r3,r1,STACK_FRAME_OVERHEAD
1059	bl      .unknown_exception
1060	b       .ret_from_except
1061
1062	.align	7
1063	.globl instruction_access_common
1064instruction_access_common:
1065	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
1066	DISABLE_INTS
1067	ld	r12,_MSR(r1)
1068	ld	r3,_NIP(r1)
1069	andis.	r4,r12,0x5820
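	/* r4 = SRR1 status bits (0x58200000) recast as a DSISR-style
	 * fault code for do_hash_page
	 */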
1070	li	r5,0x400
1071	b	.do_hash_page		/* Try to handle as hpte fault */
1072
1073	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
1074
/*
 * Here is the common SLB miss user handler that is used when going to
 * virtual mode for SLB misses; it is currently not used
 */
1079#ifdef __DISABLED__
1080	.align	7
1081	.globl	slb_miss_user_common
1082slb_miss_user_common:
1083	mflr	r10
1084	std	r3,PACA_EXGEN+EX_DAR(r13)
1085	stw	r9,PACA_EXGEN+EX_CCR(r13)
1086	std	r10,PACA_EXGEN+EX_LR(r13)
1087	std	r11,PACA_EXGEN+EX_SRR0(r13)
1088	bl	.slb_allocate_user
1089
1090	ld	r10,PACA_EXGEN+EX_LR(r13)
1091	ld	r3,PACA_EXGEN+EX_R3(r13)
1092	lwz	r9,PACA_EXGEN+EX_CCR(r13)
1093	ld	r11,PACA_EXGEN+EX_SRR0(r13)
1094	mtlr	r10
1095	beq-	slb_miss_fault
1096
1097	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
1098	beq-	unrecov_user_slb
1099	mfmsr	r10
1100
1101.machine push
1102.machine "power4"
1103	mtcrf	0x80,r9
1104.machine pop
1105
1106	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
1107	mtmsrd	r10,1
1108
1109	mtspr	SRR0,r11
1110	mtspr	SRR1,r12
1111
1112	ld	r9,PACA_EXGEN+EX_R9(r13)
1113	ld	r10,PACA_EXGEN+EX_R10(r13)
1114	ld	r11,PACA_EXGEN+EX_R11(r13)
1115	ld	r12,PACA_EXGEN+EX_R12(r13)
1116	ld	r13,PACA_EXGEN+EX_R13(r13)
1117	rfid
1118	b	.
1119
1120slb_miss_fault:
1121	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1122	ld	r4,PACA_EXGEN+EX_DAR(r13)
1123	li	r5,0
1124	std	r4,_DAR(r1)
1125	std	r5,_DSISR(r1)
1126	b	handle_page_fault
1127
1128unrecov_user_slb:
1129	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1130	DISABLE_INTS
1131	bl	.save_nvgprs
11321:	addi	r3,r1,STACK_FRAME_OVERHEAD
1133	bl	.unrecoverable_exception
1134	b	1b
1135
1136#endif /* __DISABLED__ */
1137
1138
1139	/*
1140	 * Machine check is different because we use a different
1141	 * save area: PACA_EXMC instead of PACA_EXGEN.
1142	 */
1143	.align	7
1144	.globl machine_check_common
1145machine_check_common:
1146
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXMC+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXMC+EX_DSISR(r13)
1151	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1152	FINISH_NAP
1153	DISABLE_INTS
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
1156	std	r3,_DAR(r1)
1157	std	r4,_DSISR(r1)
1158	bl	.save_nvgprs
1159	addi	r3,r1,STACK_FRAME_OVERHEAD
1160	bl	.machine_check_exception
1161	b	.ret_from_except
1162
1163	.align	7
1164	.globl alignment_common
1165alignment_common:
1166	mfspr	r10,SPRN_DAR
1167	std	r10,PACA_EXGEN+EX_DAR(r13)
1168	mfspr	r10,SPRN_DSISR
1169	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1170	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1171	ld	r3,PACA_EXGEN+EX_DAR(r13)
1172	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1173	std	r3,_DAR(r1)
1174	std	r4,_DSISR(r1)
1175	bl	.save_nvgprs
1176	DISABLE_INTS
1177	addi	r3,r1,STACK_FRAME_OVERHEAD
1178	bl	.alignment_exception
1179	b	.ret_from_except
1180
1181	.align	7
1182	.globl program_check_common
1183program_check_common:
1184	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1185	bl	.save_nvgprs
1186	DISABLE_INTS
1187	addi	r3,r1,STACK_FRAME_OVERHEAD
1188	bl	.program_check_exception
1189	b	.ret_from_except
1190
1191	.align	7
1192	.globl fp_unavailable_common
1193fp_unavailable_common:
1194	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1195	bne	1f			/* if from user, just load it up */
1196	bl	.save_nvgprs
1197	DISABLE_INTS
1198	addi	r3,r1,STACK_FRAME_OVERHEAD
1199	bl	.kernel_fp_unavailable_exception
1200	BUG_OPCODE
12011:
1202#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1203BEGIN_FTR_SECTION
1204	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1205	 * transaction), go do TM stuff
1206	 */
1207	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1208	bne-	2f
1209END_FTR_SECTION_IFSET(CPU_FTR_TM)
1210#endif
1211	bl	.load_up_fpu
1212	b	fast_exception_return
1213#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12142:	/* User process was in a transaction */
1215	bl	.save_nvgprs
1216	DISABLE_INTS
1217	addi	r3,r1,STACK_FRAME_OVERHEAD
1218	bl	.fp_unavailable_tm
1219	b	.ret_from_except
1220#endif
1221	.align	7
1222	.globl altivec_unavailable_common
1223altivec_unavailable_common:
1224	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1225#ifdef CONFIG_ALTIVEC
1226BEGIN_FTR_SECTION
1227	beq	1f
1228#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1229  BEGIN_FTR_SECTION_NESTED(69)
1230	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1231	 * transaction), go do TM stuff
1232	 */
1233	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1234	bne-	2f
1235  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1236#endif
1237	bl	.load_up_altivec
1238	b	fast_exception_return
1239#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12402:	/* User process was in a transaction */
1241	bl	.save_nvgprs
1242	DISABLE_INTS
1243	addi	r3,r1,STACK_FRAME_OVERHEAD
1244	bl	.altivec_unavailable_tm
1245	b	.ret_from_except
1246#endif
12471:
1248END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1249#endif
1250	bl	.save_nvgprs
1251	DISABLE_INTS
1252	addi	r3,r1,STACK_FRAME_OVERHEAD
1253	bl	.altivec_unavailable_exception
1254	b	.ret_from_except
1255
1256	.align	7
1257	.globl vsx_unavailable_common
1258vsx_unavailable_common:
1259	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1260#ifdef CONFIG_VSX
1261BEGIN_FTR_SECTION
1262	beq	1f
1263#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1264  BEGIN_FTR_SECTION_NESTED(69)
1265	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1266	 * transaction), go do TM stuff
1267	 */
1268	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1269	bne-	2f
1270  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1271#endif
1272	b	.load_up_vsx
1273#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12742:	/* User process was in a transaction */
1275	bl	.save_nvgprs
1276	DISABLE_INTS
1277	addi	r3,r1,STACK_FRAME_OVERHEAD
1278	bl	.vsx_unavailable_tm
1279	b	.ret_from_except
1280#endif
12811:
1282END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1283#endif
1284	bl	.save_nvgprs
1285	DISABLE_INTS
1286	addi	r3,r1,STACK_FRAME_OVERHEAD
1287	bl	.vsx_unavailable_exception
1288	b	.ret_from_except
1289
1290	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1291	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
1292
1293	.align	7
1294	.globl	__end_handlers
1295__end_handlers:
1296
1297	/* Equivalents to the above handlers for relocation-on interrupt vectors */
1298	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1299	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1300
1301	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1302	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1303	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1304	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1305	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1306
1307#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1308/*
1309 * Data area reserved for FWNMI option.
1310 * This address (0x7000) is fixed by the RPA.
1311 */
	. = 0x7000
1313	.globl fwnmi_data_area
1314fwnmi_data_area:
1315
1316	/* pseries and powernv need to keep the whole page from
1317	 * 0x7000 to 0x8000 free for use by the firmware
1318	 */
1319	. = 0x8000
1320#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1321
1322/* Space for CPU0's segment table */
1323	.balign 4096
1324	.globl initial_stab
1325initial_stab:
1326	.space	4096
1327
1328#ifdef CONFIG_PPC_POWERNV
1329_GLOBAL(opal_mc_secondary_handler)
1330	HMT_MEDIUM_PPR_DISCARD
1331	SET_SCRATCH0(r13)
1332	GET_PACA(r13)
1333	clrldi	r3,r3,2
1334	tovirt(r3,r3)
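	/* r3 is now the kernel virtual address of the OPAL MC event */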
1335	std	r3,PACA_OPAL_MC_EVT(r13)
1336	ld	r13,OPAL_MC_SRR0(r3)
1337	mtspr	SPRN_SRR0,r13
1338	ld	r13,OPAL_MC_SRR1(r3)
1339	mtspr	SPRN_SRR1,r13
1340	ld	r3,OPAL_MC_GPR3(r3)
1341	GET_SCRATCH0(r13)
1342	b	machine_check_pSeries
1343#endif /* CONFIG_PPC_POWERNV */
1344
1345
1346#define MACHINE_CHECK_HANDLER_WINDUP			\
1347	/* Clear MSR_RI before setting SRR0 and SRR1. */\
1348	li	r0,MSR_RI;				\
1349	mfmsr	r9;		/* get MSR value */	\
1350	andc	r9,r9,r0;				\
1351	mtmsrd	r9,1;		/* Clear MSR_RI */	\
1352	/* Move original SRR0 and SRR1 into the respective regs */	\
1353	ld	r9,_MSR(r1);				\
1354	mtspr	SPRN_SRR1,r9;				\
1355	ld	r3,_NIP(r1);				\
1356	mtspr	SPRN_SRR0,r3;				\
1357	ld	r9,_CTR(r1);				\
1358	mtctr	r9;					\
1359	ld	r9,_XER(r1);				\
1360	mtxer	r9;					\
1361	ld	r9,_LINK(r1);				\
1362	mtlr	r9;					\
1363	REST_GPR(0, r1);				\
1364	REST_8GPRS(2, r1);				\
1365	REST_GPR(10, r1);				\
1366	ld	r11,_CCR(r1);				\
1367	mtcr	r11;					\
1368	/* Decrement paca->in_mce. */			\
1369	lhz	r12,PACA_IN_MCE(r13);			\
1370	subi	r12,r12,1;				\
1371	sth	r12,PACA_IN_MCE(r13);			\
1372	REST_GPR(11, r1);				\
1373	REST_2GPRS(12, r1);				\
1374	/* restore original r1. */			\
1375	ld	r1,GPR1(r1)
1376
1377	/*
1378	 * Handle machine check early in real mode. We come here with
1379	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1380	 */
1381	.align	7
1382	.globl machine_check_handle_early
1383machine_check_handle_early:
1384	std	r0,GPR0(r1)	/* Save r0 */
1385	EXCEPTION_PROLOG_COMMON_3(0x200)
1386	bl	.save_nvgprs
1387	addi	r3,r1,STACK_FRAME_OVERHEAD
1388	bl	.machine_check_early
1389	ld	r12,_MSR(r1)
1390#ifdef	CONFIG_PPC_P7_NAP
1391	/*
1392	 * Check if thread was in power saving mode. We come here when any
1393	 * of the following is true:
1394	 * a. thread wasn't in power saving mode
1395	 * b. thread was in power saving mode with no state loss or
1396	 *    supervisor state loss
1397	 *
1398	 * Go back to nap again if (b) is true.
1399	 */
1400	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn't */
1402	/* Thread was in power saving mode. Go back to nap again. */
1403	cmpwi	r11,2
1404	bne	3f
1405	/* Supervisor state loss */
1406	li	r0,1
1407	stb	r0,PACA_NAPSTATELOST(r13)
14083:	bl	.machine_check_queue_event
1409	MACHINE_CHECK_HANDLER_WINDUP
1410	GET_PACA(r13)
1411	ld	r1,PACAR1(r13)
1412	b	.power7_enter_nap_mode
14134:
1414#endif
1415	/*
1416	 * Check if we are coming from hypervisor userspace. If yes then we
1417	 * continue in host kernel in V mode to deliver the MC event.
1418	 */
1419	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
1420	beq	5f
1421	andi.	r11,r12,MSR_PR		/* See if coming from user. */
1422	bne	9f			/* continue in V mode if we are. */
1423
14245:
1425#ifdef CONFIG_KVM_BOOK3S_64_HV
1426	/*
1427	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
1429	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
1430	 */
1431	lbz	r11,HSTATE_IN_GUEST(r13)
1432	cmpwi	r11,0			/* Check if coming from guest */
1433	bne	9f			/* continue if we are. */
1434#endif
1435	/*
1436	 * At this point we are not sure about what context we come from.
1437	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
1439	 * If yes, then stay on emergency stack and panic.
1440	 */
1441	andi.	r11,r12,MSR_RI
1442	bne	2f
14431:	addi	r3,r1,STACK_FRAME_OVERHEAD
1444	bl	.unrecoverable_exception
1445	b	1b
14462:
1447	/*
1448	 * Return from MC interrupt.
1449	 * Queue up the MCE event so that we can log it later, while
1450	 * returning from kernel or opal call.
1451	 */
1452	bl	.machine_check_queue_event
1453	MACHINE_CHECK_HANDLER_WINDUP
1454	rfid
14559:
1456	/* Deliver the machine check to host kernel in V mode. */
1457	MACHINE_CHECK_HANDLER_WINDUP
1458	b	machine_check_pSeries
1459
1460/*
1461 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1; SRR0 is still ready for return
1463 * r3 has the faulting address
1464 * r9 - r13 are saved in paca->exslb.
1465 * r3 is saved in paca->slb_r3
1466 * We assume we aren't going to take any exceptions during this procedure.
1467 */
1468_GLOBAL(slb_miss_realmode)
1469	mflr	r10
1470#ifdef CONFIG_RELOCATABLE
1471	mtctr	r11
1472#endif
1473
1474	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1475	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1476
1477	bl	.slb_allocate_realmode
1478
1479	/* All done -- return from exception. */
1480
1481	ld	r10,PACA_EXSLB+EX_LR(r13)
1482	ld	r3,PACA_EXSLB+EX_R3(r13)
1483	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1484
1485	mtlr	r10
1486
1487	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1488	beq-	2f
1489
1490.machine	push
1491.machine	"power4"
1492	mtcrf	0x80,r9
1493	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1494.machine	pop
1495
1496	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1497	ld	r9,PACA_EXSLB+EX_R9(r13)
1498	ld	r10,PACA_EXSLB+EX_R10(r13)
1499	ld	r11,PACA_EXSLB+EX_R11(r13)
1500	ld	r12,PACA_EXSLB+EX_R12(r13)
1501	ld	r13,PACA_EXSLB+EX_R13(r13)
1502	rfid
1503	b	.	/* prevent speculative execution */
1504
15052:	mfspr	r11,SPRN_SRR0
1506	ld	r10,PACAKBASE(r13)
1507	LOAD_HANDLER(r10,unrecov_slb)
1508	mtspr	SPRN_SRR0,r10
1509	ld	r10,PACAKMSR(r13)
1510	mtspr	SPRN_SRR1,r10
1511	rfid
1512	b	.
1513
1514unrecov_slb:
1515	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1516	DISABLE_INTS
1517	bl	.save_nvgprs
15181:	addi	r3,r1,STACK_FRAME_OVERHEAD
1519	bl	.unrecoverable_exception
1520	b	1b
1521
1522
1523#ifdef CONFIG_PPC_970_NAP
1524power4_fixup_nap:
1525	andc	r9,r9,r10
1526	std	r9,TI_LOCAL_FLAGS(r11)
1527	ld	r10,_LINK(r1)		/* make idle task do the */
1528	std	r10,_NIP(r1)		/* equivalent of a blr */
1529	blr
1530#endif
1531
1532/*
1533 * Hash table stuff
1534 */
1535	.align	7
1536_STATIC(do_hash_page)
1537	std	r3,_DAR(r1)
1538	std	r4,_DSISR(r1)
1539
1540	andis.	r0,r4,0xa410		/* weird error? */
1541	bne-	handle_page_fault	/* if not, try to insert a HPTE */
1542	andis.  r0,r4,DSISR_DABRMATCH@h
1543	bne-    handle_dabr_fault
1544
1545BEGIN_FTR_SECTION
1546	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1547	bne-	do_ste_alloc		/* If so handle it */
1548END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1549
1550	CURRENT_THREAD_INFO(r11, r1)
1551	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
1552	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
1553	bne	77f			/* then don't call hash_page now */
1554	/*
1555	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1556	 * accessing a userspace segment (even from the kernel). We assume
1557	 * kernel addresses always have the high bit set.
1558	 */
1559	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1560	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1561	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1562	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1563	ori	r4,r4,1			/* add _PAGE_PRESENT */
1564	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
1565
1566	/*
1567	 * r3 contains the faulting address
1568	 * r4 contains the required access permissions
1569	 * r5 contains the trap number
1570	 *
1571	 * at return r3 = 0 for success, 1 for page fault, negative for error
1572	 */
1573	bl	.hash_page		/* build HPTE if possible */
1574	cmpdi	r3,0			/* see if hash_page succeeded */
1575
1576	/* Success */
1577	beq	fast_exc_return_irq	/* Return from exception on success */
1578
1579	/* Error */
1580	blt-	13f
1581
1582/* Here we have a page fault that hash_page can't handle. */
1583handle_page_fault:
158411:	ld	r4,_DAR(r1)
1585	ld	r5,_DSISR(r1)
1586	addi	r3,r1,STACK_FRAME_OVERHEAD
1587	bl	.do_page_fault
1588	cmpdi	r3,0
1589	beq+	12f
1590	bl	.save_nvgprs
1591	mr	r5,r3
1592	addi	r3,r1,STACK_FRAME_OVERHEAD
1593	lwz	r4,_DAR(r1)
1594	bl	.bad_page_fault
1595	b	.ret_from_except
1596
1597/* We have a data breakpoint exception - handle it */
1598handle_dabr_fault:
1599	bl	.save_nvgprs
1600	ld      r4,_DAR(r1)
1601	ld      r5,_DSISR(r1)
1602	addi    r3,r1,STACK_FRAME_OVERHEAD
1603	bl      .do_break
160412:	b       .ret_from_except_lite
1605
1606
1607/* We have a page fault that hash_page could handle but HV refused
1608 * the PTE insertion
1609 */
161013:	bl	.save_nvgprs
1611	mr	r5,r3
1612	addi	r3,r1,STACK_FRAME_OVERHEAD
1613	ld	r4,_DAR(r1)
1614	bl	.low_hash_fault
1615	b	.ret_from_except
1616
1617/*
1618 * We come here as a result of a DSI at a point where we don't want
1619 * to call hash_page, such as when we are accessing memory (possibly
1620 * user memory) inside a PMU interrupt that occurred while interrupts
1621 * were soft-disabled.  We want to invoke the exception handler for
1622 * the access, or panic if there isn't a handler.
1623 */
162477:	bl	.save_nvgprs
1625	mr	r4,r3
1626	addi	r3,r1,STACK_FRAME_OVERHEAD
1627	li	r5,SIGSEGV
1628	bl	.bad_page_fault
1629	b	.ret_from_except
1630
1631	/* here we have a segment miss */
1632do_ste_alloc:
1633	bl	.ste_allocate		/* try to insert stab entry */
1634	cmpdi	r3,0
1635	bne-	handle_page_fault
1636	b	fast_exception_return
1637
1638/*
1639 * r13 points to the PACA, r9 contains the saved CR,
1640 * r11 and r12 contain the saved SRR0 and SRR1.
1641 * r9 - r13 are saved in paca->exslb.
1642 * We assume we aren't going to take any exceptions during this procedure.
1643 * We assume (DAR >> 60) == 0xc.
1644 */
1645	.align	7
1646_GLOBAL(do_stab_bolted)
1647	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1648	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1649	mfspr	r11,SPRN_DAR			/* ea */
1650
1651	/*
1652	 * check for bad kernel/user address
1653	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1654	 */
1655	rldicr. r9,r11,4,(63 - 46 - 4)
1656	li	r9,0	/* VSID = 0 for bad address */
1657	bne-	0f
1658
1659	/*
1660	 * Calculate VSID:
1661	 * This is the kernel vsid, we take the top for context from
1662	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1663	 * Here we know that (ea >> 60) == 0xc
1664	 */
1665	lis	r9,(MAX_USER_CONTEXT + 1)@ha
1666	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
1667
1668	srdi	r10,r11,SID_SHIFT
1669	rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1670	ASM_VSID_SCRAMBLE(r10, r9, 256M)
1671	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
1672
16730:
1674	/* Hash to the primary group */
1675	ld	r10,PACASTABVIRT(r13)
1676	srdi	r11,r11,SID_SHIFT
1677	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1678
1679	/* Search the primary group for a free entry */
16801:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1681	andi.	r11,r11,0x80
1682	beq	2f
1683	addi	r10,r10,16
1684	andi.	r11,r10,0x70
1685	bne	1b
1686
	/* For now we only search the primary group.			*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0	*/
1690	mftb	r11
1691	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1692	ori	r11,r11,0x10
1693
1694	/* r10 currently points to an ste one past the group of interest */
1695	/* make it point to the randomly selected entry			*/
1696	subi	r10,r10,128
1697	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1698
1699	isync			/* mark the entry invalid		*/
1700	ld	r11,0(r10)
1701	rldicl	r11,r11,56,1	/* clear the valid bit */
1702	rotldi	r11,r11,8
1703	std	r11,0(r10)
1704	sync
1705
1706	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1707	slbie	r11
1708
17092:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1710	eieio
1711
1712	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1713	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1714	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1715	std	r11,0(r10)	/* Put new entry back into the stab	*/
1716
1717	sync
1718
1719	/* All done -- return from exception. */
1720	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1721	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1722
1723	andi.	r10,r12,MSR_RI
1724	beq-	unrecov_slb
1725
1726	mtcrf	0x80,r9			/* restore CR */
1727
1728	mfmsr	r10
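	/* clear RI before we set SRR0/1 for the rfid below */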
1729	clrrdi	r10,r10,2
1730	mtmsrd	r10,1
1731
1732	mtspr	SPRN_SRR0,r11
1733	mtspr	SPRN_SRR1,r12
1734	ld	r9,PACA_EXSLB+EX_R9(r13)
1735	ld	r10,PACA_EXSLB+EX_R10(r13)
1736	ld	r11,PACA_EXSLB+EX_R11(r13)
1737	ld	r12,PACA_EXSLB+EX_R12(r13)
1738	ld	r13,PACA_EXSLB+EX_R13(r13)
1739	rfid
1740	b	.	/* prevent speculative execution */
1741