1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 -        : Early init and support code
29 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
/*
 * SYSCALL_PSERIES_1: common front end of the 0xc00 system call vector.
 * On CPUs with CPU_FTR_REAL_LE, r0 == 0x1ebe selects the fast
 * endian-switch path (label 1: in SYSCALL_PSERIES_3).  Otherwise we
 * stash the user r13 in r9, load the PACA pointer into r13 and save
 * SRR0 (the return address) in r11.  Label 0: is the re-entry point
 * used by SYSCALL_PSERIES_3 when an 0x1ebe call came from the kernel.
 */
#define SYSCALL_PSERIES_1 					\
BEGIN_FTR_SECTION						\
	cmpdi	r0,0x1ebe ; 					\
	beq-	1f ;						\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
	mr	r9,r13 ;					\
	GET_PACA(r13) ;						\
	mfspr	r11,SPRN_SRR0 ;					\
0:

/*
 * SYSCALL_PSERIES_2_RFID: relocation-off tail.  Saves SRR1 in r12,
 * then rfid's to system_call_entry with the kernel MSR (PACAKMSR),
 * which turns relocation on.
 */
#define SYSCALL_PSERIES_2_RFID 					\
	mfspr	r12,SPRN_SRR1 ;					\
	ld	r10,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r10, system_call_entry) ; 			\
	mtspr	SPRN_SRR0,r10 ; 				\
	ld	r10,PACAKMSR(r13) ;				\
	mtspr	SPRN_SRR1,r10 ; 				\
	rfid ; 							\
	b	. ;	/* prevent speculative execution */

/*
 * SYSCALL_PSERIES_3: fast little-endian/big-endian switch (sc with
 * r0 == 0x1ebe).  1: from user space, just flip MSR_LE in SRR1 and
 * return.  2: is reached for the "set MSR/SDR1 and return" variant:
 * if the caller was in user mode (MSR_PR set) it is treated as a
 * normal endian switch via 0b, otherwise r3/r4/r5 are loaded into
 * SRR0/SRR1/SDR1 before returning.
 */
#define SYSCALL_PSERIES_3					\
	/* Fast LE/BE switch system call */			\
1:	mfspr	r12,SPRN_SRR1 ;					\
	xori	r12,r12,MSR_LE ;				\
	mtspr	SPRN_SRR1,r12 ;					\
	rfid ;		/* return to userspace */		\
	b	. ;						\
2:	mfspr	r12,SPRN_SRR1 ;					\
	andi.	r12,r12,MSR_PR ;				\
	bne	0b ;						\
	mtspr	SPRN_SRR0,r3 ;					\
	mtspr	SPRN_SRR1,r4 ;					\
	mtspr	SPRN_SDR1,r5 ;					\
	rfid ;							\
	b	. ;	/* prevent speculative execution */

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly; in the direct case we use LR
	 * and system_call_entry restores LR.  (We thus need to move
	 * LR to r10 in the RFID case too.)
	 */
/*
 * SYSCALL_PSERIES_2_DIRECT (relocatable kernel): relocation-on tail.
 * Branches via CTR since the handler's run-time address is unknown at
 * link time.  r13 is briefly reused as scratch to set MSR_RI (it is
 * reloaded from the PACA immediately afterwards).
 */
#define SYSCALL_PSERIES_2_DIRECT				\
	mflr	r10 ;						\
	ld	r12,PACAKBASE(r13) ; 				\
	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
	mtctr	r12 ;						\
	mfspr	r12,SPRN_SRR1 ;					\
	/* Re-use of r13... No spare regs to do this */	\
	li	r13,MSR_RI ;					\
	mtmsrd 	r13,1 ;						\
	GET_PACA(r13) ;	/* get r13 back */			\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT				\
	mfspr	r12,SPRN_SRR1 ;					\
	li	r10,MSR_RI ;					\
	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
	b	system_call_entry_direct ;
#endif
92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
	/* 0x100: System Reset interrupt (also the power-save wakeup vector) */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 in SPRG scratch */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31	/* extract SRR1 wakeup-reason bits 46:47 */
	beq	9f			/* not a power-save wakeup, take normal path */

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2		/* cr1: 1 = no loss, 2 = sup. loss, 3 = HV loss */
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.			/* HV state lost: spin forever */
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest		/* this thread was napping for KVM */
1:
#endif

	beq	cr1,2f			/* cr1 still holds the loss-level compare */
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)
149
	/* 0x200: Machine Check.  Body lives out of line (machine_check_pSeries_0)
	 * so firmware can patch this vector; only the minimal prolog is here.
	 */
	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
	b	machine_check_pSeries_0
160
	/* 0x300: Data Storage Interrupt (DSI).  On STAB-based CPUs we first
	 * detour to data_access_check_stab to catch segment-table misses.
	 */
	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	/* 0x380: Data SLB miss.  Handled in real mode by .slb_miss_realmode;
	 * r3 = faulting address (DAR), r12 = SRR1, r9-r13 saved in PACA_EXSLB.
	 */
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11			/* CTR is saved; restored by slb_miss_realmode */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
202
	/* 0x400: Instruction Storage Interrupt (ISI) */
	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	/* 0x480: Instruction SLB miss; same scheme as the 0x380 data SLB
	 * miss, but the faulting address comes from SRR0 rather than DAR.
	 */
	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* Relocatable kernel: reach .slb_miss_realmode indirectly via CTR */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
229
	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	/* 0x500: External interrupt.  HV-capable CPUs take it as an HV
	 * interrupt (vector 0x502, HSRR registers); older ones as standard.
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	HMT_MEDIUM_PPR_DISCARD
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
248
	/* 0x600: Alignment interrupt */
	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	/* 0x700: Program check (illegal instruction, trap, FP enabled exc.) */
	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	/* 0x800: Floating-point unavailable */
	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	/* 0x900: Decrementer (maskable; honours soft-disable) */
	. = 0x900
	.globl decrementer_pSeries
decrementer_pSeries:
	_MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)

	/* 0x980: Hypervisor decrementer (HV vector 0x982) */
	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	/* 0xa00: Directed privileged doorbell (maskable) */
	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	/* 0xb00: reserved/unknown trap */
	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
270
	/* 0xc00: System call (sc).  Built from the SYSCALL_PSERIES_* macros
	 * above; this is the relocation-off path, so it exits via rfid.
	 */
	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/* Guest sc traps here too: run KVMTEST to redirect guest syscalls */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
288
	/* 0xd00: Single step (trace) */
	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	/* The HV vectors are only 0x20 bytes apart, too small for a full
	 * prolog, hence the minimal stub + branch to the OOL copies below.
	 */
	. = 0xe00
hv_exception_trampoline:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_hv

	. = 0xe20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_hv

	. = 0xe40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_hv

	. = 0xe60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_hv

	. = 0xe80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

	/* 0xf60: Transactional Memory facility unavailable */
	. = 0xf60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tm_unavailable_pSeries
348
#ifdef CONFIG_CBE_RAS
	/* 0x1200: Cell Broadband Engine system error (HV vector 0x1202) */
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	/* 0x1300: Instruction address breakpoint */
	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	/* 0x1500: Denormalisation assist (HV).  Open-coded prolog: if the
	 * HSRR1 denorm bit is set we take the fast denorm_assist path,
	 * otherwise fall through to the common handler.
	 */
	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM_PPR_DISCARD
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r9,SPRN_SPRG_HSCRATCH0
	std	r9,PACA_EXGEN+EX_R13(r13)	/* original r13 */
	mfcr	r9

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	/* 0x1600: Cell maintenance interrupt (HV vector 0x1602) */
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	/* 0x1700: Altivec assist */
	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	/* 0x1800: Cell thermal management (HV vector 0x1802) */
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */
394
395
/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
	/* Decide whether a 0x300 fault is a segment-table (stab) miss.
	 * Builds a selector in r10 from DAR's top nibble and DSISR, then
	 * dispatches to the bolted-stab handler when it matches 0x2c
	 * (kernel-region stab miss); otherwise falls back to the normal
	 * data access path.
	 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60		/* r10 = top nibble of DAR */
	rlwimi	r10,r9,16,0x20		/* merge DSISR "no translation" bit */
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300		/* fold in guest-mode state */
#endif
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9			/* restore CR field 0 */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)		/* original r13 */
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	/* KVM exit paths for the vectors whose prologs are above */
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
444
#ifdef CONFIG_PPC_DENORMALISATION
/*
 * Fast path for the 0x1500 denormalisation interrupt: touch every
 * affected register so the hardware renormalises it, then return to
 * the faulting instruction (HSRR0 was already rewound by the caller).
 * On entry: r9 = saved CR, r11 = resume address, r10-r13 saved in
 * PACA_EXGEN as set up by denorm_exception_hv.
 */
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)	/* FP on, FP exceptions off */
	mtmsrd	r10
	sync
	fmr	0,0
	fmr	1,1
	fmr	2,2
	fmr	3,3
	fmr	4,4
	fmr	5,5
	fmr	6,6
	fmr	7,7
	fmr	8,8
	fmr	9,9
	fmr	10,10
	fmr	11,11
	fmr	12,12
	fmr	13,13
	fmr	14,14
	fmr	15,15
	fmr	16,16
	fmr	17,17
	fmr	18,18
	fmr	19,19
	fmr	20,20
	fmr	21,21
	fmr	22,22
	fmr	23,23
	fmr	24,24
	fmr	25,25
	fmr	26,26
	fmr	27,27
	fmr	28,28
	fmr	29,29
	fmr	30,30
	fmr	31,31
FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync
	XVCPSGNDP(0,0,0)
	XVCPSGNDP(1,1,1)
	XVCPSGNDP(2,2,2)
	XVCPSGNDP(3,3,3)
	XVCPSGNDP(4,4,4)
	XVCPSGNDP(5,5,5)
	XVCPSGNDP(6,6,6)
	XVCPSGNDP(7,7,7)
	XVCPSGNDP(8,8,8)
	XVCPSGNDP(9,9,9)
	XVCPSGNDP(10,10,10)
	XVCPSGNDP(11,11,11)
	XVCPSGNDP(12,12,12)
	XVCPSGNDP(13,13,13)
	XVCPSGNDP(14,14,14)
	XVCPSGNDP(15,15,15)
	XVCPSGNDP(16,16,16)
	XVCPSGNDP(17,17,17)
	XVCPSGNDP(18,18,18)
	XVCPSGNDP(19,19,19)
	XVCPSGNDP(20,20,20)
	XVCPSGNDP(21,21,21)
	XVCPSGNDP(22,22,22)
	XVCPSGNDP(23,23,23)
	XVCPSGNDP(24,24,24)
	XVCPSGNDP(25,25,25)
	XVCPSGNDP(26,26,26)
	XVCPSGNDP(27,27,27)
	XVCPSGNDP(28,28,28)
	XVCPSGNDP(29,29,29)
	XVCPSGNDP(30,30,30)
	XVCPSGNDP(31,31,31)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
	/* Restore state and return to the (rewound) faulting instruction */
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif
542
	.align	7
	/* moved from 0xe00 */
	/* Out-of-line bodies for the HV exception trampolines at 0xe00-0xe80 */
	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

	/* moved from 0xf00 */
	/* Out-of-line bodies for the 0xf00-0xf60 trampolines */
	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
565
566/*
567 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
568 * - If it was a decrementer interrupt, we bump the dec to max and and return.
569 * - If it was a doorbell we return immediately since doorbells are edge
570 *   triggered and won't automatically refire.
571 * - else we hard disable and return.
572 * This is called with r10 containing the value to OR to the paca field.
573 */
574#define MASKED_INTERRUPT(_H)				\
575masked_##_H##interrupt:					\
576	std	r11,PACA_EXGEN+EX_R11(r13);		\
577	lbz	r11,PACAIRQHAPPENED(r13);		\
578	or	r11,r11,r10;				\
579	stb	r11,PACAIRQHAPPENED(r13);		\
580	cmpwi	r10,PACA_IRQ_DEC;			\
581	bne	1f;					\
582	lis	r10,0x7fff;				\
583	ori	r10,r10,0xffff;				\
584	mtspr	SPRN_DEC,r10;				\
585	b	2f;					\
5861:	cmpwi	r10,PACA_IRQ_DBELL;			\
587	beq	2f;					\
588	mfspr	r10,SPRN_##_H##SRR1;			\
589	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
590	rotldi	r10,r10,16;				\
591	mtspr	SPRN_##_H##SRR1,r10;			\
5922:	mtcrf	0x80,r9;				\
593	ld	r9,PACA_EXGEN+EX_R9(r13);		\
594	ld	r10,PACA_EXGEN+EX_R10(r13);		\
595	ld	r11,PACA_EXGEN+EX_R11(r13);		\
596	GET_SCRATCH0(r13);				\
597	##_H##rfid;					\
598	b	.
599
600	MASKED_INTERRUPT()
601	MASKED_INTERRUPT(H)
602
603/*
604 * Called from arch_local_irq_enable when an interrupt needs
605 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
606 * which kind of interrupt. MSR:EE is already off. We generate a
607 * stackframe like if a real interrupt had happened.
608 *
609 * Note: While MSR:EE is off, we need to make sure that _MSR
610 * in the generated frame has EE set to 1 or the exception
611 * handler will not properly re-enable them.
612 */
613_GLOBAL(__replay_interrupt)
614	/* We are going to jump to the exception common code which
615	 * will retrieve various register values from the PACA which
616	 * we don't give a damn about, so we don't bother storing them.
617	 */
618	mfmsr	r12
619	mflr	r11
620	mfcr	r9
621	ori	r12,r12,MSR_EE
622	cmpwi	r3,0x900
623	beq	decrementer_common
624	cmpwi	r3,0x500
625	beq	hardware_interrupt_common
626BEGIN_FTR_SECTION
627	cmpwi	r3,0xe80
628	beq	h_doorbell_common
629FTR_SECTION_ELSE
630	cmpwi	r3,0xa00
631	beq	doorbell_super_common
632ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
633	blr
634
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 * Firmware delivers system reset here when FWNMI is registered.
 */
	.globl system_reset_fwnmi
      .align 7
system_reset_fwnmi:
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */
648
#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 *
 * (Dead code: only built with __DISABLED__ defined.)  Copies the
 * saved state from PACA_EXSLB to PACA_EXGEN, then rfid's to the
 * virtual-mode slb_miss_user_common handler with IR/DR enabled.
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32		/* handler is in the same 4G as the PACA */
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */
676
677/*
678 * Code from here down to __end_handlers is invoked from the
679 * exception prologs above.  Because the prologs assemble the
680 * addresses of these handlers using the LOAD_HANDLER macro,
681 * which uses an ori instruction, these handlers must be in
682 * the first 64k of the kernel image.
683 */
684
/*** Common interrupt handlers ***/

	/* 0x100 common handler: dispatch to the C system reset code */
	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
688
689	/*
690	 * Machine check is different because we use a different
691	 * save area: PACA_EXMC instead of PACA_EXGEN.
692	 */
693	.align	7
694	.globl machine_check_common
695machine_check_common:
696
697	mfspr	r10,SPRN_DAR
698	std	r10,PACA_EXGEN+EX_DAR(r13)
699	mfspr	r10,SPRN_DSISR
700	stw	r10,PACA_EXGEN+EX_DSISR(r13)
701	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
702	FINISH_NAP
703	DISABLE_INTS
704	ld	r3,PACA_EXGEN+EX_DAR(r13)
705	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
706	std	r3,_DAR(r1)
707	std	r4,_DSISR(r1)
708	bl	.save_nvgprs
709	addi	r3,r1,STACK_FRAME_OVERHEAD
710	bl	.machine_check_exception
711	b	.ret_from_except
712
	/* Common (virtual-mode) bodies for the remaining vectors; the
	 * _ASYNC variants are for interrupts subject to soft masking.
	 */
	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
#else
	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
#endif
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
744
745	/*
746	 * Relocation-on interrupts: A subset of the interrupts can be delivered
747	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
748	 * it.  Addresses are the same as the original interrupt addresses, but
749	 * offset by 0xc000000000004000.
750	 * It's impossible to receive interrupts below 0x300 via this mechanism.
751	 * KVM: None of these traps are from the guest ; anything that escalated
752	 * to HV=1 from HV=0 is delivered via real mode handlers.
753	 */
754
755	/*
756	 * This uses the standard macro, since the original 0x300 vector
757	 * only has extra guff for STAB-based processors -- which never
758	 * come here.
759	 */
760	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
761	. = 0x4380
762	.globl data_access_slb_relon_pSeries
763data_access_slb_relon_pSeries:
764	SET_SCRATCH0(r13)
765	EXCEPTION_PROLOG_0(PACA_EXSLB)
766	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
767	std	r3,PACA_EXSLB+EX_R3(r13)
768	mfspr	r3,SPRN_DAR
769	mfspr	r12,SPRN_SRR1
770#ifndef CONFIG_RELOCATABLE
771	b	.slb_miss_realmode
772#else
773	/*
774	 * We can't just use a direct branch to .slb_miss_realmode
775	 * because the distance from here to there depends on where
776	 * the kernel ends up being put.
777	 */
778	mfctr	r11
779	ld	r10,PACAKBASE(r13)
780	LOAD_HANDLER(r10, .slb_miss_realmode)
781	mtctr	r10
782	bctr
783#endif
784
	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
	/* 0x4480: relocation-on instruction SLB miss; faulting address in SRR0 */
	. = 0x4480
	.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* Relocatable kernel: reach the handler indirectly via CTR */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
804
	/* 0x4500: relocation-on external interrupt (HV vs. standard variant) */
	. = 0x4500
	.globl hardware_interrupt_relon_pSeries;
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	/* Relocation-on copies of the standard 0x600-0xb00 vectors */
	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
822
	/* 0x4c00: relocation-on system call.  Uses the _DIRECT tail since we
	 * are already running with relocation on (no rfid needed to get there).
	 */
	. = 0x4c00
	.globl system_call_relon_pSeries
system_call_relon_pSeries:
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3

	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
832
	/* 0x4e00-0x4e80: relocation-on HV exception trampolines (stubs only,
	 * the vectors are 0x20 bytes apart; bodies are the *_relon_hv OOL
	 * handlers).
	 */
	. = 0x4e00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_relon_hv

	. = 0x4e20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_relon_hv

	. = 0x4e40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	emulation_assist_relon_hv

	. = 0x4e60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_relon_hv

	. = 0x4e80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_doorbell_relon_hv

	/* 0x4f00-0x4f60: relocation-on facility-unavailable trampolines */
performance_monitor_relon_pSeries_1:
	. = 0x4f00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	performance_monitor_relon_pSeries

altivec_unavailable_relon_pSeries_1:
	. = 0x4f20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	altivec_unavailable_relon_pSeries

vsx_unavailable_relon_pSeries_1:
	. = 0x4f40
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

tm_unavailable_relon_pSeries_1:
	. = 0x4f60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tm_unavailable_relon_pSeries

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
	/* 0x5500: denorm must be handled in real mode; bounce to the
	 * real-mode 0x1500 handler.
	 */
	. = 0x5500
	b	denorm_exception_hv
#endif
	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
888
	/* Other future vectors */
	.align	7
	.globl	__end_interrupts
__end_interrupts:

	/* Landing point of the syscall prologs above; hands off to the
	 * generic system_call_common code.
	 */
	.align	7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
	/* The first level prologue may have used LR to get here, saving
	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
	 */
	mtlr	r10
#endif
system_call_entry:
	b	system_call_common

	/* Trampoline so fixed-offset code can reach .__ppc64_runlatch_on */
ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on
907
908/*
909 * Here we have detected that the kernel stack pointer is bad.
910 * R9 contains the saved CR, r13 points to the paca,
911 * r10 contains the (bad) kernel stack pointer,
912 * r11 and r12 contain the saved SRR0 and SRR1.
913 * We switch to using an emergency stack, save the registers there,
914 * and call kernel_bad_stack(), which panics.
915 */
916bad_stack:
917	ld	r1,PACAEMERGSP(r13)
918	subi	r1,r1,64+INT_FRAME_SIZE
919	std	r9,_CCR(r1)
920	std	r10,GPR1(r1)
921	std	r11,_NIP(r1)
922	std	r12,_MSR(r1)
923	mfspr	r11,SPRN_DAR
924	mfspr	r12,SPRN_DSISR
925	std	r11,_DAR(r1)
926	std	r12,_DSISR(r1)
927	mflr	r10
928	mfctr	r11
929	mfxer	r12
930	std	r10,_LINK(r1)
931	std	r11,_CTR(r1)
932	std	r12,_XER(r1)
933	SAVE_GPR(0,r1)
934	SAVE_GPR(2,r1)
935	ld	r10,EX_R3(r3)
936	std	r10,GPR3(r1)
937	SAVE_GPR(4,r1)
938	SAVE_4GPRS(5,r1)
939	ld	r9,EX_R9(r3)
940	ld	r10,EX_R10(r3)
941	SAVE_2GPRS(9,r1)
942	ld	r9,EX_R11(r3)
943	ld	r10,EX_R12(r3)
944	ld	r11,EX_R13(r3)
945	std	r9,GPR11(r1)
946	std	r10,GPR12(r1)
947	std	r11,GPR13(r1)
948BEGIN_FTR_SECTION
949	ld	r10,EX_CFAR(r3)
950	std	r10,ORIG_GPR3(r1)
951END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
952	SAVE_8GPRS(14,r1)
953	SAVE_10GPRS(22,r1)
954	lhz	r12,PACA_TRAP_SAVE(r13)
955	std	r12,_TRAP(r1)
956	addi	r11,r1,INT_FRAME_SIZE
957	std	r11,0(r1)
958	li	r12,0
959	std	r12,0(r11)
960	ld	r2,PACATOC(r13)
961	ld	r11,exception_marker@toc(r2)
962	std	r12,RESULT(r1)
963	std	r11,STACK_FRAME_OVERHEAD-16(r1)
9641:	addi	r3,r1,STACK_FRAME_OVERHEAD
965	bl	.kernel_bad_stack
966	b	1b
967
968/*
969 * Here r13 points to the paca, r9 contains the saved CR,
970 * SRR0 and SRR1 are saved in r11 and r12,
971 * r9 - r13 are saved in paca->exgen.
972 */
973	.align	7
974	.globl data_access_common
975data_access_common:
976	mfspr	r10,SPRN_DAR
977	std	r10,PACA_EXGEN+EX_DAR(r13)
978	mfspr	r10,SPRN_DSISR
979	stw	r10,PACA_EXGEN+EX_DSISR(r13)
980	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
981	DISABLE_INTS
982	ld	r12,_MSR(r1)
983	ld	r3,PACA_EXGEN+EX_DAR(r13)
984	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
985	li	r5,0x300
986	b	.do_hash_page		/* Try to handle as hpte fault */
987
	/* Hypervisor data storage interrupt: record HDAR/HDSISR and punt
	 * to .unknown_exception (no real handling implemented here).
	 */
	.align  7
	.globl  h_data_storage_common
h_data_storage_common:
	mfspr   r10,SPRN_HDAR
	std     r10,PACA_EXGEN+EX_DAR(r13)
	mfspr   r10,SPRN_HDSISR
	stw     r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl      .save_nvgprs
	DISABLE_INTS
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .unknown_exception
	b       .ret_from_except
1001
	/* Instruction access fault: like data_access_common, but the
	 * faulting address is the interrupted NIP and the fault reason
	 * bits come from SRR1 (masked by the andis.).
	 */
	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820		/* extract ISI fault bits from SRR1 */
	li	r5,0x400		/* trap number for do_hash_page */
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
1014
1015/*
1016 * Here is the common SLB miss user that is used when going to virtual
1017 * mode for SLB misses, that is currently not used
1018 */
1019#ifdef __DISABLED__
1020	.align	7
1021	.globl	slb_miss_user_common
1022slb_miss_user_common:
1023	mflr	r10
1024	std	r3,PACA_EXGEN+EX_DAR(r13)
1025	stw	r9,PACA_EXGEN+EX_CCR(r13)
1026	std	r10,PACA_EXGEN+EX_LR(r13)
1027	std	r11,PACA_EXGEN+EX_SRR0(r13)
1028	bl	.slb_allocate_user
1029
1030	ld	r10,PACA_EXGEN+EX_LR(r13)
1031	ld	r3,PACA_EXGEN+EX_R3(r13)
1032	lwz	r9,PACA_EXGEN+EX_CCR(r13)
1033	ld	r11,PACA_EXGEN+EX_SRR0(r13)
1034	mtlr	r10
1035	beq-	slb_miss_fault
1036
1037	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
1038	beq-	unrecov_user_slb
1039	mfmsr	r10
1040
1041.machine push
1042.machine "power4"
1043	mtcrf	0x80,r9
1044.machine pop
1045
1046	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
1047	mtmsrd	r10,1
1048
1049	mtspr	SRR0,r11
1050	mtspr	SRR1,r12
1051
1052	ld	r9,PACA_EXGEN+EX_R9(r13)
1053	ld	r10,PACA_EXGEN+EX_R10(r13)
1054	ld	r11,PACA_EXGEN+EX_R11(r13)
1055	ld	r12,PACA_EXGEN+EX_R12(r13)
1056	ld	r13,PACA_EXGEN+EX_R13(r13)
1057	rfid
1058	b	.
1059
1060slb_miss_fault:
1061	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1062	ld	r4,PACA_EXGEN+EX_DAR(r13)
1063	li	r5,0
1064	std	r4,_DAR(r1)
1065	std	r5,_DSISR(r1)
1066	b	handle_page_fault
1067
1068unrecov_user_slb:
1069	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1070	DISABLE_INTS
1071	bl	.save_nvgprs
10721:	addi	r3,r1,STACK_FRAME_OVERHEAD
1073	bl	.unrecoverable_exception
1074	b	1b
1075
1076#endif /* __DISABLED__ */
1077
1078
	/* Alignment interrupt: capture DAR/DSISR into the pt_regs frame
	 * and hand off to the C alignment fixup code.
	 */
	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except
1096
	.align	7
	.globl program_check_common
program_check_common:
	/* Program check interrupt (0x700): straight to the C handler. */
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.program_check_exception
	b	.ret_from_except
1106
	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	/*
	 * FP unavailable (0x800).  From kernel mode this is treated as a
	 * bug (handler call followed by BUG_OPCODE).  From user mode the
	 * task's FP state is loaded, unless the task was inside a TM
	 * transaction, which takes the C slow path instead.
	 */
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* not expected to return */
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	.load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.fp_unavailable_tm
	b	.ret_from_except
#endif
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	/*
	 * AltiVec unavailable (0xf20).  On CPUs with AltiVec: a user-mode
	 * fault loads the task's vector state (TM transactions take the C
	 * slow path).  Kernel-mode faults, and CPUs without AltiVec, fall
	 * through to the generic C exception handler below.
	 */
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: report as exception */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	.load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except
1171
	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	/*
	 * VSX unavailable (0xf40).  Mirrors the AltiVec handler above:
	 * user-mode faults on VSX-capable CPUs load the VSX state (TM
	 * transactions go to the C slow path); everything else falls
	 * through to the generic C exception handler.
	 */
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: report as exception */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	.load_up_vsx		/* branch, not bl: control does not return here */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_tm
	b	.ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except
1205
	.align	7
	.globl tm_unavailable_common
tm_unavailable_common:
	/* TM (facility) unavailable (0xf60): always the C handler. */
	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.tm_unavailable_exception
	b	.ret_from_except
1215
	.align	7
	.globl	__end_handlers
__end_handlers:			/* marker label: end of the handler text above */

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	/* The HV vectors each pair with a KVM_HANDLER entry. */
	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
1236
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.= 0x7000		/* advance location counter to the fixed address */
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000		/* skip over the firmware-owned page */
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1251
/* Space for CPU0's segment table */
	.balign 4096		/* segment table must be page-aligned */
	.globl initial_stab
initial_stab:
	.space	4096		/* one zero-initialized page */
1257
#ifdef CONFIG_PPC_POWERNV
/*
 * Entry for an OPAL-reported machine check: r3 points at the OPAL
 * machine-check event structure.  Record the (virtual) event pointer
 * in the PACA, load the return SRR0/SRR1 from the event, restore r3,
 * and rejoin the normal pSeries machine check path.
 */
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2			/* clear the top 2 bits of the pointer */
	tovirt(r3,r3)			/* convert real -> virtual address */
	std	r3,PACA_OPAL_MC_EVT(r13)	/* remember the event for the handler */
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13		/* SRR0/SRR1 come from the OPAL event */
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)	/* restore the interrupted r3 */
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */
1274
1275
1276/*
1277 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
1279 * r3 has the faulting address
1280 * r9 - r13 are saved in paca->exslb.
1281 * r3 is saved in paca->slb_r3
1282 * We assume we aren't going to take any exceptions during this procedure.
1283 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	/* NOTE(review): presumably the relocatable caller passes a
	 * return address in r11 which must survive the C call — confirm
	 * against the callers. */
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)	/* last: r13 is the PACA pointer */
	rfid
	b	.	/* prevent speculative execution */

	/* MSR_RI clear: redirect the rfid to unrecov_slb in kernel mode */
2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)	/* kernel MSR for the handler */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
1329
/*
 * Unrecoverable SLB miss (MSR_RI was clear): report and spin —
 * .unrecoverable_exception is not expected to return, the 1b loop is
 * a safety net.
 */
unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
1337
1338
#ifdef CONFIG_PPC_970_NAP
/*
 * Wakeup fixup for PPC970 nap: clear flag bits (mask in r10) from the
 * thread-local flags word in r9 (r9/r10/r11 presumably set up by the
 * caller — r11 is the thread_info pointer), then point the saved NIP
 * at the saved LR so the idle task returns as if via blr.
 */
power4_fixup_nap:
	andc	r9,r9,r10		/* flags &= ~mask */
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
1347
1348/*
1349 * Hash table stuff
1350 */
	.align	7
_STATIC(do_hash_page)
	/*
	 * Try to resolve an access fault by inserting a hash PTE.
	 * In: r3 = faulting address, r4 = DSISR bits (rewritten below
	 * into access-permission bits), r5 = trap number.
	 * Routes to handle_dabr_fault, do_ste_alloc, handle_page_fault
	 * or the low_hash_fault path (13:) as appropriate.
	 */
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f			/* negative: HV refused the insert */
1397
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)		/* r4 = faulting address */
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f			/* 0 => handled, take the lite return */
	bl	.save_nvgprs
	mr	r5,r3			/* r5 = do_page_fault() result */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except
1412
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)		/* r4 = breakpoint address */
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl      .do_break
12:	b       .ret_from_except_lite	/* also the success return for 11: above */
1421
1422
1423/* We have a page fault that hash_page could handle but HV refused
1424 * the PTE insertion
1425 */
142613:	bl	.save_nvgprs
1427	mr	r5,r3
1428	addi	r3,r1,STACK_FRAME_OVERHEAD
1429	ld	r4,_DAR(r1)
1430	bl	.low_hash_fault
1431	b	.ret_from_except
1432
1433/*
1434 * We come here as a result of a DSI at a point where we don't want
1435 * to call hash_page, such as when we are accessing memory (possibly
1436 * user memory) inside a PMU interrupt that occurred while interrupts
1437 * were soft-disabled.  We want to invoke the exception handler for
1438 * the access, or panic if there isn't a handler.
1439 */
144077:	bl	.save_nvgprs
1441	mr	r4,r3
1442	addi	r3,r1,STACK_FRAME_OVERHEAD
1443	li	r5,SIGSEGV
1444	bl	.bad_page_fault
1445	b	.ret_from_except
1446
	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0			/* non-zero => could not insert */
	bne-	handle_page_fault
	b	fast_exception_return
1453
1454/*
1455 * r13 points to the PACA, r9 contains the saved CR,
1456 * r11 and r12 contain the saved SRR0 and SRR1.
1457 * r9 - r13 are saved in paca->exslb.
1458 * We assume we aren't going to take any exceptions during this procedure.
1459 * We assume (DAR >> 60) == 0xc.
1460 */
	.align	7
_GLOBAL(do_stab_bolted)
	/*
	 * Bolted segment-table (stab) miss handler.  Computes the kernel
	 * VSID for the faulting address (DAR), finds or evicts an entry
	 * in the primary stab group, installs the new STE, and returns
	 * with rfid.  See the block comment above for entry conditions.
	 */
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
	mfspr	r11,SPRN_DAR			/* ea */

	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr. r9,r11,4,(63 - 46 - 4)
	li	r9,0	/* VSID = 0 for bad address */
	bne-	0f

	/*
	 * Calculate VSID:
	 * This is the kernel vsid, we take the top for context from
	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * Here we know that (ea >> 60) == 0xc
	 */
	lis	r9,(MAX_USER_CONTEXT + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l

	srdi	r10,r11,SID_SHIFT
	rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
	ASM_VSID_SCRAMBLE(r10, r9, 256M)
	rldic	r9,r10,12,16	/* r9 = vsid << 12 */

0:
	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	srdi	r11,r11,SID_SHIFT
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f		/* found a free slot */
	addi	r10,r10,16	/* next ste (16 bytes each) */
	andi.	r11,r10,0x70	/* still within the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11		/* flush any SLB entry for the old ESID */

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio			/* order the two STE doubleword stores */

	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI	/* recoverable? */
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2	/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)	/* last: r13 is the PACA pointer */
	rfid
	b	.	/* prevent speculative execution */
1557