xref: /openbmc/linux/arch/powerpc/kernel/exceptions-64s.S (revision d0c0c9a13f682157e8610565b6125a31d24434bc)
1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 -        : Early init and support code
29 */
30	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
/*
 * SYSCALL_PSERIES_1: first stage of the 0xc00 system-call entry, shared by
 * the reloc-off (system_call_pSeries) and reloc-on (system_call_relon_pSeries)
 * vectors.  On CPUs with CPU_FTR_REAL_LE, syscall number 0x1ebe is the fast
 * endianness-switch call and branches to local label 1 (in SYSCALL_PSERIES_3).
 * Otherwise: r9 <- user r13, r13 <- PACA pointer, r11 <- SRR0 (user NIP).
 * Local label 0 is the rejoin point used by SYSCALL_PSERIES_3's label-2 path
 * when the caller turns out to be userspace.
 */
31#define SYSCALL_PSERIES_1 					\
32BEGIN_FTR_SECTION						\
33	cmpdi	r0,0x1ebe ; 					\
34	beq-	1f ;						\
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
36	mr	r9,r13 ;					\
37	GET_PACA(r13) ;						\
38	mfspr	r11,SPRN_SRR0 ;					\
390:
40
/*
 * SYSCALL_PSERIES_2_RFID: second stage when entered with relocation off.
 * Saves SRR1 in r12, then points SRR0 at system_call_entry and loads SRR1
 * with the kernel MSR from the PACA, and rfid's into the virtual-mode
 * handler.  The trailing "b ." blocks speculative execution past the rfid.
 */
41#define SYSCALL_PSERIES_2_RFID 					\
42	mfspr	r12,SPRN_SRR1 ;					\
43	ld	r10,PACAKBASE(r13) ; 				\
44	LOAD_HANDLER(r10, system_call_entry) ; 			\
45	mtspr	SPRN_SRR0,r10 ; 				\
46	ld	r10,PACAKMSR(r13) ;				\
47	mtspr	SPRN_SRR1,r10 ; 				\
48	rfid ; 							\
49	b	. ;	/* prevent speculative execution */
50
/*
 * SYSCALL_PSERIES_3: tail stage.  Local label 1 (reached from
 * SYSCALL_PSERIES_1 for syscall 0x1ebe) performs the fast LE/BE switch by
 * flipping MSR_LE in SRR1 and returning to userspace.  Local label 2 checks
 * MSR_PR in SRR1: if the caller was userspace it rejoins the normal path at
 * label 0; otherwise it installs r3/r4 into SRR0/SRR1 and r5 into SDR1 and
 * rfid's there.  (NOTE(review): no branch to label 2 is visible in this
 * chunk — presumably entered from elsewhere; confirm against full file.)
 */
51#define SYSCALL_PSERIES_3					\
52	/* Fast LE/BE switch system call */			\
531:	mfspr	r12,SPRN_SRR1 ;					\
54	xori	r12,r12,MSR_LE ;				\
55	mtspr	SPRN_SRR1,r12 ;					\
56	rfid ;		/* return to userspace */		\
57	b	. ;						\
582:	mfspr	r12,SPRN_SRR1 ;					\
59	andi.	r12,r12,MSR_PR ;				\
60	bne	0b ;						\
61	mtspr	SPRN_SRR0,r3 ;					\
62	mtspr	SPRN_SRR1,r4 ;					\
63	mtspr	SPRN_SDR1,r5 ;					\
64	rfid ;							\
65	b	. ;	/* prevent speculative execution */
66
/*
 * SYSCALL_PSERIES_2_DIRECT: second stage for the relocation-on (0x4c00)
 * entry, which stays in virtual mode and so can reach the handler without
 * an rfid.  Relocatable kernels cannot use a direct branch (link-time
 * distance unknown), so the handler address is computed from PACAKBASE and
 * reached via LR; non-relocatable kernels branch directly.  Both variants
 * save SRR1 in r12 and set MSR_RI (interrupts still EE=0) before entry.
 */
67#if defined(CONFIG_RELOCATABLE)
68	/*
69	 * We can't branch directly; in the direct case we use LR
70	 * and system_call_entry restores LR.  (We thus need to move
71	 * LR to r10 in the RFID case too.)
72	 */
73#define SYSCALL_PSERIES_2_DIRECT				\
74	mflr	r10 ;						\
75	ld	r12,PACAKBASE(r13) ; 				\
76	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
77	mtlr	r12 ;						\
78	mfspr	r12,SPRN_SRR1 ;					\
79	/* Re-use of r13... No spare regs to do this */	\
80	li	r13,MSR_RI ;					\
81	mtmsrd 	r13,1 ;						\
82	GET_PACA(r13) ;	/* get r13 back */			\
83	blr ;
84#else
85	/* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT				\
87	mfspr	r12,SPRN_SRR1 ;					\
88	li	r10,MSR_RI ;					\
89	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
90	b	system_call_entry_direct ;
91#endif
92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
/*
 * 0x100: System Reset.  On POWER7 (CONFIG_PPC_P7_NAP) this vector is also
 * the wakeup path from nap: SRR1 wake-state bits are tested to distinguish
 * a true reset from a powersave wakeup, and a KVM secondary thread may be
 * diverted to kvm_start_guest before the wakeup is completed.
 */
101	. = 0x100
102	.globl __start_interrupts
103__start_interrupts:
104
105	.globl system_reset_pSeries;
106system_reset_pSeries:
107	HMT_MEDIUM_PPR_DISCARD
108	SET_SCRATCH0(r13)
109#ifdef CONFIG_PPC_P7_NAP
110BEGIN_FTR_SECTION
111	/* Running native on arch 2.06 or later, check if we are
112	 * waking up from nap. We only handle no state loss and
113	 * supervisor state loss. We do -not- handle hypervisor
114	 * state loss at this time.
115	 */
	/* Extract the SRR1 wakeup-state field; zero => not a nap wakeup */
116	mfspr	r13,SPRN_SRR1
117	rlwinm.	r13,r13,47-31,30,31
118	beq	9f

119
120	/* waking up from powersave (nap) state */
121	cmpwi	cr1,r13,2
122	/* Total loss of HV state is fatal, we could try to use the
123	 * PIR to locate a PACA, then use an emergency stack etc...
124	 * but for now, let's just stay stuck here
125	 */
126	bgt	cr1,.
127	GET_PACA(r13)

128
129#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* If KVM has requested this hardware thread, hand it straight over */
130	li	r0,KVM_HWTHREAD_IN_KERNEL
131	stb	r0,HSTATE_HWTHREAD_STATE(r13)
132	/* Order setting hwthread_state vs. testing hwthread_req */
133	sync
134	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
135	cmpwi	r0,0
136	beq	1f
137	b	kvm_start_guest
1381:
139#endif

140
	/* cr1 still holds wake-state vs 2: eq => state loss, lt => no loss */
141	beq	cr1,2f
142	b	.power7_wakeup_noloss
1432:	b	.power7_wakeup_loss
1449:
145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146#endif /* CONFIG_PPC_P7_NAP */
147	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148				 NOTEST, 0x100)
149
/*
 * 0x200: Machine Check.  Kept minimal in the vector itself (firmware may
 * patch this area) and continued out of line at machine_check_pSeries_0.
 * 0x300: Data Storage (DSI).  On non-SLB (STAB) MMUs the prolog first
 * detours through data_access_check_stab before the common prolog.
 */
150	. = 0x200
151machine_check_pSeries_1:
152	/* This is moved out of line as it can be patched by FW, but
153	 * some code path might still want to branch into the original
154	 * vector
155	 */
156	HMT_MEDIUM_PPR_DISCARD
157	SET_SCRATCH0(r13)		/* save r13 */
158	EXCEPTION_PROLOG_0(PACA_EXMC)
159	b	machine_check_pSeries_0

160
161	. = 0x300
162	.globl data_access_pSeries
163data_access_pSeries:
164	HMT_MEDIUM_PPR_DISCARD
165	SET_SCRATCH0(r13)
166BEGIN_FTR_SECTION
167	b	data_access_check_stab
168data_access_not_stab:
169END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
170	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
171				 KVMTEST, 0x300)
172
/*
 * 0x380: Data SLB miss.  Saves r3 in the EXSLB area, loads the faulting
 * address (DAR) into r3 and SRR1 into r12, then enters .slb_miss_realmode —
 * directly when non-relocatable, via CTR (address computed from PACAKBASE)
 * when CONFIG_RELOCATABLE, since a relative branch cannot be relied upon.
 */
173	. = 0x380
174	.globl data_access_slb_pSeries
175data_access_slb_pSeries:
176	HMT_MEDIUM_PPR_DISCARD
177	SET_SCRATCH0(r13)
178	EXCEPTION_PROLOG_0(PACA_EXSLB)
179	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
180	std	r3,PACA_EXSLB+EX_R3(r13)
181	mfspr	r3,SPRN_DAR
182#ifdef __DISABLED__
183	/* Keep that around for when we re-implement dynamic VSIDs */
184	cmpdi	r3,0
185	bge	slb_miss_user_pseries
186#endif /* __DISABLED__ */
187	mfspr	r12,SPRN_SRR1
188#ifndef CONFIG_RELOCATABLE
189	b	.slb_miss_realmode
190#else
191	/*
192	 * We can't just use a direct branch to .slb_miss_realmode
193	 * because the distance from here to there depends on where
194	 * the kernel ends up being put.
195	 */
196	mfctr	r11
197	ld	r10,PACAKBASE(r13)
198	LOAD_HANDLER(r10, .slb_miss_realmode)
199	mtctr	r10
200	bctr
201#endif
202
/*
 * 0x400: Instruction Storage (standard macro).
 * 0x480: Instruction SLB miss — same shape as the 0x380 data SLB vector,
 * but the faulting address comes from SRR0 rather than DAR.
 * 0x500: External interrupt — open-coded feature section because a
 * ". = x" cannot appear inside a feature section; HV-capable CPUs take it
 * at 0x502 as EXC_HV, others at 0x500 as EXC_STD.
 */
203	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

204
205	. = 0x480
206	.globl instruction_access_slb_pSeries
207instruction_access_slb_pSeries:
208	HMT_MEDIUM_PPR_DISCARD
209	SET_SCRATCH0(r13)
210	EXCEPTION_PROLOG_0(PACA_EXSLB)
211	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
212	std	r3,PACA_EXSLB+EX_R3(r13)
213	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
214#ifdef __DISABLED__
215	/* Keep that around for when we re-implement dynamic VSIDs */
216	cmpdi	r3,0
217	bge	slb_miss_user_pseries
218#endif /* __DISABLED__ */
219	mfspr	r12,SPRN_SRR1
220#ifndef CONFIG_RELOCATABLE
221	b	.slb_miss_realmode
222#else
223	mfctr	r11
224	ld	r10,PACAKBASE(r13)
225	LOAD_HANDLER(r10, .slb_miss_realmode)
226	mtctr	r10
227	bctr
228#endif

229
230	/* We open code these as we can't have a ". = x" (even with
231	 * x = "." within a feature section
232	 */
233	. = 0x500;
234	.globl hardware_interrupt_pSeries;
235	.globl hardware_interrupt_hv;
236hardware_interrupt_pSeries:
237hardware_interrupt_hv:
238	BEGIN_FTR_SECTION
239		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
240					    EXC_HV, SOFTEN_TEST_HV)
241		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
242	FTR_SECTION_ELSE
243		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
244					    EXC_STD, SOFTEN_TEST_HV_201)
245		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
246	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
247
/*
 * Standard vectors 0x600-0xb00: alignment, program check, FP unavailable,
 * decrementer (maskable), hypervisor decrementer (0x980, trap 0x982),
 * doorbell and trap_0b, each paired with its KVM handler where applicable.
 */
248	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
249	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

250
251	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
252	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

253
254	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
255	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

256
257	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
258	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

259
260	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
261	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

262
263	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
264	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
265
/*
 * 0xc00: System Call (reloc-off entry).  When KVM is built in, the KVMTEST
 * must run first with the PACA in r13, so r9/r10/cr are staged in EXGEN and
 * r13 restored from scratch before falling into the shared SYSCALL_PSERIES_*
 * stages (defined at the top of this file).
 */
266	. = 0xc00
267	.globl	system_call_pSeries
268system_call_pSeries:
269	HMT_MEDIUM
270#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
271	SET_SCRATCH0(r13)
272	GET_PACA(r13)
273	std	r9,PACA_EXGEN+EX_R9(r13)
274	std	r10,PACA_EXGEN+EX_R10(r13)
275	mfcr	r9
276	KVMTEST(0xc00)
277	GET_SCRATCH0(r13)
278#endif
279	SYSCALL_PSERIES_1
280	SYSCALL_PSERIES_2_RFID
281	SYSCALL_PSERIES_3
282	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
283
/*
 * 0xd00 single step, then the 0xe00-0xe80 hypervisor vectors and the
 * 0xf00-0xf60 facility-unavailable vectors.  The 0xe/0xf vectors are only
 * 0x20 bytes apart, too small for a full prolog, so each one just saves
 * scratch state and branches to its out-of-line handler further down.
 */
284	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
285	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

286
287	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
288	 * out of line to handle them
289	 */
290	. = 0xe00
291hv_exception_trampoline:
292	SET_SCRATCH0(r13)
293	EXCEPTION_PROLOG_0(PACA_EXGEN)
294	b	h_data_storage_hv

295
296	. = 0xe20
297	SET_SCRATCH0(r13)
298	EXCEPTION_PROLOG_0(PACA_EXGEN)
299	b	h_instr_storage_hv

300
301	. = 0xe40
302	SET_SCRATCH0(r13)
303	EXCEPTION_PROLOG_0(PACA_EXGEN)
304	b	emulation_assist_hv

305
306	. = 0xe60
307	SET_SCRATCH0(r13)
308	EXCEPTION_PROLOG_0(PACA_EXGEN)
309	b	hmi_exception_hv

310
311	. = 0xe80
312	SET_SCRATCH0(r13)
313	EXCEPTION_PROLOG_0(PACA_EXGEN)
314	b	h_doorbell_hv

315
316	/* We need to deal with the Altivec unavailable exception
317	 * here which is at 0xf20, thus in the middle of the
318	 * prolog code of the PerformanceMonitor one. A little
319	 * trickery is thus necessary
320	 */
321performance_monitor_pSeries_1:
322	. = 0xf00
323	SET_SCRATCH0(r13)
324	EXCEPTION_PROLOG_0(PACA_EXGEN)
325	b	performance_monitor_pSeries

326
327altivec_unavailable_pSeries_1:
328	. = 0xf20
329	SET_SCRATCH0(r13)
330	EXCEPTION_PROLOG_0(PACA_EXGEN)
331	b	altivec_unavailable_pSeries

332
333vsx_unavailable_pSeries_1:
334	. = 0xf40
335	SET_SCRATCH0(r13)
336	EXCEPTION_PROLOG_0(PACA_EXGEN)
337	b	vsx_unavailable_pSeries

338
339	. = 0xf60
340	SET_SCRATCH0(r13)
341	EXCEPTION_PROLOG_0(PACA_EXGEN)
342	b	tm_unavailable_pSeries
343
/*
 * 0x1200-0x1800: Cell (CBE) RAS vectors, instruction breakpoint, and the
 * 0x1500 denormalisation exception.  denorm_exception_hv open-codes its
 * prolog because it may divert early to denorm_assist: when HSRR1 flags a
 * denormalised operand, HSRR0 is wound back by 4 so the faulting FP/VSX
 * instruction re-executes after denorm_assist scrubs the registers.
 */
344#ifdef CONFIG_CBE_RAS
345	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
346	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
347#endif /* CONFIG_CBE_RAS */

348
349	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
350	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

351
352	. = 0x1500
353	.global denorm_exception_hv
354denorm_exception_hv:
355	HMT_MEDIUM_PPR_DISCARD
356	mtspr	SPRN_SPRG_HSCRATCH0,r13
357	EXCEPTION_PROLOG_0(PACA_EXGEN)
358	std	r11,PACA_EXGEN+EX_R11(r13)
359	std	r12,PACA_EXGEN+EX_R12(r13)
360	mfspr	r9,SPRN_SPRG_HSCRATCH0
361	std	r9,PACA_EXGEN+EX_R13(r13)
362	mfcr	r9

363
364#ifdef CONFIG_PPC_DENORMALISATION
365	mfspr	r10,SPRN_HSRR1
366	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
367	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
368	addi	r11,r11,-4		/* HSRR0 is next instruction */
369	bne+	denorm_assist
370#endif

371
372	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
373	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

374
375#ifdef CONFIG_CBE_RAS
376	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
377	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
378#endif /* CONFIG_CBE_RAS */

379
380	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
381	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

382
383#ifdef CONFIG_CBE_RAS
384	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
385	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
386#else
387	. = 0x1800
388#endif /* CONFIG_CBE_RAS */
389
390
391/*** Out of line interrupts support ***/
392
/*
 * Out-of-line continuations for the fixed vectors above.
 * machine_check_pSeries / machine_check_fwnmi: full 0x200 prolog.
 * data_access_check_stab: classifies a 0x300 fault — builds a selector in
 * r10 from DAR[60:63], the DSISR "no HPTE" bit, and (for KVM PR) the
 * in-guest flag; the value 0x2c identifies a kernel-region segment-table
 * miss, which goes to .do_stab_bolted, everything else returns to the
 * normal data_access path.
 */
393	.align	7
394	/* moved from 0x200 */
395machine_check_pSeries:
396	.globl machine_check_fwnmi
397machine_check_fwnmi:
398	HMT_MEDIUM_PPR_DISCARD
399	SET_SCRATCH0(r13)		/* save r13 */
400	EXCEPTION_PROLOG_0(PACA_EXMC)
401machine_check_pSeries_0:
402	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
403	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
404	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

405
406	/* moved from 0x300 */
407data_access_check_stab:
408	GET_PACA(r13)
409	std	r9,PACA_EXSLB+EX_R9(r13)
410	std	r10,PACA_EXSLB+EX_R10(r13)
411	mfspr	r10,SPRN_DAR
412	mfspr	r9,SPRN_DSISR
413	srdi	r10,r10,60
414	rlwimi	r10,r9,16,0x20
415#ifdef CONFIG_KVM_BOOK3S_PR
416	lbz	r9,HSTATE_IN_GUEST(r13)
417	rlwimi	r10,r9,8,0x300
418#endif
419	mfcr	r9
420	cmpwi	r10,0x2c
421	beq	do_stab_bolted_pSeries
	/* Not a bolted STAB miss: restore and take the normal 0x300 path */
422	mtcrf	0x80,r9
423	ld	r9,PACA_EXSLB+EX_R9(r13)
424	ld	r10,PACA_EXSLB+EX_R10(r13)
425	b	data_access_not_stab
426do_stab_bolted_pSeries:
427	std	r11,PACA_EXSLB+EX_R11(r13)
428	std	r12,PACA_EXSLB+EX_R12(r13)
429	GET_SCRATCH0(r10)
430	std	r10,PACA_EXSLB+EX_R13(r13)
431	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

432
	/* KVM handlers for the vectors whose prologs ran KVMTEST above */
433	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
434	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
435	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
436	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
437	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
438	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
439
/*
 * denorm_assist: fix up a denormalised operand by copying every affected
 * register onto itself (the move normalises the value).  Pre-2.06 CPUs
 * (POWER6) use fmr on all 32 FP registers with FP enabled and FE0/FE1
 * cleared; 2.06+ (POWER7) uses xvcpsgndp on the first 32 VSX registers.
 * On exit the saved user state (cr, r9-r13, optionally PPR) is restored
 * and HRFID returns to the rewound HSRR0 so the instruction retries.
 */
440#ifdef CONFIG_PPC_DENORMALISATION
441denorm_assist:
442BEGIN_FTR_SECTION
443/*
444 * To denormalise we need to move a copy of the register to itself.
445 * For POWER6 do that here for all FP regs.
446 */
447	mfmsr	r10
448	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
449	xori	r10,r10,(MSR_FE0|MSR_FE1)
450	mtmsrd	r10
451	sync
452	fmr	0,0
453	fmr	1,1
454	fmr	2,2
455	fmr	3,3
456	fmr	4,4
457	fmr	5,5
458	fmr	6,6
459	fmr	7,7
460	fmr	8,8
461	fmr	9,9
462	fmr	10,10
463	fmr	11,11
464	fmr	12,12
465	fmr	13,13
466	fmr	14,14
467	fmr	15,15
468	fmr	16,16
469	fmr	17,17
470	fmr	18,18
471	fmr	19,19
472	fmr	20,20
473	fmr	21,21
474	fmr	22,22
475	fmr	23,23
476	fmr	24,24
477	fmr	25,25
478	fmr	26,26
479	fmr	27,27
480	fmr	28,28
481	fmr	29,29
482	fmr	30,30
483	fmr	31,31
484FTR_SECTION_ELSE
485/*
486 * To denormalise we need to move a copy of the register to itself.
487 * For POWER7 do that here for the first 32 VSX registers only.
488 */
489	mfmsr	r10
490	oris	r10,r10,MSR_VSX@h
491	mtmsrd	r10
492	sync
493	XVCPSGNDP(0,0,0)
494	XVCPSGNDP(1,1,1)
495	XVCPSGNDP(2,2,2)
496	XVCPSGNDP(3,3,3)
497	XVCPSGNDP(4,4,4)
498	XVCPSGNDP(5,5,5)
499	XVCPSGNDP(6,6,6)
500	XVCPSGNDP(7,7,7)
501	XVCPSGNDP(8,8,8)
502	XVCPSGNDP(9,9,9)
503	XVCPSGNDP(10,10,10)
504	XVCPSGNDP(11,11,11)
505	XVCPSGNDP(12,12,12)
506	XVCPSGNDP(13,13,13)
507	XVCPSGNDP(14,14,14)
508	XVCPSGNDP(15,15,15)
509	XVCPSGNDP(16,16,16)
510	XVCPSGNDP(17,17,17)
511	XVCPSGNDP(18,18,18)
512	XVCPSGNDP(19,19,19)
513	XVCPSGNDP(20,20,20)
514	XVCPSGNDP(21,21,21)
515	XVCPSGNDP(22,22,22)
516	XVCPSGNDP(23,23,23)
517	XVCPSGNDP(24,24,24)
518	XVCPSGNDP(25,25,25)
519	XVCPSGNDP(26,26,26)
520	XVCPSGNDP(27,27,27)
521	XVCPSGNDP(28,28,28)
522	XVCPSGNDP(29,29,29)
523	XVCPSGNDP(30,30,30)
524	XVCPSGNDP(31,31,31)
525ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
	/* Restore return address (rewound by 4 in the vector) and user state */
526	mtspr	SPRN_HSRR0,r11
527	mtcrf	0x80,r9
528	ld	r9,PACA_EXGEN+EX_R9(r13)
529	RESTORE_PPR_PACA(PACA_EXGEN, r10)
530	ld	r10,PACA_EXGEN+EX_R10(r13)
531	ld	r11,PACA_EXGEN+EX_R11(r13)
532	ld	r12,PACA_EXGEN+EX_R12(r13)
533	ld	r13,PACA_EXGEN+EX_R13(r13)
534	HRFID
535	b	.
536#endif
537
/*
 * Out-of-line prolog continuations for the cramped 0xe00-0xe80 hypervisor
 * vectors and the 0xf00-0xf60 facility-unavailable vectors, each with its
 * KVM handler.
 */
538	.align	7
539	/* moved from 0xe00 */
540	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
541	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
542	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
543	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
544	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
545	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
546	STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
547	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
548	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
549	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)

550
551	/* moved from 0xf00 */
552	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
553	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
554	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
555	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
556	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
557	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
558	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
559	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
560
561/*
562 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
563 * - If it was a decrementer interrupt, we bump the dec to max and and return.
564 * - If it was a doorbell we return immediately since doorbells are edge
565 *   triggered and won't automatically refire.
566 * - else we hard disable and return.
567 * This is called with r10 containing the value to OR to the paca field.
568 */
/*
 * MASKED_INTERRUPT(_H): body of masked_interrupt / masked_Hinterrupt,
 * instantiated below for the standard (SRR) and hypervisor (HSRR) cases.
 * Records r10 (a PACA_IRQ_* flag) in paca->irq_happened; a decrementer is
 * pushed out to the maximum count, a doorbell simply returns (edge
 * triggered, won't refire), and anything else is hard-disabled by clearing
 * MSR_EE in the saved (H)SRR1 before returning with (h)rfid.
 */
569#define MASKED_INTERRUPT(_H)				\
570masked_##_H##interrupt:					\
571	std	r11,PACA_EXGEN+EX_R11(r13);		\
572	lbz	r11,PACAIRQHAPPENED(r13);		\
573	or	r11,r11,r10;				\
574	stb	r11,PACAIRQHAPPENED(r13);		\
575	cmpwi	r10,PACA_IRQ_DEC;			\
576	bne	1f;					\
577	lis	r10,0x7fff;				\
578	ori	r10,r10,0xffff;				\
579	mtspr	SPRN_DEC,r10;				\
580	b	2f;					\
5811:	cmpwi	r10,PACA_IRQ_DBELL;			\
582	beq	2f;					\
583	mfspr	r10,SPRN_##_H##SRR1;			\
584	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
585	rotldi	r10,r10,16;				\
586	mtspr	SPRN_##_H##SRR1,r10;			\
5872:	mtcrf	0x80,r9;				\
588	ld	r9,PACA_EXGEN+EX_R9(r13);		\
589	ld	r10,PACA_EXGEN+EX_R10(r13);		\
590	ld	r11,PACA_EXGEN+EX_R11(r13);		\
591	GET_SCRATCH0(r13);				\
592	##_H##rfid;					\
593	b	.

594
595	MASKED_INTERRUPT()
596	MASKED_INTERRUPT(H)
597
598/*
599 * Called from arch_local_irq_enable when an interrupt needs
600 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
601 * which kind of interrupt. MSR:EE is already off. We generate a
602 * stackframe like if a real interrupt had happened.
603 *
604 * Note: While MSR:EE is off, we need to make sure that _MSR
605 * in the generated frame has EE set to 1 or the exception
606 * handler will not properly re-enable them.
607 */
/*
 * __replay_interrupt(r3 = trap number): re-deliver a previously masked
 * interrupt by faking the exception entry state (r12 = MSR with EE forced
 * on, r11 = LR as return NIP, r9 = CR) and branching to the matching
 * *_common handler.  Unknown trap values fall through and return.
 */
608_GLOBAL(__replay_interrupt)
609	/* We are going to jump to the exception common code which
610	 * will retrieve various register values from the PACA which
611	 * we don't give a damn about, so we don't bother storing them.
612	 */
613	mfmsr	r12
614	mflr	r11
615	mfcr	r9
616	ori	r12,r12,MSR_EE
617	cmpwi	r3,0x900
618	beq	decrementer_common
619	cmpwi	r3,0x500
620	beq	hardware_interrupt_common
621BEGIN_FTR_SECTION
622	cmpwi	r3,0xe80
623	beq	h_doorbell_common
624FTR_SECTION_ELSE
625	cmpwi	r3,0xa00
626	beq	doorbell_super_common
627ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
628	blr
628	blr
629
/*
 * FWNMI (firmware-assisted NMI) system reset entry: firmware vectors
 * here instead of 0x100; shares system_reset_common with the real vector.
 */
630#ifdef CONFIG_PPC_PSERIES
631/*
632 * Vectors for the FWNMI option.  Share common code.
633 */
634	.globl system_reset_fwnmi
635      .align 7
636system_reset_fwnmi:
637	HMT_MEDIUM_PPR_DISCARD
638	SET_SCRATCH0(r13)		/* save r13 */
639	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
640				 NOTEST, 0x100)

641
642#endif /* CONFIG_PPC_PSERIES */
643
/*
 * Dead code (__DISABLED__): real-mode stub that would re-enter virtual
 * mode (set IR/DR/RI in MSR) and rfid to slb_miss_user_common, for the
 * never-reinstated dynamic-VSID user SLB miss path.  Moves the state saved
 * in EXSLB over to EXGEN on the way.
 */
644#ifdef __DISABLED__
645/*
646 * This is used for when the SLB miss handler has to go virtual,
647 * which doesn't happen for now anymore but will once we re-implement
648 * dynamic VSIDs for shared page tables
649 */
650slb_miss_user_pseries:
651	std	r10,PACA_EXGEN+EX_R10(r13)
652	std	r11,PACA_EXGEN+EX_R11(r13)
653	std	r12,PACA_EXGEN+EX_R12(r13)
654	GET_SCRATCH0(r10)
655	ld	r11,PACA_EXSLB+EX_R9(r13)
656	ld	r12,PACA_EXSLB+EX_R3(r13)
657	std	r10,PACA_EXGEN+EX_R13(r13)
658	std	r11,PACA_EXGEN+EX_R9(r13)
659	std	r12,PACA_EXGEN+EX_R3(r13)
660	clrrdi	r12,r13,32
661	mfmsr	r10
662	mfspr	r11,SRR0			/* save SRR0 */
663	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
664	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
665	mtspr	SRR0,r12
666	mfspr	r12,SRR1			/* and SRR1 */
667	mtspr	SRR1,r10
668	rfid
669	b	.				/* prevent spec. execution */
670#endif /* __DISABLED__ */
671
672/*
673 * Code from here down to __end_handlers is invoked from the
674 * exception prologs above.  Because the prologs assemble the
675 * addresses of these handlers using the LOAD_HANDLER macro,
676 * which uses an ori instruction, these handlers must be in
677 * the first 64k of the kernel image.
678 */
679
680/*** Common interrupt handlers ***/
681
/*
 * Common (virtual-mode) second-level handlers, reached from the prologs
 * above via LOAD_HANDLER.  Most are generated by the STD_EXCEPTION_COMMON*
 * macros; machine check is open-coded because it uses the PACA_EXMC save
 * area instead of PACA_EXGEN.
 */
682	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

683
684	/*
685	 * Machine check is different because we use a different
686	 * save area: PACA_EXMC instead of PACA_EXGEN.
687	 */
688	.align	7
689	.globl machine_check_common
690machine_check_common:
691	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
692	FINISH_NAP
693	DISABLE_INTS
694	bl	.save_nvgprs
695	addi	r3,r1,STACK_FRAME_OVERHEAD
696	bl	.machine_check_exception
697	b	.ret_from_except

698
699	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
700	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
701	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
702#ifdef CONFIG_PPC_DOORBELL
703	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
704#else
705	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
706#endif
707	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
708	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
709	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
710	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
711	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
712#ifdef CONFIG_PPC_DOORBELL
713	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
714#else
715	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
716#endif
717	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
718	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
719	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
720#ifdef CONFIG_ALTIVEC
721	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
722#else
723	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
724#endif
725#ifdef CONFIG_CBE_RAS
726	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
727	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
728	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
729#endif /* CONFIG_CBE_RAS */
730
731	/*
732	 * Relocation-on interrupts: A subset of the interrupts can be delivered
733	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
734	 * it.  Addresses are the same as the original interrupt addresses, but
735	 * offset by 0xc000000000004000.
736	 * It's impossible to receive interrupts below 0x300 via this mechanism.
737	 * KVM: None of these traps are from the guest ; anything that escalated
738	 * to HV=1 from HV=0 is delivered via real mode handlers.
739	 */
740
741	/*
742	 * This uses the standard macro, since the original 0x300 vector
743	 * only has extra guff for STAB-based processors -- which never
744	 * come here.
745	 */
/*
 * 0x4300/0x4380: relocation-on (AIL) variants of data access and data SLB
 * miss.  Same flow as the 0x300/0x380 real-mode vectors, minus the STAB
 * detour and the KVM test (relon traps never come from a guest).
 */
746	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
747	. = 0x4380
748	.globl data_access_slb_relon_pSeries
749data_access_slb_relon_pSeries:
750	SET_SCRATCH0(r13)
751	EXCEPTION_PROLOG_0(PACA_EXSLB)
752	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
753	std	r3,PACA_EXSLB+EX_R3(r13)
754	mfspr	r3,SPRN_DAR
755	mfspr	r12,SPRN_SRR1
756#ifndef CONFIG_RELOCATABLE
757	b	.slb_miss_realmode
758#else
759	/*
760	 * We can't just use a direct branch to .slb_miss_realmode
761	 * because the distance from here to there depends on where
762	 * the kernel ends up being put.
763	 */
764	mfctr	r11
765	ld	r10,PACAKBASE(r13)
766	LOAD_HANDLER(r10, .slb_miss_realmode)
767	mtctr	r10
768	bctr
769#endif
770
/*
 * 0x4400/0x4480: relocation-on instruction access and instruction SLB
 * miss; faulting address comes from SRR0 rather than DAR.
 */
771	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
772	. = 0x4480
773	.globl instruction_access_slb_relon_pSeries
774instruction_access_slb_relon_pSeries:
775	SET_SCRATCH0(r13)
776	EXCEPTION_PROLOG_0(PACA_EXSLB)
777	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
778	std	r3,PACA_EXSLB+EX_R3(r13)
779	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
780	mfspr	r12,SPRN_SRR1
781#ifndef CONFIG_RELOCATABLE
782	b	.slb_miss_realmode
783#else
784	mfctr	r11
785	ld	r10,PACAKBASE(r13)
786	LOAD_HANDLER(r10, .slb_miss_realmode)
787	mtctr	r10
788	bctr
789#endif
790
/*
 * 0x4500-0x4c00: relocation-on external interrupt, standard vectors, and
 * the 0x4c00 system call.  The syscall entry reuses the shared
 * SYSCALL_PSERIES_* stages, but with the _DIRECT second stage: already in
 * virtual mode, so no rfid is needed to reach system_call_entry_direct.
 */
791	. = 0x4500
792	.globl hardware_interrupt_relon_pSeries;
793	.globl hardware_interrupt_relon_hv;
794hardware_interrupt_relon_pSeries:
795hardware_interrupt_relon_hv:
796	BEGIN_FTR_SECTION
797		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
798	FTR_SECTION_ELSE
799		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
800	ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
801	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
802	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
803	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
804	MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
805	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
806	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
807	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

808
809	. = 0x4c00
810	.globl system_call_relon_pSeries
811system_call_relon_pSeries:
812	HMT_MEDIUM
813	SYSCALL_PSERIES_1
814	SYSCALL_PSERIES_2_DIRECT
815	SYSCALL_PSERIES_3
816
/*
 * Remaining relocation-on vectors: 0x4d00 single step, the cramped
 * 0x4e00-0x4e80 hypervisor trampolines and 0x4f00-0x4f60 facility
 * trampolines (same out-of-line technique as their real-mode twins),
 * 0x5300 instruction breakpoint, 0x5500 denorm (bounced to the real-mode
 * HV handler), 0x5600 SCOM maintenance and 0x5700 altivec assist.
 */
817	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

818
819	. = 0x4e00
820	SET_SCRATCH0(r13)
821	EXCEPTION_PROLOG_0(PACA_EXGEN)
822	b	h_data_storage_relon_hv

823
824	. = 0x4e20
825	SET_SCRATCH0(r13)
826	EXCEPTION_PROLOG_0(PACA_EXGEN)
827	b	h_instr_storage_relon_hv

828
829	. = 0x4e40
830	SET_SCRATCH0(r13)
831	EXCEPTION_PROLOG_0(PACA_EXGEN)
832	b	emulation_assist_relon_hv

833
834	. = 0x4e60
835	SET_SCRATCH0(r13)
836	EXCEPTION_PROLOG_0(PACA_EXGEN)
837	b	hmi_exception_relon_hv

838
839	. = 0x4e80
840	SET_SCRATCH0(r13)
841	EXCEPTION_PROLOG_0(PACA_EXGEN)
842	b	h_doorbell_relon_hv

843
844performance_monitor_relon_pSeries_1:
845	. = 0x4f00
846	SET_SCRATCH0(r13)
847	EXCEPTION_PROLOG_0(PACA_EXGEN)
848	b	performance_monitor_relon_pSeries

849
850altivec_unavailable_relon_pSeries_1:
851	. = 0x4f20
852	SET_SCRATCH0(r13)
853	EXCEPTION_PROLOG_0(PACA_EXGEN)
854	b	altivec_unavailable_relon_pSeries

855
856vsx_unavailable_relon_pSeries_1:
857	. = 0x4f40
858	SET_SCRATCH0(r13)
859	EXCEPTION_PROLOG_0(PACA_EXGEN)
860	b	vsx_unavailable_relon_pSeries

861
862tm_unavailable_relon_pSeries_1:
863	. = 0x4f60
864	SET_SCRATCH0(r13)
865	EXCEPTION_PROLOG_0(PACA_EXGEN)
866	b	tm_unavailable_relon_pSeries

867
868	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
869#ifdef CONFIG_PPC_DENORMALISATION
870	. = 0x5500
871	b	denorm_exception_hv
872#endif
873#ifdef CONFIG_HVC_SCOM
874	STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
875	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
876#endif /* CONFIG_HVC_SCOM */
877	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
878
/*
 * End of the copied-to-zero vector region.  system_call_entry_direct is
 * the reloc-on syscall landing point (restores LR that the relocatable
 * SYSCALL_PSERIES_2_DIRECT stashed in r10) falling through to
 * system_call_entry, the rfid landing point; both tail into
 * system_call_common.
 */
879	/* Other future vectors */
880	.align	7
881	.globl	__end_interrupts
882__end_interrupts:

883
884	.align	7
885system_call_entry_direct:
886#if defined(CONFIG_RELOCATABLE)
887	/* The first level prologue may have used LR to get here, saving
888	 * orig in r10.  To save hacking/ifdeffing common code, restore here.
889	 */
890	mtlr	r10
891#endif
892system_call_entry:
893	b	system_call_common

894
895ppc64_runlatch_on_trampoline:
896	b	.__ppc64_runlatch_on
897
898/*
899 * Here we have detected that the kernel stack pointer is bad.
900 * R9 contains the saved CR, r13 points to the paca,
901 * r10 contains the (bad) kernel stack pointer,
902 * r11 and r12 contain the saved SRR0 and SRR1.
903 * We switch to using an emergency stack, save the registers there,
904 * and call kernel_bad_stack(), which panics.
905 */
906bad_stack:
907	ld	r1,PACAEMERGSP(r13)
908	subi	r1,r1,64+INT_FRAME_SIZE
909	std	r9,_CCR(r1)
910	std	r10,GPR1(r1)
911	std	r11,_NIP(r1)
912	std	r12,_MSR(r1)
913	mfspr	r11,SPRN_DAR
914	mfspr	r12,SPRN_DSISR
915	std	r11,_DAR(r1)
916	std	r12,_DSISR(r1)
917	mflr	r10
918	mfctr	r11
919	mfxer	r12
920	std	r10,_LINK(r1)
921	std	r11,_CTR(r1)
922	std	r12,_XER(r1)
923	SAVE_GPR(0,r1)
924	SAVE_GPR(2,r1)
925	ld	r10,EX_R3(r3)
926	std	r10,GPR3(r1)
927	SAVE_GPR(4,r1)
928	SAVE_4GPRS(5,r1)
929	ld	r9,EX_R9(r3)
930	ld	r10,EX_R10(r3)
931	SAVE_2GPRS(9,r1)
932	ld	r9,EX_R11(r3)
933	ld	r10,EX_R12(r3)
934	ld	r11,EX_R13(r3)
935	std	r9,GPR11(r1)
936	std	r10,GPR12(r1)
937	std	r11,GPR13(r1)
938BEGIN_FTR_SECTION
939	ld	r10,EX_CFAR(r3)
940	std	r10,ORIG_GPR3(r1)
941END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
942	SAVE_8GPRS(14,r1)
943	SAVE_10GPRS(22,r1)
944	lhz	r12,PACA_TRAP_SAVE(r13)
945	std	r12,_TRAP(r1)
946	addi	r11,r1,INT_FRAME_SIZE
947	std	r11,0(r1)
948	li	r12,0
949	std	r12,0(r11)
950	ld	r2,PACATOC(r13)
951	ld	r11,exception_marker@toc(r2)
952	std	r12,RESULT(r1)
953	std	r11,STACK_FRAME_OVERHEAD-16(r1)
9541:	addi	r3,r1,STACK_FRAME_OVERHEAD
955	bl	.kernel_bad_stack
956	b	1b
957
958/*
959 * Here r13 points to the paca, r9 contains the saved CR,
960 * SRR0 and SRR1 are saved in r11 and r12,
961 * r9 - r13 are saved in paca->exgen.
962 */
/*
 * data_access_common: virtual-mode 0x300 handler.  Captures DAR/DSISR
 * into the PACA save area before the common prolog (which may clobber
 * SPRs), then tries the hashed-page-table fast path in .do_hash_page
 * with r3=fault address, r4=DSISR, r5=trap (0x300).
 */
963	.align	7
964	.globl data_access_common
965data_access_common:
966	mfspr	r10,SPRN_DAR
967	std	r10,PACA_EXGEN+EX_DAR(r13)
968	mfspr	r10,SPRN_DSISR
969	stw	r10,PACA_EXGEN+EX_DSISR(r13)
970	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
971	DISABLE_INTS
972	ld	r12,_MSR(r1)
973	ld	r3,PACA_EXGEN+EX_DAR(r13)
974	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
975	li	r5,0x300
976	b	.do_hash_page		/* Try to handle as hpte fault */
977
/*
 * h_data_storage_common: virtual-mode 0xe00 (HDSI) handler.  Saves
 * HDAR/HDSISR and reports via .unknown_exception — the kernel does not
 * expect to take this trap itself.
 */
978	.align  7
979	.globl  h_data_storage_common
980h_data_storage_common:
981	mfspr   r10,SPRN_HDAR
982	std     r10,PACA_EXGEN+EX_DAR(r13)
983	mfspr   r10,SPRN_HDSISR
984	stw     r10,PACA_EXGEN+EX_DSISR(r13)
985	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
986	bl      .save_nvgprs
987	DISABLE_INTS
988	addi    r3,r1,STACK_FRAME_OVERHEAD
989	bl      .unknown_exception
990	b       .ret_from_except
991
/*
 * instruction_access_common: virtual-mode 0x400 handler.  Fault address
 * is the NIP itself; r4 gets the SRR1 fault-status bits (mask 0x5820)
 * and r5 the trap number before trying .do_hash_page.
 */
992	.align	7
993	.globl instruction_access_common
994instruction_access_common:
995	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
996	DISABLE_INTS
997	ld	r12,_MSR(r1)
998	ld	r3,_NIP(r1)
999	andis.	r4,r12,0x5820
1000	li	r5,0x400
1001	b	.do_hash_page		/* Try to handle as hpte fault */

1002
1003	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
1004
1005/*
1006 * Here is the common SLB miss user that is used when going to virtual
1007 * mode for SLB misses, that is currently not used
1008 */
/*
 * Dead code (__DISABLED__): virtual-mode user SLB miss handler for the
 * unimplemented dynamic-VSID scheme.  Calls .slb_allocate_user, then
 * either returns through rfid (clearing RI while SRR0/1 are live), takes
 * slb_miss_fault as a page fault, or falls into unrecov_user_slb when RI
 * was already clear (unrecoverable).
 */
1009#ifdef __DISABLED__
1010	.align	7
1011	.globl	slb_miss_user_common
1012slb_miss_user_common:
1013	mflr	r10
1014	std	r3,PACA_EXGEN+EX_DAR(r13)
1015	stw	r9,PACA_EXGEN+EX_CCR(r13)
1016	std	r10,PACA_EXGEN+EX_LR(r13)
1017	std	r11,PACA_EXGEN+EX_SRR0(r13)
1018	bl	.slb_allocate_user

1019
1020	ld	r10,PACA_EXGEN+EX_LR(r13)
1021	ld	r3,PACA_EXGEN+EX_R3(r13)
1022	lwz	r9,PACA_EXGEN+EX_CCR(r13)
1023	ld	r11,PACA_EXGEN+EX_SRR0(r13)
1024	mtlr	r10
1025	beq-	slb_miss_fault

1026
1027	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
1028	beq-	unrecov_user_slb
1029	mfmsr	r10

1030
1031.machine push
1032.machine "power4"
1033	mtcrf	0x80,r9
1034.machine pop

1035
1036	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
1037	mtmsrd	r10,1

1038
1039	mtspr	SRR0,r11
1040	mtspr	SRR1,r12

1041
1042	ld	r9,PACA_EXGEN+EX_R9(r13)
1043	ld	r10,PACA_EXGEN+EX_R10(r13)
1044	ld	r11,PACA_EXGEN+EX_R11(r13)
1045	ld	r12,PACA_EXGEN+EX_R12(r13)
1046	ld	r13,PACA_EXGEN+EX_R13(r13)
1047	rfid
1048	b	.

1049
1050slb_miss_fault:
1051	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1052	ld	r4,PACA_EXGEN+EX_DAR(r13)
1053	li	r5,0
1054	std	r4,_DAR(r1)
1055	std	r5,_DSISR(r1)
1056	b	handle_page_fault

1057
1058unrecov_user_slb:
1059	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1060	DISABLE_INTS
1061	bl	.save_nvgprs
10621:	addi	r3,r1,STACK_FRAME_OVERHEAD
1063	bl	.unrecoverable_exception
1064	b	1b

1065
1066#endif /* __DISABLED__ */
1066#endif /* __DISABLED__ */
1067
1068
1069/*
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r12 contain the saved SRR1, SRR0 is still ready for return
1072 * r3 has the faulting address
1073 * r9 - r13 are saved in paca->exslb.
1074 * r3 is saved in paca->slb_r3
1075 * We assume we aren't going to take any exceptions during this procedure.
1076 */
1077_GLOBAL(slb_miss_realmode)
1078	mflr	r10
1079#ifdef CONFIG_RELOCATABLE
1080	mtctr	r11
1081#endif
1082
1083	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1084	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1085
1086	bl	.slb_allocate_realmode
1087
1088	/* All done -- return from exception. */
1089
1090	ld	r10,PACA_EXSLB+EX_LR(r13)
1091	ld	r3,PACA_EXSLB+EX_R3(r13)
1092	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1093
1094	mtlr	r10
1095
1096	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1097	beq-	2f
1098
1099.machine	push
1100.machine	"power4"
1101	mtcrf	0x80,r9
1102	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1103.machine	pop
1104
1105	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1106	ld	r9,PACA_EXSLB+EX_R9(r13)
1107	ld	r10,PACA_EXSLB+EX_R10(r13)
1108	ld	r11,PACA_EXSLB+EX_R11(r13)
1109	ld	r12,PACA_EXSLB+EX_R12(r13)
1110	ld	r13,PACA_EXSLB+EX_R13(r13)
1111	rfid
1112	b	.	/* prevent speculative execution */
1113
11142:	mfspr	r11,SPRN_SRR0
1115	ld	r10,PACAKBASE(r13)
1116	LOAD_HANDLER(r10,unrecov_slb)
1117	mtspr	SPRN_SRR0,r10
1118	ld	r10,PACAKMSR(r13)
1119	mtspr	SPRN_SRR1,r10
1120	rfid
1121	b	.
1122
/*
 * Unrecoverable SLB miss (kernel/realmode path): report and spin.
 * unrecoverable_exception() is not expected to return.
 */
unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
1130
1131
#ifdef CONFIG_PPC_970_NAP
/*
 * Fix up an exception that interrupted a PPC970 nap: clear the flag
 * bits (mask in r10) from the thread's local flags (thread_info in
 * r11), then rewrite the saved NIP with the saved LR so the interrupt
 * return acts like a blr out of the idle loop.
 * NOTE(review): r9/r10/r11 contents are set up by the caller — confirm
 * against the nap-test code in the exception prologs.
 */
power4_fixup_nap:
	andc	r9,r9,r10		/* clear nap flag bits */
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
1140
	.align	7
	.globl alignment_common
/*
 * 0x600 alignment interrupt: capture DAR/DSISR into the PACA before
 * the common prolog can disturb them, build the exception frame, then
 * call the C handler.
 */
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)			/* expose DAR/DSISR in the pt_regs frame */
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = struct pt_regs * */
	bl	.alignment_exception
	b	.ret_from_except
1158
	.align	7
	.globl program_check_common
/*
 * 0x700 program-check interrupt: build the frame, save non-volatile
 * GPRs (the handler may deliver a signal or emulate), call the C
 * handler with a pt_regs pointer.
 */
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = struct pt_regs * */
	bl	.program_check_exception
	b	.ret_from_except
1168
	.align	7
	.globl fp_unavailable_common
/*
 * 0x800 FP-unavailable: from user space, just load the FP state and
 * take the fast exit; from kernel space this is a bug (kernel code
 * must not use FP), so report and trap.
 */
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* handler should not return */
1:	bl	.load_up_fpu
	b	fast_exception_return
1181
	.align	7
	.globl altivec_unavailable_common
/*
 * 0xf20 AltiVec-unavailable: on CPUs with AltiVec, a user-mode fault
 * just loads the vector state and fast-exits; kernel-mode faults and
 * non-AltiVec CPUs fall through to the C exception handler.
 */
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: no lazy load */
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except
1199
	.align	7
	.globl vsx_unavailable_common
/*
 * 0xf40 VSX-unavailable: mirrors altivec_unavailable_common, except
 * the user-mode path tail-branches (b, not bl) into .load_up_vsx,
 * which is expected to perform the exception return itself.
 */
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: no lazy load */
	b	.load_up_vsx		/* tail call; does its own return */
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except
1216
	.align	7
	.globl tm_unavailable_common
/*
 * 0xf60 TM (transactional memory) unavailable: always goes to the C
 * handler; there is no lazy-load fast path here.
 * NOTE(review): unlike the sibling handlers above, this one does not
 * DISABLE_INTS before the C call — confirm this is intentional.
 */
tm_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.tm_unavailable_exception
	b	.ret_from_except
1225
	.align	7
	.globl	__end_handlers
__end_handlers:	/* marks the end of the out-of-line handler area */

	/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)

	/* Non-HV relocation-on out-of-line vectors (no KVM handlers) */
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
1246
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * (RPA = the PAPR/RISC Platform Architecture firmware spec.)
 */
	.= 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1261
/* Space for CPU0's segment table */
	.balign 4096			/* STAB must be page-aligned */
	.globl initial_stab
initial_stab:
	.space	4096			/* one zero-filled 4 KiB page */
1267
#ifdef CONFIG_PPC_POWERNV
/*
 * OPAL machine-check entry: r3 carries the (real-mode, low-2-bit
 * tagged) pointer to the OPAL MC event. Record the virtual pointer in
 * the PACA, restore SRR0/SRR1 and r3 from the event (r13 is used as a
 * scratch register here, then restored from SCRATCH0), and continue
 * through the regular pSeries machine-check path.
 */
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM_PPR_DISCARD
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2			/* strip tag bits from event pointer */
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)		/* restore r13 */
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */
1284
1285
1286/*
1287 * Hash table stuff
1288 */
1289	.align	7
1290_STATIC(do_hash_page)
1291	std	r3,_DAR(r1)
1292	std	r4,_DSISR(r1)
1293
1294	andis.	r0,r4,0xa410		/* weird error? */
1295	bne-	handle_page_fault	/* if not, try to insert a HPTE */
1296	andis.  r0,r4,DSISR_DABRMATCH@h
1297	bne-    handle_dabr_fault
1298
1299BEGIN_FTR_SECTION
1300	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1301	bne-	do_ste_alloc		/* If so handle it */
1302END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1303
1304	CURRENT_THREAD_INFO(r11, r1)
1305	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
1306	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
1307	bne	77f			/* then don't call hash_page now */
1308	/*
1309	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1310	 * accessing a userspace segment (even from the kernel). We assume
1311	 * kernel addresses always have the high bit set.
1312	 */
1313	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1314	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1315	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1316	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1317	ori	r4,r4,1			/* add _PAGE_PRESENT */
1318	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
1319
1320	/*
1321	 * r3 contains the faulting address
1322	 * r4 contains the required access permissions
1323	 * r5 contains the trap number
1324	 *
1325	 * at return r3 = 0 for success, 1 for page fault, negative for error
1326	 */
1327	bl	.hash_page		/* build HPTE if possible */
1328	cmpdi	r3,0			/* see if hash_page succeeded */
1329
1330	/* Success */
1331	beq	fast_exc_return_irq	/* Return from exception on success */
1332
1333	/* Error */
1334	blt-	13f
1335
/* Here we have a page fault that hash_page can't handle. */
/* Calls do_page_fault(regs, dar, dsisr); a non-zero return means the
 * fault could not be resolved and is reported via bad_page_fault. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = struct pt_regs * */
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f				/* resolved: light-weight return */
	bl	.save_nvgprs
	mr	r5,r3				/* r5 = do_page_fault() result */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except
1350
/* We have a data breakpoint exception - handle it */
/* do_break(regs, dar, dsisr); label 12 is shared with the successful
 * page-fault path above for the light-weight exception return. */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .do_break
12:	b       .ret_from_except_lite
1359
1360
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
/* low_hash_fault(regs, dar, rc) with rc = hash_page()'s negative result */
13:	bl	.save_nvgprs
	mr	r5,r3			/* r5 = hash_page() return value */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except
1370
1371/*
1372 * We come here as a result of a DSI at a point where we don't want
1373 * to call hash_page, such as when we are accessing memory (possibly
1374 * user memory) inside a PMU interrupt that occurred while interrupts
1375 * were soft-disabled.  We want to invoke the exception handler for
1376 * the access, or panic if there isn't a handler.
1377 */
137877:	bl	.save_nvgprs
1379	mr	r4,r3
1380	addi	r3,r1,STACK_FRAME_OVERHEAD
1381	li	r5,SIGSEGV
1382	bl	.bad_page_fault
1383	b	.ret_from_except
1384
	/* here we have a segment miss */
	/* Non-SLB (segment-table) CPUs only; reached via the feature
	 * section in do_hash_page. */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault	/* allocation failed: real page fault */
	b	fast_exception_return
1391
1392/*
1393 * r13 points to the PACA, r9 contains the saved CR,
1394 * r11 and r12 contain the saved SRR0 and SRR1.
1395 * r9 - r13 are saved in paca->exslb.
1396 * We assume we aren't going to take any exceptions during this procedure.
1397 * We assume (DAR >> 60) == 0xc.
1398 */
1399	.align	7
1400_GLOBAL(do_stab_bolted)
1401	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1402	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1403
1404	/* Hash to the primary group */
1405	ld	r10,PACASTABVIRT(r13)
1406	mfspr	r11,SPRN_DAR
1407	srdi	r11,r11,28
1408	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1409
1410	/* Calculate VSID */
1411	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
1412	li	r9,0x1
1413	rldimi  r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1414	ASM_VSID_SCRAMBLE(r11, r9, 256M)
1415	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
1416
1417	/* Search the primary group for a free entry */
14181:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1419	andi.	r11,r11,0x80
1420	beq	2f
1421	addi	r10,r10,16
1422	andi.	r11,r10,0x70
1423	bne	1b
1424
1425	/* Stick for only searching the primary group for now.		*/
1426	/* At least for now, we use a very simple random castout scheme */
1427	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
1428	mftb	r11
1429	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1430	ori	r11,r11,0x10
1431
1432	/* r10 currently points to an ste one past the group of interest */
1433	/* make it point to the randomly selected entry			*/
1434	subi	r10,r10,128
1435	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1436
1437	isync			/* mark the entry invalid		*/
1438	ld	r11,0(r10)
1439	rldicl	r11,r11,56,1	/* clear the valid bit */
1440	rotldi	r11,r11,8
1441	std	r11,0(r10)
1442	sync
1443
1444	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1445	slbie	r11
1446
14472:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1448	eieio
1449
1450	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1451	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1452	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1453	std	r11,0(r10)	/* Put new entry back into the stab	*/
1454
1455	sync
1456
1457	/* All done -- return from exception. */
1458	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1459	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1460
1461	andi.	r10,r12,MSR_RI
1462	beq-	unrecov_slb
1463
1464	mtcrf	0x80,r9			/* restore CR */
1465
1466	mfmsr	r10
1467	clrrdi	r10,r10,2
1468	mtmsrd	r10,1
1469
1470	mtspr	SPRN_SRR0,r11
1471	mtspr	SPRN_SRR1,r12
1472	ld	r9,PACA_EXSLB+EX_R9(r13)
1473	ld	r10,PACA_EXSLB+EX_R10(r13)
1474	ld	r11,PACA_EXSLB+EX_R11(r13)
1475	ld	r12,PACA_EXSLB+EX_R12(r13)
1476	ld	r13,PACA_EXSLB+EX_R13(r13)
1477	rfid
1478	b	.	/* prevent speculative execution */
1479