1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support code and common interrupt prologs
24 * 0x6000 - 0x6fff : Initial (CPU0) segment table
25 * 0x7000 - 0x7fff : FWNMI data area
26 * 0x8000 -        : Early init and support code
27 */
28
29/*
30 * This is the start of the interrupt handlers for pSeries
31 * This code runs with relocation off.
32 * Code from here to __end_interrupts gets copied down to real
33 * address 0x100 when we are running a relocatable kernel.
34 * Therefore any relative branches in this section must only
35 * branch to labels in this section.
36 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	/*
	 * 0x100: System Reset. On POWER7 this vector is also taken when
	 * a thread wakes from nap, so we must tell a powersave wakeup
	 * apart from a genuine reset before using the normal prolog.
	 */
	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)		/* stash r13; prolog reloads it as PACA */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31	/* extract SRR1 wakeup-state field */
	beq	9f			/* 0 => not a nap wakeup: real reset */

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2		/* 2 => supervisor state was lost */
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.			/* >2 => hypervisor state lost: spin */
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest		/* KVM wants this hw thread for a guest */
1:
#endif

	beq	cr1,2f			/* r13 == 2: take the state-loss path */
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)
85
	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries	/* real handler in the 0x3000 support area */
93
	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	/* No SLB: may be a segment-table (stab) miss, check out of line */
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)
105
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR		/* r3 = faulting data address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11			/* slb_miss_realmode saves/restores CTR */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
134
	/* 0x400: Instruction Storage Interrupt (ISI) */
	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	/* 0x480: instruction SLB miss; mirrors the 0x380 data path */
	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* relocatable kernel: reach the handler via CTR (see 0x380) */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
160
	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	/* 0x500: external interrupt; HV variant (0x502) on arch 2.06+ */
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
178
	/* 0x600-0xb00: standard architected vectors */
	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	/* decrementer is maskable (soft-disable aware); hdec is HV-only */
	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
196
	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/* Give KVM a chance to claim the syscall (guest hypercall) */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	/* magic syscall number 0x1ebe: fast endian switch, see below */
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13			/* keep user r13; r13 becomes PACA */
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)	/* switch to kernel MSR via rfid */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE		/* flip endianness bit only */
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.
234
	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_exception_trampoline:
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	/* NOTE(review): both 0xe50 and 0xe60 branch to hmi_exception_hv;
	 * confirm the 0xe50 entry is intentional and not a stale vector. */
	. = 0xe60
	b	hmi_exception_hv
252
	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries
269
#ifdef CONFIG_CBE_RAS
	/* Cell Broadband Engine RAS vectors (HV, +2 offsets) */
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
277
278	. = 0x1500
279	.global denorm_Hypervisor
280denorm_exception_hv:
281	HMT_MEDIUM
282	mtspr	SPRN_SPRG_HSCRATCH0,r13
283	mfspr	r13,SPRN_SPRG_HPACA
284	std	r9,PACA_EXGEN+EX_R9(r13)
285	std	r10,PACA_EXGEN+EX_R10(r13)
286	std	r11,PACA_EXGEN+EX_R11(r13)
287	std	r12,PACA_EXGEN+EX_R12(r13)
288	mfspr	r9,SPRN_SPRG_HSCRATCH0
289	std	r9,PACA_EXGEN+EX_R13(r13)
290	mfcr	r9
291
292#ifdef CONFIG_PPC_DENORMALISATION
293	mfspr	r10,SPRN_HSRR1
294	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
295	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
296	addi	r11,r11,-4		/* HSRR0 is next instruction */
297	bne+	denorm_assist
298#endif
299
300	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
301	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
302
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

	/* end of the fixed vectors; support code starts at 0x3000 */
	. = 0x3000
317
/*** Out of line interrupts support ***/

	/* moved from 0x200 */
	/* Machine check proper; also the entry point firmware is told
	 * to use for FWNMI machine checks on pSeries. */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
329
	/* moved from 0x300 */
	/* Classify a 0x300 fault on no-SLB (segment table) CPUs:
	 * bolted-stab misses are handled here, anything else goes
	 * back to the normal data access path. */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60		/* r10 = top 4 bits of DAR (region) */
	rlwimi	r10,r9,16,0x20		/* fold in DSISR segment-fault bit */
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300		/* never match while in a guest */
#endif
	mfcr	r9
	cmpwi	r10,0x2c		/* kernel region + stab-miss pattern? */
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9			/* not a stab miss: restore and rejoin */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	/* KVM test-and-skip handlers for the vectors above */
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
363
#ifdef CONFIG_PPC_DENORMALISATION
	/* Fast fixup reached from denorm_exception_hv: force each FP/VSX
	 * register through a copy-to-itself to normalise it, then return
	 * with hrfid to re-execute the faulting instruction (HSRR0 was
	 * backed up by 4 in the caller). */
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)	/* FP on, FP exceptions off */
	mtmsrd	r10
	sync
	fmr	0,0
	fmr	1,1
	fmr	2,2
	fmr	3,3
	fmr	4,4
	fmr	5,5
	fmr	6,6
	fmr	7,7
	fmr	8,8
	fmr	9,9
	fmr	10,10
	fmr	11,11
	fmr	12,12
	fmr	13,13
	fmr	14,14
	fmr	15,15
	fmr	16,16
	fmr	17,17
	fmr	18,18
	fmr	19,19
	fmr	20,20
	fmr	21,21
	fmr	22,22
	fmr	23,23
	fmr	24,24
	fmr	25,25
	fmr	26,26
	fmr	27,27
	fmr	28,28
	fmr	29,29
	fmr	30,30
	fmr	31,31
FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h	/* enable VSX */
	mtmsrd	r10
	sync
	XVCPSGNDP(0,0,0)
	XVCPSGNDP(1,1,1)
	XVCPSGNDP(2,2,2)
	XVCPSGNDP(3,3,3)
	XVCPSGNDP(4,4,4)
	XVCPSGNDP(5,5,5)
	XVCPSGNDP(6,6,6)
	XVCPSGNDP(7,7,7)
	XVCPSGNDP(8,8,8)
	XVCPSGNDP(9,9,9)
	XVCPSGNDP(10,10,10)
	XVCPSGNDP(11,11,11)
	XVCPSGNDP(12,12,12)
	XVCPSGNDP(13,13,13)
	XVCPSGNDP(14,14,14)
	XVCPSGNDP(15,15,15)
	XVCPSGNDP(16,16,16)
	XVCPSGNDP(17,17,17)
	XVCPSGNDP(18,18,18)
	XVCPSGNDP(19,19,19)
	XVCPSGNDP(20,20,20)
	XVCPSGNDP(21,21,21)
	XVCPSGNDP(22,22,22)
	XVCPSGNDP(23,23,23)
	XVCPSGNDP(24,24,24)
	XVCPSGNDP(25,25,25)
	XVCPSGNDP(26,26,26)
	XVCPSGNDP(27,27,27)
	XVCPSGNDP(28,28,28)
	XVCPSGNDP(29,29,29)
	XVCPSGNDP(30,30,30)
	XVCPSGNDP(31,31,31)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
	mtspr	SPRN_HSRR0,r11		/* r11 = faulting insn addr (caller) */
	mtcrf	0x80,r9			/* restore CR field saved in prolog */
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif
460
	.align	7
	/* moved from 0xe00 */
	/* Out-of-line bodies for the 0xe?? trampolines above */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
479
480/*
481 * An interrupt came in while soft-disabled. We set paca->irq_happened,
482 * then, if it was a decrementer interrupt, we bump the dec to max and
483 * and return, else we hard disable and return. This is called with
484 * r10 containing the value to OR to the paca field.
485 */
486#define MASKED_INTERRUPT(_H)				\
487masked_##_H##interrupt:					\
488	std	r11,PACA_EXGEN+EX_R11(r13);		\
489	lbz	r11,PACAIRQHAPPENED(r13);		\
490	or	r11,r11,r10;				\
491	stb	r11,PACAIRQHAPPENED(r13);		\
492	andi.	r10,r10,PACA_IRQ_DEC;			\
493	beq	1f;					\
494	lis	r10,0x7fff;				\
495	ori	r10,r10,0xffff;				\
496	mtspr	SPRN_DEC,r10;				\
497	b	2f;					\
4981:	mfspr	r10,SPRN_##_H##SRR1;			\
499	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
500	rotldi	r10,r10,16;				\
501	mtspr	SPRN_##_H##SRR1,r10;			\
5022:	mtcrf	0x80,r9;				\
503	ld	r9,PACA_EXGEN+EX_R9(r13);		\
504	ld	r10,PACA_EXGEN+EX_R10(r13);		\
505	ld	r11,PACA_EXGEN+EX_R11(r13);		\
506	GET_SCRATCH0(r13);				\
507	##_H##rfid;					\
508	b	.
509
510	MASKED_INTERRUPT()
511	MASKED_INTERRUPT(H)
512
/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500 or 0x900 to indicate which
 * kind of interrupt. MSR:EE is already off. We generate a
 * stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE		/* fake EE=1 in the frame's _MSR */
	andi.	r3,r3,0x0800		/* bit set only for 0x900 */
	bne	decrementer_common
	b	hardware_interrupt_common
535
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
      .align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */
549
#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)	/* move saved state EXSLB -> EXGEN */
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	/* NOTE(review): bare SRR0/SRR1 below lack the SPRN_ prefix; this
	 * dead (__DISABLED__) code would not assemble if re-enabled. */
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */
577
	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
593
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.machine_check_exception
	b	.ret_from_except
608
	/* Common handlers built entirely from the macro; the last
	 * argument is the C handler each one calls. Note emulation
	 * assist is routed to program_check_exception. */
	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
        STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
        STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
631
	.align	7
	/* Trampolines within addi range of LOAD_HANDLER (first 32k) */
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on
638
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)		/* r3 = PACA save area (caller set) */
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)	/* stash CFAR where it will be seen */
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)		/* fake a back-chain */
	li	r12,0
	std	r12,0(r11)		/* terminate the chain */
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* kernel_bad_stack never returns */
698
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)	/* preserve DAR/DSISR across prolog */
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)	/* r3 = faulting address */
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)	/* r4 = DSISR */
	li	r5,0x300			/* r5 = trap number */
	b	.do_hash_page	 	/* Try to handle as hpte fault */
718
	.align  7
	/* Hypervisor data storage: no real handler yet, report unknown */
        .globl  h_data_storage_common
h_data_storage_common:
        mfspr   r10,SPRN_HDAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_HDSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
        bl      .save_nvgprs
	DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unknown_exception
        b       .ret_from_except
732
	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)		/* faulting address = interrupted NIP */
	andis.	r4,r12,0x5820		/* synthesize a DSISR from SRR1 bits */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

        STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
745
/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault		/* cr0 set by slb_allocate_user */

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	/* NOTE(review): bare SRR0/SRR1 below lack the SPRN_ prefix; this
	 * dead (__DISABLED__) code would not assemble if re-enabled. */
	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
808
809
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11			/* caller stashed CTR in r11 */
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

	/* RI was clear: unrecoverable, divert to unrecov_slb */
2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b			/* unrecoverable_exception never returns */
870
871
#ifdef CONFIG_PPC_970_NAP
	/* Clear the nap flag and make the interrupted idle loop return
	 * to its caller instead of resuming the nap instruction. */
power4_fixup_nap:
	andc	r9,r9,r10		/* clear nap bit in local flags */
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
880
	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)	/* preserve DAR/DSISR */
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except
898
	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.program_check_exception
	b	.ret_from_except
908
	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* FP-unavailable in kernel is a bug */
1:	bl	.load_up_fpu
	b	fast_exception_return
921
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: fall through to error */
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except
939
	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f			/* kernel mode: fall through to error */
	b	.load_up_vsx		/* tail call; it returns for us */
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:
960
/*
 * Hash table stuff
 */
	.align	7
	/* Entry: r3 = faulting address, r4 = DSISR (or synthesized bits),
	 * r5 = trap number, r12 = interrupted MSR. Tries to resolve the
	 * fault by inserting a HPTE; falls back to do_page_fault. */
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f			/* negative: low-level hash failure */
1010
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f			/* 0 = handled, return to caller */
	bl	.save_nvgprs
	mr	r5,r3			/* r5 = do_page_fault error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .do_dabr
12:	b       .ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3			/* r5 = hash_page return code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3			/* r4 = faulting address */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return
1066
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28		/* r11 = ESID */
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
	li	r9,0x1
	rldimi  r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f		/* free slot found */
	addi	r10,r10,16
	andi.	r11,r10,0x70	/* still within the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11		/* flush any cached translation */

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb		/* RI clear: unrecoverable */

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI before touching SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
1154
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.= 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
        . = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign 4096
	.globl initial_stab
initial_stab:
	.space	4096
1175
#ifdef CONFIG_PPC_POWERNV
	/* Secondary-CPU machine check entry from OPAL firmware:
	 * r3 = opal machine check event (real address, low bits tagged) */
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2			/* strip tag bits from event ptr */
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)	/* restore SRR0/SRR1 from the event */
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */
1192