/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_DAR		40
#define EX_DSISR	48
#define EX_CCR		52
#define EX_CFAR		56
#define EX_PPR		64
#if defined(CONFIG_RELOCATABLE)
#define EX_CTR		72
.if EX_SIZE != 10
	.error "EX_SIZE is wrong"
.endif
#else
.if EX_SIZE != 9
	.error "EX_SIZE is wrong"
.endif
#endif

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h
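
/*
 * Illustrative expansion (an assumed example, not generated code): with
 * kernelbase at 0xc000000000000000 and a handler at kernelbase + 0x1234,
 * LOAD_HANDLER(r10, label) becomes:
 *	ld	r10,PACAKBASE(r13)	(r10 = 0xc000000000000000)
 *	ori	r10,r10,0x1234		(r10 = 0xc000000000001234)
 * __LOAD_FAR_HANDLER additionally folds in the @h bits with addis, so its
 * target may be anywhere within a 32-bit offset rather than the first 64KB.
 */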

/* Exception register prefixes */
#define EXC_HV		1
#define EXC_STD		0

#if defined(CONFIG_RELOCATABLE)
/*
 * If we support interrupts with relocation on AND we're a relocatable kernel,
 * we need to use CTR to get to the 2nd level handler.  So, save/restore it
 * when required.
 */
#define SAVE_CTR(reg, area)	mfctr	reg ; 	std	reg,area+EX_CTR(r13)
#define GET_CTR(reg, area) 			ld	reg,area+EX_CTR(r13)
#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
#else
/* ...else CTR is unused and in register. */
#define SAVE_CTR(reg, area)
#define GET_CTR(reg, area) 	mfctr	reg
#define RESTORE_CTR(reg, area)
#endif

/*
 * PPR save/restore macros used in exceptions-64s.S
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra)						\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	ra,_PPR(r1);						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)

#define RESTORE_PPR_PACA(area, ra)					\
BEGIN_FTR_SECTION_NESTED(941)						\
	ld	ra,area+EX_PPR(r13);					\
	mtspr	SPRN_PPR,ra;						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)

/*
 * Get an SPR into a register if the CPU has the given feature
 */
#define OPT_GET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mfspr	ra,spr;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Set an SPR from a register if the CPU has the given feature
 */
#define OPT_SET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mtspr	spr,ra;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Save a register to the PACA if the CPU has the given feature
 */
#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)						\
	std	ra,offset(r13);						\
END_FTR_SECTION_NESTED(ftr,ftr,943)

.macro EXCEPTION_PROLOG_0 area
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,\area\()+EX_R9(r13)		/* save r9 */
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
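	/*
	 * Note: PPR is read above, before HMT_MEDIUM below raises the
	 * thread priority, so the saved value is the priority of the
	 * interrupted context.
	 */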
	HMT_MEDIUM
	std	r10,\area\()+EX_R10(r13)	/* save r10 - r12 */
	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
.endm

.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, dar, dsisr, bitmask
	OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
	OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	SAVE_CTR(r10, \area\())
	mfcr	r9
	.if \kvm
		KVMTEST \hsrr \vec
	.endif
	.if \bitmask
		lbz	r10,PACAIRQSOFTMASK(r13)
		andi.	r10,r10,\bitmask
		/* Associate vector numbers with bits in paca->irq_happened */
		.if \vec == 0x500 || \vec == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif \vec == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif \vec == 0xa00 || \vec == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif \vec == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif \vec == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if \hsrr
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	std	r11,\area\()+EX_R11(r13)
	std	r12,\area\()+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers, making the
	 * interrupt unrecoverable if they are still live.
	 */
	GET_SCRATCH0(r10)
	std	r10,\area\()+EX_R13(r13)
	.if \dar
	mfspr	r10,SPRN_DAR
	std	r10,\area\()+EX_DAR(r13)
	.endif
	.if \dsisr
	mfspr	r10,SPRN_DSISR
	stw	r10,\area\()+EX_DSISR(r13)
	.endif
.endm

.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	.if ! \set_ri
	xori	r10,r10,MSR_RI		/* Clear MSR_RI */
	.endif
	.if \hsrr
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	mtspr	SPRN_HSRR1,r10
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	mtspr	SPRN_SRR1,r10
	.endif
	LOAD_HANDLER(r10, \label\())
	.if \hsrr
	mtspr	SPRN_HSRR0,r10
	HRFI_TO_KERNEL
	.else
	mtspr	SPRN_SRR0,r10
	RFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.endm

.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
#ifdef CONFIG_RELOCATABLE
	.if \hsrr
	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
	.else
	mfspr	r11,SPRN_SRR0	/* save SRR0 */
	.endif
	LOAD_HANDLER(r12, \label\())
	mtctr	r12
	.if \hsrr
	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
	.else
	mfspr	r12,SPRN_SRR1	/* and SRR1 */
	.endif
	li	r10,MSR_RI
	mtmsrd 	r10,1		/* Set RI (EE=0) */
	bctr
#else
	.if \hsrr
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif
	li	r10,MSR_RI
	mtmsrd 	r10,1			/* Set RI (EE=0) */
	b	\label
#endif
.endm
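
/*
 * Taken together, a typical real-mode vector invokes the three prologs in
 * sequence, e.g. as the 0x300 data access trampoline does below:
 *	EXCEPTION_PROLOG_0 PACA_EXGEN
 *	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 1, 1, 0
 *	EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
 * which saves volatile state into the PACA save area, runs the KVM test
 * (and the soft-mask test for maskable vectors), then rfids to
 * data_access_common with relocation on.
 */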

/*
 * Branch to label using its 0xC000 address. This results in an instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate rather than
 * loading KBASE, for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_FAR_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * If hv is possible, interrupts come in to the hv version
 * of the kvmppc_interrupt code, which then jumps to the PR handler,
 * kvmppc_interrupt_pr, if the guest is a PR guest.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

.macro KVMTEST hsrr, n
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	.if \hsrr
	bne	do_kvm_H\n
	.else
	bne	do_kvm_\n
	.endif
.endm

.macro KVM_HANDLER area, hsrr, n, skip
	.if \skip
	cmpwi	r10,KVM_GUEST_MODE_SKIP
	beq	89f
	.else
BEGIN_FTR_SECTION_NESTED(947)
	ld	r10,\area+EX_CFAR(r13)
	std	r10,HSTATE_CFAR(r13)
END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
	.endif

BEGIN_FTR_SECTION_NESTED(948)
	ld	r10,\area+EX_PPR(r13)
	std	r10,HSTATE_PPR(r13)
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
	ld	r10,\area+EX_R10(r13)
	std	r12,HSTATE_SCRATCH0(r13)
	sldi	r12,r9,32
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if \hsrr
	ori	r12,r12,(\n + 0x2)
	.else
	ori	r12,r12,(\n)
	.endif
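	/*
	 * e.g. for the 0xe60 HMI, an HSRR interrupt, the low word of r12
	 * becomes 0xe62, with the saved CR (from r9) in the upper half.
	 */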

#ifdef CONFIG_RELOCATABLE
	/*
	 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
	 * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
	 * to be saved in HSTATE_SCRATCH1.
	 */
	mfctr	r9
	std	r9,HSTATE_SCRATCH1(r13)
	__LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
	mtctr	r9
	ld	r9,\area+EX_R9(r13)
	bctr
#else
	ld	r9,\area+EX_R9(r13)
	b	kvmppc_interrupt
#endif


	.if \skip
89:	mtocrf	0x80,r9
	ld	r9,\area+EX_R9(r13)
	ld	r10,\area+EX_R10(r13)
	.if \hsrr
	b	kvmppc_skip_Hinterrupt
	.else
	b	kvmppc_skip_interrupt
	.endif
	.endif
.endm

#else
.macro KVMTEST hsrr, n
.endm
.macro KVM_HANDLER area, hsrr, n, skip
.endm
#endif

#define EXCEPTION_PROLOG_COMMON_1()					   \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \

/* Save original regs values from save area to stack frame. */
#define EXCEPTION_PROLOG_COMMON_2(area)					   \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					   \
	std	r9,GPR9(r1);						   \
	std	r10,GPR10(r1);						   \
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					   \
	ld	r11,area+EX_R13(r13);					   \
	std	r9,GPR11(r1);						   \
	std	r10,GPR12(r1);						   \
	std	r11,GPR13(r1);						   \
BEGIN_FTR_SECTION_NESTED(66);						   \
	ld	r10,area+EX_CFAR(r13);					   \
	std	r10,ORIG_GPR3(r1);					   \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);			   \
	GET_CTR(r10, area);						   \
	std	r10,_CTR(r1);

#define EXCEPTION_PROLOG_COMMON_3(trap)					   \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe   */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	mflr	r9;			/* Get LR, later save to stack	*/ \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	std	r9,_LINK(r1);						   \
	lbz	r10,PACAIRQSOFTMASK(r13);				   \
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r10,SOFTE(r1);						   \
	std	r11,_XER(r1);						   \
	li	r9,(trap)+1;						   \
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							   \
	ld	r11,exception_marker@toc(r2);				   \
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
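
/*
 * After COMMON_1..3 have run, the interrupt frame holds CR, NIP, MSR, the
 * back chain and GPR0/GPR1 (COMMON_1); GPR9-GPR13, CFAR (in ORIG_GPR3)
 * and CTR from the save area (COMMON_2); and GPR2-GPR8, LR, XER, SOFTE,
 * the trap number, RESULT and the frame marker (COMMON_3).
 */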

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_COMMON(area, trap)					   \
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	tdgei	r1,-INT_FRAME_SIZE;	/* trap if r1 is in userspace	*/ \
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;				   \
3:	EXCEPTION_PROLOG_COMMON_1();					   \
	kuap_save_amr_and_lock r9, r10, cr1, cr0;			   \
	beq	4f;			/* if from kernel mode		*/ \
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
	SAVE_PPR(area, r9);						   \
4:	EXCEPTION_PROLOG_COMMON_2(area);				   \
	EXCEPTION_PROLOG_COMMON_3(trap);				   \
	ACCOUNT_STOLEN_TIME

/*
 * Exception where stack is already set in r1, r1 is saved in r10.
 * PPR save and CPU accounting is not done (for some reason).
 */
#define EXCEPTION_COMMON_STACK(area, trap)			\
	EXCEPTION_PROLOG_COMMON_1();				\
	kuap_save_amr_and_lock r9, r10, cr1;			\
	EXCEPTION_PROLOG_COMMON_2(area);			\
	EXCEPTION_PROLOG_COMMON_3(trap)

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	.else
	mtspr	SPRN_SRR1,r9
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	ld	r3, PACA_THREAD_INFO(r13);	\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	ld	r11, PACA_THREAD_INFO(r13);	\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

/*
 * Following are the BOOK3S exception handler helper macros.
 * Handlers come in a number of types, and each type has a number of varieties.
 *
 * EXC_REAL_*     - real, unrelocated exception vectors
 * EXC_VIRT_*     - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_*   - real, unrelocated helpers (virt can call these)
 * TRAMP_VIRT_*   - virt, unreloc helpers (in practice, real can use)
 * TRAMP_KVM      - KVM handlers that get put into real, unrelocated
 * EXC_COMMON     - virt, relocated common handlers
 *
 * The EXC handlers are given a name, and branch to name_common, or the
 * appropriate KVM or masking function. Vector handler varieties are as
 * follows:
 *
 * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
 *
 * EXC_{REAL|VIRT}  - standard exception
 *
 * EXC_{REAL|VIRT}_suffix
 *     where _suffix is:
 *   - _MASKABLE               - maskable exception
 *   - _OOL                    - out of line with trampoline to common handler
 *   - _HV                     - HV exception
 *
 * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
 *
 * KVM handlers come in the following varieties:
 * TRAMP_KVM
 * TRAMP_KVM_SKIP
 * TRAMP_KVM_HV
 * TRAMP_KVM_HV_SKIP
 *
 * COMMON handlers come in the following varieties:
 * EXC_COMMON_BEGIN/END - used to open-code the handler
 * EXC_COMMON
 * EXC_COMMON_ASYNC
 *
 * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
 * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
 */
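
/*
 * For example, the 0x700 program check handler further below is built as:
 *	EXC_REAL(program_check, 0x700, 0x100)
 *	EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 *	TRAMP_KVM(PACA_EXGEN, 0x700)
 *	EXC_COMMON_BEGIN(program_check_common)
 * i.e. a real vector and a virt vector that branch to the same common
 * code, a KVM test trampoline, and an open-coded common handler.
 */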

#define __EXC_REAL(name, start, size, area)				\
	EXC_REAL_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 area ;					\
	EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0, 0, 0 ;		\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ;		\
	EXC_REAL_END(name, start, size)

#define EXC_REAL(name, start, size)					\
	__EXC_REAL(name, start, size, PACA_EXGEN)

#define __EXC_VIRT(name, start, size, realvec, area)			\
	EXC_VIRT_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 area ;					\
	EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0, 0, 0;		\
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ;		\
	EXC_VIRT_END(name, start, size)

#define EXC_VIRT(name, start, size, realvec)				\
	__EXC_VIRT(name, start, size, realvec, PACA_EXGEN)

#define EXC_REAL_MASKABLE(name, start, size, bitmask)			\
	EXC_REAL_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN ;					\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, 0, 0, bitmask ; \
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ;		\
	EXC_REAL_END(name, start, size)

#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask)		\
	EXC_VIRT_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN ;					\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ;		\
	EXC_VIRT_END(name, start, size)

#define EXC_REAL_HV(name, start, size)					\
	EXC_REAL_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN;					\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ;		\
	EXC_REAL_END(name, start, size)

#define EXC_VIRT_HV(name, start, size, realvec)				\
	EXC_VIRT_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN;					\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ;			\
	EXC_VIRT_END(name, start, size)

#define __EXC_REAL_OOL(name, start, size)				\
	EXC_REAL_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN ;					\
	b	tramp_real_##name ;					\
	EXC_REAL_END(name, start, size)

#define __TRAMP_REAL_OOL(name, vec)					\
	TRAMP_REAL_BEGIN(tramp_real_##name);				\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1

#define EXC_REAL_OOL(name, start, size)					\
	__EXC_REAL_OOL(name, start, size);				\
	__TRAMP_REAL_OOL(name, start)

#define __EXC_REAL_OOL_MASKABLE(name, start, size)			\
	__EXC_REAL_OOL(name, start, size)

#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask)			\
	TRAMP_REAL_BEGIN(tramp_real_##name);				\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, bitmask ;	\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1

#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask)		\
	__EXC_REAL_OOL_MASKABLE(name, start, size);			\
	__TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)

#define __EXC_REAL_OOL_HV(name, start, size)				\
	__EXC_REAL_OOL(name, start, size)

#define __TRAMP_REAL_OOL_HV(name, vec)					\
	TRAMP_REAL_BEGIN(tramp_real_##name);				\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1

#define EXC_REAL_OOL_HV(name, start, size)				\
	__EXC_REAL_OOL_HV(name, start, size);				\
	__TRAMP_REAL_OOL_HV(name, start)

#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size)			\
	__EXC_REAL_OOL(name, start, size)

#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask)		\
	TRAMP_REAL_BEGIN(tramp_real_##name);				\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, bitmask ;	\
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1

#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask)		\
	__EXC_REAL_OOL_MASKABLE_HV(name, start, size);			\
	__TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)

#define __EXC_VIRT_OOL(name, start, size)				\
	EXC_VIRT_BEGIN(name, start, size);				\
	EXCEPTION_PROLOG_0 PACA_EXGEN ;					\
	b	tramp_virt_##name;					\
	EXC_VIRT_END(name, start, size)

#define __TRAMP_VIRT_OOL(name, realvec)					\
	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD

#define EXC_VIRT_OOL(name, start, size, realvec)			\
	__EXC_VIRT_OOL(name, start, size);				\
	__TRAMP_VIRT_OOL(name, realvec)

#define __EXC_VIRT_OOL_MASKABLE(name, start, size)			\
	__EXC_VIRT_OOL(name, start, size)

#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)		\
	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1

#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask)	\
	__EXC_VIRT_OOL_MASKABLE(name, start, size);			\
	__TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)

#define __EXC_VIRT_OOL_HV(name, start, size)				\
	__EXC_VIRT_OOL(name, start, size)

#define __TRAMP_VIRT_OOL_HV(name, realvec)				\
	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ;	\
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV

#define EXC_VIRT_OOL_HV(name, start, size, realvec)			\
	__EXC_VIRT_OOL_HV(name, start, size);				\
	__TRAMP_VIRT_OOL_HV(name, realvec)

#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size)			\
	__EXC_VIRT_OOL(name, start, size)

#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)		\
	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, bitmask ; \
	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV

#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask)	\
	__EXC_VIRT_OOL_MASKABLE_HV(name, start, size);			\
	__TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)

#define TRAMP_KVM(area, n)						\
	TRAMP_KVM_BEGIN(do_kvm_##n);					\
	KVM_HANDLER area, EXC_STD, n, 0

#define TRAMP_KVM_SKIP(area, n)						\
	TRAMP_KVM_BEGIN(do_kvm_##n);					\
	KVM_HANDLER area, EXC_STD, n, 1

#define TRAMP_KVM_HV(area, n)						\
	TRAMP_KVM_BEGIN(do_kvm_H##n);					\
	KVM_HANDLER area, EXC_HV, n, 0

#define TRAMP_KVM_HV_SKIP(area, n)					\
	TRAMP_KVM_BEGIN(do_kvm_H##n);					\
	KVM_HANDLER area, EXC_HV, n, 1
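
/*
 * For example, TRAMP_KVM_HV(PACA_EXGEN, 0xe60), used for the HMI vector
 * below, emits the do_kvm_H0xe60 label that KVMTEST in the 0xe60 prolog
 * branches to when HSTATE_IN_GUEST is non-zero.
 */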

#define EXC_COMMON(name, realvec, hdlr)					\
	EXC_COMMON_BEGIN(name);						\
	EXCEPTION_COMMON(PACA_EXGEN, realvec);				\
	bl	save_nvgprs;						\
	RECONCILE_IRQ_STATE(r10, r11);					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret_from_except

/*
 * Like EXC_COMMON, but for exceptions that can occur in the idle task and
 * therefore need the special idle handling (finish nap and runlatch)
 */
#define EXC_COMMON_ASYNC(name, realvec, hdlr)				\
	EXC_COMMON_BEGIN(name);						\
	EXCEPTION_COMMON(PACA_EXGEN, realvec);				\
	FINISH_NAP;							\
	RECONCILE_IRQ_STATE(r10, r11);					\
	RUNLATCH_ON;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret_from_except_lite


/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x3fff : Real mode trampolines
 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x4000)
OPEN_FIXED_SECTION(virt_vectors,        0x4000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
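	/*
	 * The rlwinm rotates left by 16 (47-31), landing SRR1[46:47] in
	 * bits 30:31 of r5: non-zero means a power-save wakeup, with the
	 * value indicating the degree of state loss.
	 */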
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	EXCEPTION_PROLOG_0 PACA_EXNMI
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 1, 0x100, 0, 0, 0
	EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
	 * in use, so a nested NMI exception would corrupt them.
	 *
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)

EXC_VIRT_NONE(0x4100, 0x100)
TRAMP_KVM(PACA_EXNMI, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	/* See comment at system_reset exception, don't turn on RI */
	EXCEPTION_PROLOG_0 PACA_EXNMI
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0, 0, 0
	EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd 	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
	bl	save_nvgprs
	/*
	 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
	 * the right thing. We do not want to reconcile because that goes
	 * through irq tracing which we don't want in NMI.
	 *
	 * Save PACAIRQHAPPENED because some code will do a hard disable
	 * (e.g., xmon). So we want to restore this back to where it was
	 * when we return. DAR is unused in the stack, so save it there.
	 */
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	lbz	r10,PACAIRQHAPPENED(r13)
	std	r10,_DAR(r1)

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
	 */
	ld	r10,_DAR(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	EXCEPTION_RESTORE_REGS EXC_STD
	RFI_TO_USER_OR_KERNEL


EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	EXCEPTION_PROLOG_0 PACA_EXMC
BEGIN_FTR_SECTION
	b	machine_check_common_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
TRAMP_REAL_BEGIN(machine_check_common_early)
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0, 0, 0
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,MAX_MCE_DEPTH
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* We don't touch AMR here, we never go to virtual mode */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
BEGIN_FTR_SECTION
	ori	r11,r11,MSR_ME		/* turn on ME bit */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
2:
	/*
	 * Stack overflow. Stay on the emergency stack and panic.
	 * Keep the ME bit off while panicking, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */

TRAMP_REAL_BEGIN(machine_check_pSeries)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	EXCEPTION_PROLOG_0 PACA_EXMC
BEGIN_FTR_SECTION
	b	machine_check_common_early
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0

TRAMP_KVM_SKIP(PACA_EXMC, 0x200)

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	EXCEPTION_COMMON(PACA_EXMC, 0x200)
	FINISH_NAP
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd 	r10,1
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_except

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS EXC_STD

#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * We have not used any non-volatile GPRs here, and as a rule
	 * most exception code including machine check does not.
	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
	 * wakeup will restore volatile registers.
	 *
	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller */
	b	idle_return_gpr_loss
#endif
	/*
	 * Handle machine check early in real mode. We come here with
	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
	 */
EXC_COMMON_BEGIN(machine_check_handle_early)
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	b	4f
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)

#ifdef	CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
4:	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
BEGIN_FTR_SECTION
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. There is a chance that we might get hit by
	 * another MCE during the panic path and end up in an unstable state
	 * with no way out. Hence, turn the ME bit off while going down, so
	 * that if another MCE hits during the panic path, the system will
	 * checkstop and the hypervisor will get restarted cleanly by the SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
BEGIN_FTR_SECTION
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_USER_OR_KERNEL
FTR_SECTION_ELSE
	/*
	 * pSeries: Return from MC interrupt. Before that stay on emergency
	 * stack and call machine_check_exception to log the MCE event.
	 */
	LOAD_HANDLER(r10,mce_return)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	EXCEPTION_PROLOG_0 PACA_EXMC
	b	machine_check_pSeries_0

EXC_COMMON_BEGIN(unrecover_mce)
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

EXC_COMMON_BEGIN(mce_return)
	/* Invoke machine_check_exception to print MCE event and return. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL
	b	.

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	b	tramp_real_data_access
EXC_REAL_END(data_access, 0x300, 0x80)

TRAMP_REAL_BEGIN(tramp_real_data_access)
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 1, 1, 0
	EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1

EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 1, 1, 0
	EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
EXC_VIRT_END(data_access, 0x4300, 0x80)

TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)

EXC_COMMON_BEGIN(data_access_common)
	/*
	 * Here r13 points to the paca, r9 contains the saved CR,
	 * SRR0 and SRR1 are saved in r11 and r12,
	 * r9 - r13 are saved in paca->exgen.
	 * EX_DAR and EX_DSISR have saved DAR/DSISR
	 */
	EXCEPTION_COMMON(PACA_EXGEN, 0x300)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	EXCEPTION_PROLOG_0 PACA_EXSLB
	b	tramp_real_data_access_slb
EXC_REAL_END(data_access_slb, 0x380, 0x80)

TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 1, 0, 0
	EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1

EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	EXCEPTION_PROLOG_0 PACA_EXSLB
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 1, 0, 0
	EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)

TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)

EXC_COMMON_BEGIN(data_access_slb_common)
	EXCEPTION_COMMON(PACA_EXSLB, 0x380)
	ld	r4,PACA_EXSLB+EX_DAR(r13)
	std	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_exception_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_DAR(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	ret_from_except


EXC_REAL(instruction_access, 0x400, 0x80)
EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)

EXC_COMMON_BEGIN(instruction_access_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0x400)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,DSISR_SRR1_MATCH_64S@h
	li	r5,0x400
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
TRAMP_KVM(PACA_EXSLB, 0x480)

EXC_COMMON_BEGIN(instruction_access_slb_common)
	EXCEPTION_COMMON(PACA_EXSLB, 0x480)
	ld	r4,_NIP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_exception_return
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r4,_NIP(r1)
	ld	r5,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	ret_from_except


EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	EXCEPTION_PROLOG_0 PACA_EXGEN
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
	EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
	EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)

EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	EXCEPTION_PROLOG_0 PACA_EXGEN
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
	EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
	EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)

TRAMP_KVM(PACA_EXGEN, 0x500)
TRAMP_KVM_HV(PACA_EXGEN, 0x500)
EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)


EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 1, 1, 0
	EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
EXC_REAL_END(alignment, 0x600, 0x100)

EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 1, 1, 0
	EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
EXC_VIRT_END(alignment, 0x4600, 0x100)

TRAMP_KVM(PACA_EXGEN, 0x600)
EXC_COMMON_BEGIN(alignment_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0x600)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except


EXC_REAL(program_check, 0x700, 0x100)
EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	2f			/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	1f			/* If TM, emergency		*/

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
	blt	2f			/* normal path if not		*/

	/* Use the emergency stack					*/
1:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	b 3f				/* Jump into the macro !!	*/
2:
	EXCEPTION_COMMON(PACA_EXGEN, 0x700)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except


EXC_REAL(fp_unavailable, 0x800, 0x100)
EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x800)
EXC_COMMON_BEGIN(fp_unavailable_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0x800)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * Test if 2 TM state bits are zero. If non-zero (i.e. userspace
	 * was in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif


EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)


EXC_REAL_HV(hdecrementer, 0x980, 0x80)
EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)


EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xa00)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
#else
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
#endif


EXC_REAL(trap_0b, 0xb00, 0x100)
EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)

/*
 * system call / hypercall (0xc00, 0x4c00)
 *
 * The system call exception is invoked with "sc 0" and does not alter HV bit.
 * There is support for kernel code to invoke system calls but there are no
 * in-tree users.
 *
 * The hypercall is invoked with "sc 1" and sets HV=1.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
 *
 * For hypercalls, the register convention is as follows:
 * r0 volatile
 * r1-2 nonvolatile
 * r3 volatile parameter and return value for status
 * r4-r10 volatile input and output value
 * r11 volatile hypercall number and output value
 * r12 volatile input and output value
 * r13-r31 nonvolatile
 * LR nonvolatile
 * CTR volatile
 * XER volatile
 * CR0-1 CR5-7 volatile
 * CR2-4 nonvolatile
 * Other registers nonvolatile
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is a poor choice, as hardware may interpret
 * some of its bits, making them costly to change.
 */
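
/*
 * The SYSTEM_CALL macro below uses exactly those scratch registers: in
 * the KVM configuration, ctr stashes the caller's r13 while the PACA is
 * loaded, and cr0 is used for the fast endian switch comparison.
 */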
.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * There is a little bit of juggling to get syscall and hcall
	 * working well. Save r13 in ctr to avoid using SPRG scratch
	 * register.
	 *
	 * Userspace syscalls have already saved the PPR, hcalls must save
	 * it before setting HMT_MEDIUM.
	 */
	mtctr	r13
	GET_PACA(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	INTERRUPT_TO_KERNEL
	KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */
	mfctr	r9
#else
	mr	r9,r13
	GET_PACA(r13)
	INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

	/* We reach here with PACA in r13, r13 in r9. */
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1

	HMT_MEDIUM

	.if ! \virt
	__LOAD_HANDLER(r10, system_call_common)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
	.else
	li	r10,MSR_RI
	mtmsrd 	r10,1			/* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
	__LOAD_HANDLER(r10, system_call_common)
	mtctr	r10
	bctr
#else
	b	system_call_common
#endif
	.endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
	/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	mr	r13,r9
	RFI_TO_USER	/* return to userspace */
	b	.	/* prevent speculative execution */
#endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
	SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)

EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
	SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * This is a hcall, so register convention is as above, with these
	 * differences:
	 * r13 = PACA
	 * ctr = orig r13
	 * orig r10 saved in PACA
	 */
TRAMP_KVM_BEGIN(do_kvm_0xc00)
	 /*
	  * Save the PPR (on systems that support it) before changing to
	  * HMT_MEDIUM. That allows the KVM code to save that value into the
	  * guest state (it is the guest's PPR value).
	  */
	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
	mfctr	r10
	SET_SCRATCH0(r10)
	std	r9,PACA_EXGEN+EX_R9(r13)
	mfcr	r9
	KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
#endif


EXC_REAL(single_step, 0xd00, 0x100)
EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
TRAMP_KVM(PACA_EXGEN, 0xd00)
EXC_COMMON(single_step_common, 0xd00, single_step_exception)

EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
EXC_COMMON_BEGIN(h_data_storage_common)
	mfspr   r10,SPRN_HDAR
	std     r10,PACA_EXGEN+EX_DAR(r13)
	mfspr   r10,SPRN_HDSISR
	stw     r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_COMMON(PACA_EXGEN, 0xe00)
	bl      save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi    r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	lwz	r5,PACA_EXGEN+EX_DSISR(r13)
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	li	r5,SIGSEGV
	bl      bad_page_fault
MMU_FTR_SECTION_ELSE
	bl      unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
	b       ret_from_except


EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)


EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)


/*
 * hmi_exception trampoline is a special case. It jumps to
 * hmi_exception_early first, and then eventually from there to the
 * trampoline to get into virtual mode.
 */
EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	b	hmi_exception_early
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, 0
	mfctr	r10			/* save ctr, even for !RELOCATABLE */
	BRANCH_TO_C000(r11, hmi_exception_early_common)

EXC_COMMON_BEGIN(hmi_exception_early_common)
	mtctr	r10			/* Restore ctr */
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	EXCEPTION_PROLOG_COMMON_1()
	/* We don't touch AMR here, we never go to virtual mode */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	cmpdi	cr0,r3,0
	bne	1f

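	/* Recovered in real mode (r3 == 0): return from the HMI directly */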
	EXCEPTION_RESTORE_REGS EXC_HV
	HRFI_TO_USER_OR_KERNEL

1:
	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	EXCEPTION_RESTORE_REGS EXC_HV
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, IRQS_DISABLED
	EXCEPTION_PROLOG_2_REAL hmi_exception_common, EXC_HV, 1

EXC_COMMON_BEGIN(hmi_exception_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
	FINISH_NAP
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	RUNLATCH_ON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	handle_hmi_exception
	b	ret_from_except

EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif


EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)


EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0xf20)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the two TM state bits are zero. If they are non-zero
	 * (i.e. userspace was in a transaction), go do the TM handling.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0xf40)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the two TM state bits are zero. If they are non-zero
	 * (i.e. userspace was in a transaction), go do the TM handling.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
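	/* Tail call: unlike the AltiVec case above, load_up_vsx does not return here */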
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)


EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif


EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)

EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0, 0, 0

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
#endif

	KVMTEST EXC_HV 0x1500
	EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)

#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

TRAMP_KVM_HV(PACA_EXGEN, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

	.Lreg=0
	.rept 32
	fmr	.Lreg,.Lreg
	.Lreg=.Lreg+1
	.endr

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

	.Lreg=0
	.rept 32
	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
	.Lreg=.Lreg+1
	.endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers.
 */
	.Lreg=32
	.rept 32
	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
	.Lreg=.Lreg+1
	.endr

denorm_done:
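	/* Back HSRR0 up by one instruction so the interrupted op is re-executed */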
	mfspr	r11,SPRN_HSRR0
	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_UNKNOWN
	b	.
#endif

EXC_COMMON(denorm_common, 0x1500, unknown_exception)


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


EXC_REAL(altivec_assist, 0x1700, 0x100)
EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
TRAMP_KVM(PACA_EXGEN, 0x1700)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif

#ifdef CONFIG_PPC_WATCHDOG

#define MASKED_DEC_HANDLER_LABEL 3f

#define MASKED_DEC_HANDLER(_H)				\
3: /* soft-nmi */					\
	std	r12,PACA_EXGEN+EX_R12(r13);		\
	GET_SCRATCH0(r10);				\
	std	r10,PACA_EXGEN+EX_R13(r13);		\
	EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	soft_nmi_interrupt
	b	ret_from_except

#else /* CONFIG_PPC_WATCHDOG */
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
#endif /* CONFIG_PPC_WATCHDOG */

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR into the paca field.
 */
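/*
 * Illustrative C-level sketch of the masked-interrupt logic below
 * (an assumption-laden paraphrase of the asm, not the real code path):
 *
 *	paca->irq_happened |= r10;
 *	if (r10 == PACA_IRQ_DEC) {
 *		mtspr(SPRN_DEC, 0x7fffffff);		// push DEC far out
 *		goto masked_dec_handler;
 *	} else if (r10 & PACA_IRQ_MUST_HARD_MASK) {
 *		srr1 &= ~MSR_EE;			// return with EE clear
 *		paca->irq_happened |= PACA_IRQ_HARD_DIS;
 *	}
 *	// then restore the scratch registers and [h]rfi back
 */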
.macro MASKED_INTERRUPT hsrr
	.if \hsrr
masked_Hinterrupt:
	.else
masked_interrupt:
	.endif
	std	r11,PACA_EXGEN+EX_R11(r13)
	lbz	r11,PACAIRQHAPPENED(r13)
	or	r11,r11,r10
	stb	r11,PACAIRQHAPPENED(r13)
	cmpwi	r10,PACA_IRQ_DEC
	bne	1f
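	/* Decrementer: reload DEC with the largest positive 32-bit value */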
	lis	r10,0x7fff
	ori	r10,r10,0xffff
	mtspr	SPRN_DEC,r10
	b	MASKED_DEC_HANDLER_LABEL
1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
	beq	2f
	.if \hsrr
	mfspr	r10,SPRN_HSRR1
	xori	r10,r10,MSR_EE	/* clear MSR_EE */
	mtspr	SPRN_HSRR1,r10
	.else
	mfspr	r10,SPRN_SRR1
	xori	r10,r10,MSR_EE	/* clear MSR_EE */
	mtspr	SPRN_SRR1,r10
	.endif
	ori	r11,r11,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQHAPPENED(r13)
2:	/* done */
	mtcrf	0x80,r9
	std	r1,PACAR1(r13)
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	/* returns to kernel where r13 must be set up, so don't restore it */
	.if \hsrr
	HRFI_TO_KERNEL
	.else
	RFI_TO_KERNEL
	.endif
	b	.
	MASKED_DEC_HANDLER(\hsrr\())
.endm

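/*
 * Fallback software store-forwarding (STF) barrier: the sync orders the
 * prior stores, and reloading the just-stored values is intended to
 * inhibit speculative store forwarding past this point; the ori and
 * the ladder of taken branches act as no-op padding.
 */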
TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
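	/*
	 * Concretely: the eight loads in one iteration hit offsets
	 * 0x0, 0x88, 0x110, ... 0x3b8, i.e. 8 bytes deeper into each
	 * successive 128-byte line, and the addi then advances r10 by
	 * the 0x400 bytes just displaced.
	 */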
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt), same as rfi_flush_fallback above.
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	hrfid

/*
 * Real mode exceptions actually use this too, but alternate
 * instruction code patches (which end up in the common .text area)
 * cannot reach these if they are put there.
 */
USE_FIXED_SECTION(virt_trampolines)
	MASKED_INTERRUPT EXC_STD
	MASKED_INTERRUPT EXC_HV

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
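	/* Advance SRR0 by one instruction so the trapping op is skipped */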
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	RFI_TO_KERNEL
	b	.

TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	HRFI_TO_KERNEL
	b	.
#endif

/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */
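/*
 * A worked example (illustrative numbers only): with a 64K-aligned
 * kernel base of 0xc000000000000000 and a handler at offset 0x8f00,
 * the ori forms 0xc000000000008f00. An offset of 0x10000 or more
 * would not fit in ori's 16-bit immediate, so such a handler would
 * be unreachable from the prologs.
 */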

/*** Common interrupt handlers ***/


	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * it.  Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */

EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
	b	__ppc64_runlatch_on

USE_FIXED_SECTION(virt_trampolines)
	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

#ifdef CONFIG_PPC_970_NAP
EXC_COMMON_BEGIN(power4_fixup_nap)
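	/* Clear the nap flag (mask passed in by the caller in r10) */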
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/*
 * Hash table stuff
 */
	.balign	IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_BOOK3S_64
	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
	ori	r0,r0,DSISR_BAD_FAULT_64S@l
	and.	r0,r4,r0		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	ld	r11, PACA_THREAD_INFO(r13)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 contains the MSR
	 * r5 contains the trap number
	 * r6 contains the DSISR
	 *
	 * At return, r3 = 0 for success, 1 for page fault, negative for error.
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f

	/* Reload DSISR into r4 for the DABR check below */
	ld	r4,_DSISR(r1)
#endif /* CONFIG_PPC_BOOK3S_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	ret_from_except_lite
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)		/* DAR is 64-bit, load all of it */
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values. Don't use
	 * ret_from_except_lite here.
	 */
	b	ret_from_except


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
#endif

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except

/*
 * When a doorbell is triggered from system reset wakeup, the message is
 * not cleared, so it would fire again when EE is enabled.
 *
 * When coming from local_irq_enable, there may be the same problem if
 * we were hard disabled.
 *
 * Execute msgclr to clear the pending doorbell message before handling
 * the interrupt.
 */
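/*
 * The (63-36) shift places PPC_DBELL_MSGTYPE in the field that the
 * msgclr/msgclrp instructions decode (assumed: bit 36, IBM numbering).
 */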
h_doorbell_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLR(3)
	b	h_doorbell_common

doorbell_super_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLRP(3)
	b	doorbell_super_common

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00, 0xe60 or 0xf00 to
 * indicate which kind of interrupt. MSR:EE is already off. We generate
 * a stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 *
 * Note that we don't specify LR as the NIP (return address) for
 * the interrupt because that would unbalance the return branch
 * predictor.
 */
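/*
 * Illustrative sketch of the dispatch below (pseudo-C; the 0x500 and
 * doorbell/HMI cases are selected by CPU feature sections):
 *
 *	switch (r3) {
 *	case 0x900: goto decrementer_common;
 *	case 0x500: goto h_virt_irq_common or hardware_interrupt_common;
 *	case 0xf00: goto performance_monitor_common;
 *	case 0xa00: goto the matching doorbell msgclr variant;
 *	case 0xe60: goto hmi_exception_common;	(HV only)
 *	}
 */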
_GLOBAL(__replay_interrupt)
	/*
	 * We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	LOAD_REG_ADDR(r11, replay_interrupt_return)
	mfcr	r9
	ori	r12,r12,MSR_EE
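	/* Make the saved MSR look like EE was enabled (see the note above) */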
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
BEGIN_FTR_SECTION
	beq	h_virt_irq_common
FTR_SECTION_ELSE
	beq	hardware_interrupt_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
	cmpwi	r3,0xf00
	beq	performance_monitor_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xa00
	beq	h_doorbell_common_msgclr
	cmpwi	r3,0xe60
	beq	hmi_exception_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common_msgclr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
replay_interrupt_return:
	blr

_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
