/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END  - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END  - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN    - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN    - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON          - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
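
/*
 * Example usage (this is how data_access at 0x300 is wired up below):
 *	EXC_REAL_BEGIN(data_access, 0x300, 0x80)
 *		GEN_INT_ENTRY data_access, virt=0
 *	EXC_REAL_END(data_access, 0x300, 0x80)
 * EXC_REAL_NONE / EXC_VIRT_NONE reserve a vector's space when no handler
 * is installed at that location.
 */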

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l
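
/*
 * Illustrative expansion, for a hypothetical handler label "foo":
 *	LOAD_HANDLER(r10, foo)
 * becomes
 *	ld	r10,PACAKBASE(r13)
 *	ori	r10,r10,FIXED_SYMBOL_ABS_ADDR(foo)
 * i.e., kernelbase OR'ed with the low 16 bits of foo's absolute address,
 * hence the 64KB / 64K-alignment requirements above.
 */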

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h

/*
 * Branch to label using its 0xC000 address. This results in an instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_FAR_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr
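
/*
 * For example, the system reset idle wakeup path below uses
 *	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 * so the target runs at its 0xc000... address regardless of whether
 * MSR[IR] is currently 0 or 1.
 */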

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)						\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)						\
.endm ;									\
int_define_ ## n n ;							\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef ISET_RI
		ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
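
/*
 * Example (this is how data_access is defined below); flags left unset
 * take the defaults assigned in do_define_int:
 *	INT_DEFINE_BEGIN(data_access)
 *		IVEC=0x300
 *		IDAR=1
 *		IDSISR=1
 *		IKVM_REAL=1
 *	INT_DEFINE_END(data_access)
 */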

/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
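
/*
 * E.g., hdecrementer below has IVEC=0x980 and IHSRR=1, so KVMTEST loads
 * r10 with 0x982 before branching to the handler, letting KVM tell the
 * HSRR variant apart from its SRR counterpart.
 */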

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe   */
	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
.endm
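
/*
 * By the end of __GEN_COMMON_BODY a complete pt_regs frame is built on
 * the stack (GPRs, CR, XER, CTR, LR, [H]SRR0/1 saved as NIP/MSR, the
 * trap number, etc.), so the common handlers below can simply pass it
 * to a C handler, e.g.:
 *	addi	r3,r1,STACK_FRAME_OVERHEAD
 *	bl	timer_interrupt
 */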

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm
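
/*
 * Each __restart_table entry searched above is three doublewords: the
 * start and end addresses of a region and a fixup address (hence the
 * 24 byte stride). On exit r12 holds the fixup address if r11 (the
 * interrupted NIP) falls within [start, end), or 0 if no entry matches.
 */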

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm
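
/*
 * Same search as above, but __soft_mask_table entries are just
 * start/end address pairs (16 byte stride). On exit r12 is 1 if r11
 * falls within an implicitly soft-masked region, 0 otherwise.
 */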

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,        0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one of
 * the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill)
	mtctr	r10
	bctr
#endif


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
	 * being used, so a nested NMI exception would corrupt it.
	 */
	ISET_RI=0
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd 	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area,
 * the difference is re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with it. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	ISET_RI=0
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	ISET_RI=0
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	li	r10,MSR_RI
	mtmsrd	r10,1

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check

	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd 	r10,1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr


/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers, guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
1523 *
1524 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
1525 * external interrupts are delivered as Hypervisor Virtualization Interrupts
1526 * rather than External Interrupts.
1527 *
1528 * Handling:
1529 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
1530 * because registers at the time of the interrupt are not so important as it is
1531 * asynchronous.
1532 *
1533 * If soft masked, the masked handler will note the pending interrupt for
1534 * replay, and clear MSR[EE] in the interrupted context.
1535 */
1536INT_DEFINE_BEGIN(hardware_interrupt)
1537	IVEC=0x500
1538	IHSRR_IF_HVMODE=1
1539	IMASK=IRQS_DISABLED
1540	IKVM_REAL=1
1541	IKVM_VIRT=1
1542INT_DEFINE_END(hardware_interrupt)
1543
1544EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
1545	GEN_INT_ENTRY hardware_interrupt, virt=0
1546EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
1547EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
1548	GEN_INT_ENTRY hardware_interrupt, virt=1
1549EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
1550EXC_COMMON_BEGIN(hardware_interrupt_common)
1551	GEN_COMMON hardware_interrupt
1552	addi	r3,r1,STACK_FRAME_OVERHEAD
1553	bl	do_IRQ
1554	BEGIN_FTR_SECTION
1555	b	interrupt_return_hsrr
1556	FTR_SECTION_ELSE
1557	b	interrupt_return_srr
1558	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1559
1560
1561/**
1562 * Interrupt 0x600 - Alignment Interrupt
1563 * This is a synchronous interrupt in response to data alignment fault.
1564 */
1565INT_DEFINE_BEGIN(alignment)
1566	IVEC=0x600
1567	IDAR=1
1568	IDSISR=1
1569#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1570	IKVM_REAL=1
1571#endif
1572INT_DEFINE_END(alignment)
1573
1574EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1575	GEN_INT_ENTRY alignment, virt=0
1576EXC_REAL_END(alignment, 0x600, 0x100)
1577EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1578	GEN_INT_ENTRY alignment, virt=1
1579EXC_VIRT_END(alignment, 0x4600, 0x100)
1580EXC_COMMON_BEGIN(alignment_common)
1581	GEN_COMMON alignment
1582	addi	r3,r1,STACK_FRAME_OVERHEAD
1583	bl	alignment_exception
1584	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1585	b	interrupt_return_srr
1586
1587
1588/**
1589 * Interrupt 0x700 - Program Interrupt (program check).
1590 * This is a synchronous interrupt in response to various instruction faults:
1591 * traps, privilege errors, TM errors, floating point exceptions.
1592 *
1593 * Handling:
1594 * This interrupt may use the "emergency stack" in some cases when being taken
1595 * from kernel context, which complicates handling.
1596 */
1597INT_DEFINE_BEGIN(program_check)
1598	IVEC=0x700
1599#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1600	IKVM_REAL=1
1601#endif
1602INT_DEFINE_END(program_check)
1603
1604EXC_REAL_BEGIN(program_check, 0x700, 0x100)
1605
1606#ifdef CONFIG_CPU_LITTLE_ENDIAN
1607	/*
1608	 * There's a short window during boot where although the kernel is
1609	 * running little endian, any exceptions will cause the CPU to switch
1610	 * back to big endian. For example a WARN() boils down to a trap
1611	 * instruction, which will cause a program check, and we end up here but
1612	 * with the CPU in big endian mode. The first instruction of the program
1613	 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
1614	 * executed in the wrong endian is an lhzu with a ~3GB displacement from
1615	 * r3. The content of r3 is random, so that is a load from some random
1616	 * location, and depending on the system can easily lead to a checkstop,
1617	 * or an infinitely recursive page fault.
1618	 *
1619	 * So to handle that case we have a trampoline here that can detect we
1620	 * are in the wrong endian and flip us back to the correct endian. We
1621	 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
1622	 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
1623	 * SPRG1 is already used for the paca. SPRG3 is user readable, but this
1624	 * trampoline is only active very early in boot, and SPRG3 will be
1625	 * reinitialised in vdso_getcpu_init() before userspace starts.
1626	 */
1627BEGIN_FTR_SECTION
1628	tdi   0,0,0x48    // Trap never, or in reverse endian: b . + 8
1629	b     1f          // Skip trampoline if endian is correct
1630	.long 0xa643707d  // mtsprg  0, r11      Backup r11
1631	.long 0xa6027a7d  // mfsrr0  r11
1632	.long 0xa643727d  // mtsprg  2, r11      Backup SRR0 in SPRG2
1633	.long 0xa6027b7d  // mfsrr1  r11
1634	.long 0xa643737d  // mtsprg  3, r11      Backup SRR1 in SPRG3
1635	.long 0xa600607d  // mfmsr   r11
1636	.long 0x01006b69  // xori    r11, r11, 1 Invert MSR[LE]
1637	.long 0xa6037b7d  // mtsrr1  r11
1638	.long 0x34076039  // li      r11, 0x734
1639	.long 0xa6037a7d  // mtsrr0  r11
1640	.long 0x2400004c  // rfid
1641	mfsprg r11, 3
1642	mtsrr1 r11        // Restore SRR1
1643	mfsprg r11, 2
1644	mtsrr0 r11        // Restore SRR0
1645	mfsprg r11, 0     // Restore r11
16461:
1647END_FTR_SECTION(0, 1)     // nop out after boot
1648#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1649
1650	GEN_INT_ENTRY program_check, virt=0
1651EXC_REAL_END(program_check, 0x700, 0x100)
1652EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
1653	GEN_INT_ENTRY program_check, virt=1
1654EXC_VIRT_END(program_check, 0x4700, 0x100)
1655EXC_COMMON_BEGIN(program_check_common)
1656	__GEN_COMMON_ENTRY program_check
1657
1658	/*
1659	 * It's possible to receive a TM Bad Thing type program check with
1660	 * userspace register values (in particular r1), but with SRR1 reporting
1661	 * that we came from the kernel. Normally that would confuse the bad
1662	 * stack logic, and we would report a bad kernel stack pointer. Instead
1663	 * we switch to the emergency stack if we're taking a TM Bad Thing from
1664	 * the kernel.
1665	 */
1666
1667	andi.	r10,r12,MSR_PR
1668	bne	2f			/* If userspace, go normal path */
1669
1670	andis.	r10,r12,(SRR1_PROGTM)@h
1671	bne	1f			/* If TM, emergency		*/
1672
1673	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
1674	blt	2f			/* normal path if not		*/
1675
1676	/* Use the emergency stack					*/
16771:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
1678					/* 3 in EXCEPTION_PROLOG_COMMON	*/
1679	mr	r10,r1			/* Save r1			*/
1680	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
1681	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
1682	__ISTACK(program_check)=0
1683	__GEN_COMMON_BODY program_check
1684	b 3f
16852:
1686	__ISTACK(program_check)=1
1687	__GEN_COMMON_BODY program_check
16883:
1689	addi	r3,r1,STACK_FRAME_OVERHEAD
1690	bl	program_check_exception
1691	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1692	b	interrupt_return_srr
1693

/*
 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
 * This is a synchronous interrupt in response to executing an fp instruction
 * with MSR[FP]=0.
 *
 * Handling:
 * This will load FP registers and enable the FP bit if coming from userspace,
 * otherwise report a bad kernel use of FP.
 */
INT_DEFINE_BEGIN(fp_unavailable)
	IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
	GEN_COMMON fp_unavailable
	bne	1f			/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	interrupt_return_srr
#endif


/**
 * Interrupt 0x900 - Decrementer Interrupt.
 * This is an asynchronous interrupt in response to a decrementer exception
 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
 * local_irq_disable()).
 *
 * Handling:
 * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
 * in the interrupted context.
 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
 * things back up to run soft_nmi_interrupt as a regular interrupt handler
 * on the emergency stack.
 */
INT_DEFINE_BEGIN(decrementer)
	IVEC=0x900
	IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
	GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
	GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
	GEN_COMMON decrementer
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	timer_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
 * register.
 *
 * Handling:
 * Linux does not use this outside KVM, where it's used to keep a host timer
 * while the guest is given control of the DEC. It should normally be caught
 * by the KVM test and routed there.
 */
INT_DEFINE_BEGIN(hdecrementer)
	IVEC=0x980
	IHSRR=1
	ISTACK=0
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)

EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
	GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
	GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
	__GEN_COMMON_ENTRY hdecrementer
	/*
	 * Hypervisor decrementer interrupts not caught by the KVM test
	 * shouldn't occur but are sometimes left pending on exit from a KVM
	 * guest.  We don't need to do anything to clear them, as they are
	 * edge-triggered.
	 *
	 * Be careful to avoid touching the kernel stack.
	 */
	li	r10,0
	stb	r10,PACAHSRR_VALID(r13)
	ld	r10,PACA_EXGEN+EX_CTR(r13)
	mtctr	r10
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_KERNEL


/**
 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsndp doorbell.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * Guests may use this for IPIs between threads in a core if the
 * hypervisor supports it. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, leaving MSR[EE] enabled in the interrupted context because the
 * doorbells are edge triggered.
 */
INT_DEFINE_BEGIN(doorbell_super)
	IVEC=0xa00
	IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(doorbell_super)

EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
	GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
	GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
	GEN_COMMON doorbell_super
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
	bl	doorbell_exception
#else
	bl	unknown_async_exception
#endif
	b	interrupt_return_srr


EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)

/**
 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
 * This is a synchronous interrupt invoked with the "sc" instruction. The
 * system call is invoked with "sc 0" and does not alter the HV bit, so it
 * is directed to the currently running OS. The hypercall is invoked with
 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Handling:
 * If the KVM test fires then it was due to a hypercall and is accordingly
 * routed to KVM. Otherwise this executes a normal Linux system call.
 *
 * Call convention:
 *
 * The syscall and hypercall register conventions are documented in
 * Documentation/powerpc/syscall64-abi.rst and
 * Documentation/powerpc/papr_hcalls.rst respectively.
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving. xer is best avoided though, as hardware may interpret
 * some of its bits, making them costly to change.
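 *
 * For reference, a minimal "sc 0" invocation from userspace looks like
 * the following (an illustrative sketch of the syscall64-abi.rst
 * convention, not code that lives in this file):
 *
 *	li	r0,<syscall #>		(syscall number in r0)
 *	li	r3,...			(arguments in r3..r8)
 *	sc	0
 *					(return value in r3, error
 *					 indicated by cr0.SO)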
 */
INT_DEFINE_BEGIN(system_call)
	IVEC=0xc00
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(system_call)

.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * There is a little bit of juggling to get syscall and hcall
	 * working well. Save r13 in ctr to avoid using SPRG scratch
	 * register.
	 *
	 * Userspace syscalls have already saved the PPR, hcalls must save
	 * it before setting HMT_MEDIUM.
	 */
	mtctr	r13
	GET_PACA(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	INTERRUPT_TO_KERNEL
	KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
	mfctr	r9
#else
	mr	r9,r13
	GET_PACA(r13)
	INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

	/* We reach here with PACA in r13, r13 in r9. */
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1

	HMT_MEDIUM

	.if ! \virt
	__LOAD_HANDLER(r10, system_call_common_real)
	mtctr	r10
	bctr
	.else
#ifdef CONFIG_RELOCATABLE
	__LOAD_HANDLER(r10, system_call_common)
	mtctr	r10
	bctr
#else
	b	system_call_common
#endif
	.endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
	/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	mr	r13,r9
	RFI_TO_USER	/* return to userspace */
	b	.	/* prevent speculative execution */
#endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
	SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
	SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfcr	r9
	mfctr	r10
	std	r10,PACA_EXGEN+EX_R13(r13)
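	/*
	 * The original CTR was lost when it was used to stash r13 on entry,
	 * and CFAR has been clobbered by the branches taken to get here, so
	 * record zeros rather than stale values.
	 */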
	li	r10,0
	std	r10,PACA_EXGEN+EX_CFAR(r13)
	std	r10,PACA_EXGEN+EX_CTR(r13)
	/*
	 * Save the PPR (on systems that support it) before changing to
	 * HMT_MEDIUM. That allows the KVM code to save that value into the
	 * guest state (it is the guest's PPR value).
	 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_PPR
	std	r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	HMT_MEDIUM

#ifdef CONFIG_RELOCATABLE
	/*
	 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
	 * outside the head section.
	 */
	__LOAD_FAR_HANDLER(r10, kvmppc_hcall)
	mtctr	r10
	bctr
#else
	b	kvmppc_hcall
#endif
#endif

/**
 * Interrupt 0xd00 - Trace Interrupt.
 * This is a synchronous interrupt in response to instruction step or
 * breakpoint faults.
 */
INT_DEFINE_BEGIN(single_step)
	IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)

EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
	GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
	GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
	GEN_COMMON single_step
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	single_step_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest data access.
 *
 * Handling:
 * This should always get routed to KVM. In radix MMU mode, this is caused
 * by a guest nested radix access that can't be performed due to the
 * partition scope page table. In hash mode, this can be caused by guests
 * running with translation disabled (virtual real mode) or with VPM enabled.
 * KVM will update the page table structures or disallow the access.
 */
INT_DEFINE_BEGIN(h_data_storage)
	IVEC=0xe00
	IHSRR=1
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)

EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
	GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
	GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
	GEN_COMMON h_data_storage
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	bl	do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
	bl	unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest instruction fetch, similar to HDSI.
 */
INT_DEFINE_BEGIN(h_instr_storage)
	IVEC=0xe20
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)

EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
	GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
	GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
	GEN_COMMON h_instr_storage
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
 */
INT_DEFINE_BEGIN(emulation_assist)
	IVEC=0xe40
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
	GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
	GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
	GEN_COMMON emulation_assist
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	emulation_assist_interrupt
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
 * Exception. It is always taken in real mode but uses HSRR registers
 * unlike SRESET and MCE.
 *
 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * This is a special case: it is handled similarly to machine checks, with an
 * initial real mode handler that is not soft-masked, which attempts to fix
 * the problem, followed by a regular handler that is soft-maskable and
 * reports the problem.
 *
 * The emergency stack is used for the early real mode handler.
 *
 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
 * either use soft-masking for the MCE, or use irq_work for the HMI.
 *
 * KVM:
 * Unlike MCE, this calls into KVM without calling the real mode handler
 * first.
 */
INT_DEFINE_BEGIN(hmi_exception_early)
	IVEC=0xe60
	IHSRR=1
	IREALMODE_COMMON=1
	ISTACK=0
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
	IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
	IVEC=0xe60
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
	GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
	__GEN_REALMODE_COMMON_ENTRY hmi_exception_early

	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/

	__GEN_COMMON_BODY hmi_exception_early

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	cmpdi	cr0,r3,0
	bne	1f

	EXCEPTION_RESTORE_REGS hsrr=1
	HRFI_TO_USER_OR_KERNEL

1:
	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	EXCEPTION_RESTORE_REGS hsrr=1
	GEN_INT_ENTRY hmi_exception, virt=0

EXC_COMMON_BEGIN(hmi_exception_common)
	GEN_COMMON hmi_exception
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	handle_hmi_exception
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsnd doorbell.
 * Similar to the 0xa00 doorbell but for host rather than guest.
 */
INT_DEFINE_BEGIN(h_doorbell)
	IVEC=0xe80
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
	GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
	GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
	GEN_COMMON h_doorbell
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
	bl	doorbell_exception
#else
	bl	unknown_async_exception
#endif
	b	interrupt_return_hsrr


/**
 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
 * This is an asynchronous interrupt in response to an "external exception".
 * Similar to 0x500 but for host only.
 */
INT_DEFINE_BEGIN(h_virt_irq)
	IVEC=0xea0
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
	GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
	GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
	GEN_COMMON h_virt_irq
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	b	interrupt_return_hsrr


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


/*
 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
 * This is an asynchronous interrupt in response to a PMU exception.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
 *
 * Handling:
 * This calls into the perf subsystem.
 *
 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in
 * that it runs under local_irq_disable. However, it may be soft-masked in
 * powerpc-specific code.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(performance_monitor)
	IVEC=0xf00
	IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
	GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
	GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
	GEN_COMMON performance_monitor
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	performance_monitor_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xf20 - Vector Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a vector (or altivec) instruction with MSR[VEC]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(altivec_unavailable)
	IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_unavailable)

EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
	GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
	GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xf40 - VSX Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a VSX instruction with MSR[VSX]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(vsx_unavailable)
	IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(vsx_unavailable)

EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
	GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
	GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
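	/*
	 * Tail call: unlike load_up_fpu above, load_up_vsx does not return
	 * here, it exits via the fast interrupt return path itself.
	 */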
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xf60 - Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can be
 * resolved by the OS (e.g., FSCR, MSR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(facility_unavailable)
	IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
	GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
	GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
	GEN_COMMON facility_unavailable
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	facility_unavailable_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can only
 * be resolved in HV mode (e.g., HFSCR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(h_facility_unavailable)
	IVEC=0xf80
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
	GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
	GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
	GEN_COMMON h_facility_unavailable
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	facility_unavailable_exception
	REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
	b	interrupt_return_hsrr


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
	IVEC=0x1200
	IHSRR=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
	GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
	GEN_COMMON cbe_system_error
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_system_error_exception
	b	interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif

/**
 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
 * This was removed from the ISA before 2.01, which is the earliest
 * 64-bit BookS ISA supported; however, the G5 / 970 implements this
 * interrupt with a non-architected feature available through the support
 * processor interface.
 */
INT_DEFINE_BEGIN(instruction_breakpoint)
	IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
	GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
	GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
	GEN_COMMON instruction_breakpoint
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	instruction_breakpoint_exception
	b	interrupt_return_srr


EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

/**
 * Interrupt 0x1500 - Soft Patch Interrupt
 *
 * Handling:
 * This is an implementation-specific interrupt which can be used for a
 * range of exceptions.
 *
 * This interrupt handler is unique in that it runs the denormal assist
 * code even for guests (and even in guest context) without going to KVM,
 * for speed. POWER9 does not raise denorm exceptions, so this special
 * case could eventually be phased out.
 */
INT_DEFINE_BEGIN(denorm_exception)
	IVEC=0x1500
	IHSRR=1
	IBRANCH_TO_COMMON=0
	IKVM_REAL=1
INT_DEFINE_END(denorm_exception)

EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
	GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
	andis.	r10,r12,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
#endif
	GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
	GEN_INT_ENTRY denorm_exception, virt=1
	andis.	r10,r12,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
	GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

	.Lreg=0
	.rept 32
	fmr	.Lreg,.Lreg
	.Lreg=.Lreg+1
	.endr

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

	.Lreg=0
	.rept 32
	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
	.Lreg=.Lreg+1
	.endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	.Lreg=32
	.rept 32
	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
	.Lreg=.Lreg+1
	.endr

denorm_done:
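	/*
	 * HSRR0 points past the instruction that took the soft patch
	 * interrupt, so step it back by one instruction and return to
	 * re-execute it now that the registers have been denormalised.
	 */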
	mfspr	r11,SPRN_HSRR0
	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_PPR(r13)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	li	r10,0
	stb	r10,PACAHSRR_VALID(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_UNKNOWN
	b	.
#endif

EXC_COMMON_BEGIN(denorm_exception_common)
	GEN_COMMON denorm_exception
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return_hsrr


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
	IVEC=0x1600
	IHSRR=1
INT_DEFINE_END(cbe_maintenance)

EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
	GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
	GEN_COMMON cbe_maintenance
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_maintenance_exception
	b	interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


INT_DEFINE_BEGIN(altivec_assist)
	IVEC=0x1700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_assist)

EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
	GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
	GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
	GEN_COMMON altivec_assist
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
	bl	altivec_assist_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
	bl	unknown_exception
#endif
	b	interrupt_return_srr


#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
	IVEC=0x1800
	IHSRR=1
INT_DEFINE_END(cbe_thermal)

EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
	GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
	GEN_COMMON cbe_thermal
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_thermal_exception
	b	interrupt_return_hsrr

#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif


#ifdef CONFIG_PPC_WATCHDOG

INT_DEFINE_BEGIN(soft_nmi)
	IVEC=0x900
	ISTACK=0
INT_DEFINE_END(soft_nmi)

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq-disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY soft_nmi

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	soft_nmi_interrupt

	/*
	 * Clear MSR_RI before setting SRR0 and SRR1 (the L=1 form of mtmsrd
	 * updates only MSR[EE] and MSR[RI], leaving the rest of the MSR
	 * intact).
	 */
	li	r9,0
	mtmsrd	r9,1

	kuap_kernel_restore r9, r10

	EXCEPTION_RESTORE_REGS hsrr=0
	RFI_TO_KERNEL

#endif /* CONFIG_PPC_WATCHDOG */

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
.macro MASKED_INTERRUPT hsrr=0
	.if \hsrr
masked_Hinterrupt:
	.else
masked_interrupt:
	.endif
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	lbz	r9,PACAIRQHAPPENED(r13)
	or	r9,r9,r10
	stb	r9,PACAIRQHAPPENED(r13)

	.if ! \hsrr
	cmpwi	r10,PACA_IRQ_DEC
	bne	1f
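	/*
	 * Bump DEC to the largest positive 32-bit value to push the next
	 * decrementer exception as far into the future as possible.
	 */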
	LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
	mtspr	SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	b	soft_nmi_common
#else
	b	2f
#endif
	.endif

1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
	beq	2f
	xori	r12,r12,MSR_EE	/* clear MSR_EE */
	.if \hsrr
	mtspr	SPRN_HSRR1,r12
	.else
	mtspr	SPRN_SRR1,r12
	.endif
	ori	r9,r9,PACA_IRQ_HARD_DIS
	stb	r9,PACAIRQHAPPENED(r13)
2:	/* done */
	li	r9,0
	.if \hsrr
	stb	r9,PACAHSRR_VALID(r13)
	.else
	stb	r9,PACASRR_VALID(r13)
	.endif

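	/*
	 * If the interrupted address has a soft-mask restart table entry,
	 * SEARCH_RESTART_TABLE leaves its fixup address in r12 (or 0 if
	 * there is no entry), and we redirect the return there.
	 */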
	SEARCH_RESTART_TABLE
	cmpdi	r12,0
	beq	3f
	.if \hsrr
	mtspr	SPRN_HSRR0,r12
	.else
	mtspr	SPRN_SRR0,r12
	.endif
3:

	ld	r9,PACA_EXGEN+EX_CTR(r13)
	mtctr	r9
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	mtcrf	0x80,r9
	std	r1,PACAR1(r13)
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	/* May return to masked low address where r13 is not set up */
	.if \hsrr
	HRFI_TO_KERNEL
	.else
	RFI_TO_KERNEL
	.endif
	b	.
.endm

TRAMP_REAL_BEGIN(stf_barrier_fallback)
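	/*
	 * Fallback software store-forwarding barrier, patched in as an
	 * alternative when no lighter-weight barrier sequence is available:
	 * a store/reload pair through the paca, a sync, the special-form
	 * no-op (ori 31,31,0) and a run of taken branches.
	 */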
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b
.endm

TRAMP_REAL_BEGIN(entry_flush_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	blr

/*
 * The SCV entry flush happens with interrupts enabled, so it must disable
 * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
 * r10 (containing LR) does not need to be preserved here because scv entry
 * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
 */
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
	li	r10,0
	mtmsrd	r10,1
	lbz	r10,PACAIRQHAPPENED(r13)
	ori	r10,r10,PACA_IRQ_HARD_DIS
	stb	r10,PACAIRQHAPPENED(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	L1D_DISPLACEMENT_FLUSH
	ld	r11,PACA_EXRFI+EX_R11(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1
	blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	hrfid

TRAMP_REAL_BEGIN(rfscv_flush_fallback)
	/* system call volatile */
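	/*
	 * Open-coded variant of L1D_DISPLACEMENT_FLUSH: the GPRs used here
	 * are volatile across scv system call entry, so nothing needs to
	 * be staged in the paca save area.
	 */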
	mr	r7,r13
	GET_PACA(r13);
	mr	r8,r1
	ld	r1,PACAKSAVE(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	li	r9,0
	li	r10,0
	li	r11,0
	mr	r1,r8
	mr	r13,r7
	RFSCV

USE_TEXT_SECTION()

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvm_interrupt:
	/*
	 * The conditional branch in KVMTEST can't reach all the way,
	 * make a stub.
	 */
	b	kvmppc_interrupt
#endif

_GLOBAL(do_uaccess_flush)
	UACCESS_FLUSH_FIXUP_SECTION
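	/*
	 * The nop/blr body below is runtime-patched via the uaccess-flush
	 * fixup section: when the uaccess flush mitigation is enabled,
	 * execution falls through to L1D_DISPLACEMENT_FLUSH instead of
	 * returning immediately.
	 */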
	nop
	nop
	nop
	blr
	L1D_DISPLACEMENT_FLUSH
	blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)


MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

	/*
	 * Relocation-on interrupts: A subset of the interrupts can be
	 * delivered with IR=1/DR=1, if AIL==2 and MSR[HV] won't be changed
	 * by delivering it. Addresses are the same as the original interrupt
	 * addresses, but offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this
	 * mechanism.
	 * KVM: None of these traps are from the guest; anything that
	 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
	 */

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */

USE_FIXED_SECTION(virt_trampolines)
	/*
	 * All code below __end_soft_masked is treated as soft-masked. If
	 * any code runs here with MSR[EE]=1, it must then cope with pending
	 * soft interrupt being raised (i.e., by ensuring it is replayed).
	 *
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/* MSR[RI] should be clear because this uses SRR[01] */
enable_machine_check:
	mflr	r0
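	/*
	 * bcl 20,31,$+4 is the branch-always-and-link idiom for reading
	 * the PC: it sets LR to the address of the next instruction,
	 * letting us compute the absolute address of 1f to rfid to with
	 * MSR[ME] set. disable_machine_check below does the same with
	 * MSR[ME] cleared instead.
	 */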
	bcl	20,31,$+4
0:	mflr	r3
	addi	r3,r3,(1f - 0b)
	mtspr	SPRN_SRR0,r3
	mfmsr	r3
	ori	r3,r3,MSR_ME
	mtspr	SPRN_SRR1,r3
	RFI_TO_KERNEL
1:	mtlr	r0
	blr

/* MSR[RI] should be clear because this uses SRR[01] */
disable_machine_check:
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r3
	addi	r3,r3,(1f - 0b)
	mtspr	SPRN_SRR0,r3
	mfmsr	r3
	li	r4,MSR_ME
	andc	r3,r3,r4
	mtspr	SPRN_SRR1,r3
	RFI_TO_KERNEL
1:	mtlr	r0
	blr
