1f3601156SNicholas Piggin/* SPDX-License-Identifier: GPL-2.0-only */
2*39326182SMasahiro Yamada#include <linux/export.h>
3f3601156SNicholas Piggin#include <asm/asm-offsets.h>
4f3601156SNicholas Piggin#include <asm/cache.h>
589d35b23SNicholas Piggin#include <asm/code-patching-asm.h>
6f33e0702SNicholas Piggin#include <asm/exception-64s.h>
7f3601156SNicholas Piggin#include <asm/kvm_asm.h>
8f3601156SNicholas Piggin#include <asm/kvm_book3s_asm.h>
989d35b23SNicholas Piggin#include <asm/mmu.h>
10f3601156SNicholas Piggin#include <asm/ppc_asm.h>
1189d35b23SNicholas Piggin#include <asm/ptrace.h>
12f3601156SNicholas Piggin#include <asm/reg.h>
1389d35b23SNicholas Piggin#include <asm/ultravisor-api.h>
14f3601156SNicholas Piggin
15f3601156SNicholas Piggin/*
1631c67cfeSNicholas Piggin * These are branched to from interrupt handlers in exception-64s.S which set
17f3601156SNicholas Piggin * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
18f3601156SNicholas Piggin */
19e2762743SNicholas Piggin
2004ece7b6SNicholas Piggin/*
2104ece7b6SNicholas Piggin * This is a hcall, so register convention is as
22e2762743SNicholas Piggin * Documentation/powerpc/papr_hcalls.rst.
2304ece7b6SNicholas Piggin *
2404ece7b6SNicholas Piggin * This may also be a syscall from PR-KVM userspace that is to be
2504ece7b6SNicholas Piggin * reflected to the PR guest kernel, so registers may be set up for
2604ece7b6SNicholas Piggin * a system call rather than hcall. We don't currently clobber
2704ece7b6SNicholas Piggin * anything here, but the 0xc00 handler has already clobbered CTR
2804ece7b6SNicholas Piggin * and CR0, so PR-KVM can not support a guest kernel that preserves
2904ece7b6SNicholas Piggin * those registers across its system calls.
30e2762743SNicholas Piggin *
31e2762743SNicholas Piggin * The state of registers is as kvmppc_interrupt, except CFAR is not
32e2762743SNicholas Piggin * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
3304ece7b6SNicholas Piggin */
34e2762743SNicholas Piggin.global	kvmppc_hcall
35e2762743SNicholas Piggin.balign IFETCH_ALIGN_BYTES
36e2762743SNicholas Pigginkvmppc_hcall:
3789d35b23SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* A guest entered via the P9 path takes the dedicated hcall exit */
3889d35b23SNicholas Piggin	lbz	r10,HSTATE_IN_GUEST(r13)
390bf7e1b2SNicholas Piggin	cmpwi	r10,KVM_GUEST_MODE_HV_P9
4089d35b23SNicholas Piggin	beq	kvmppc_p9_exit_hcall
4189d35b23SNicholas Piggin#endif
	/*
	 * Put guest R13 back into SPRN_SCRATCH0 (it was saved to the PACA
	 * EXGEN area by the 0xc00 handler) and load the 0xc00 trap number
	 * into r10, so register state matches kvmppc_interrupt.
	 */
42e2762743SNicholas Piggin	ld	r10,PACA_EXGEN+EX_R13(r13)
43e2762743SNicholas Piggin	SET_SCRATCH0(r10)
44e2762743SNicholas Piggin	li	r10,0xc00
45e2762743SNicholas Piggin	/* Now we look like kvmppc_interrupt */
46e2762743SNicholas Piggin	li	r11,PACA_EXGEN
47e2762743SNicholas Piggin	b	.Lgot_save_area
4831c67cfeSNicholas Piggin
4969fdd674SNicholas Piggin/*
5069fdd674SNicholas Piggin * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
5169fdd674SNicholas Piggin * call convention:
5269fdd674SNicholas Piggin *
5369fdd674SNicholas Piggin * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
5469fdd674SNicholas Piggin * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
5569fdd674SNicholas Piggin * guest R13 also saved in SCRATCH0
5669fdd674SNicholas Piggin * R13		= PACA
5769fdd674SNicholas Piggin * R11		= (H)SRR0
5869fdd674SNicholas Piggin * R12		= (H)SRR1
5969fdd674SNicholas Piggin * R9		= guest CR
6069fdd674SNicholas Piggin * PPR is set to medium
6169fdd674SNicholas Piggin *
6269fdd674SNicholas Piggin * With the addition for KVM:
6369fdd674SNicholas Piggin * R10		= trap vector
6469fdd674SNicholas Piggin */
65f3601156SNicholas Piggin.global	kvmppc_interrupt
66f3601156SNicholas Piggin.balign IFETCH_ALIGN_BYTES
67f3601156SNicholas Pigginkvmppc_interrupt:
6889d35b23SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* A guest entered via the P9 path takes the dedicated interrupt exit */
6989d35b23SNicholas Piggin	std	r10,HSTATE_SCRATCH0(r13)
7089d35b23SNicholas Piggin	lbz	r10,HSTATE_IN_GUEST(r13)
710bf7e1b2SNicholas Piggin	cmpwi	r10,KVM_GUEST_MODE_HV_P9
7289d35b23SNicholas Piggin	beq	kvmppc_p9_exit_interrupt
7389d35b23SNicholas Piggin	ld	r10,HSTATE_SCRATCH0(r13)
7489d35b23SNicholas Piggin#endif
	/*
	 * Select the PACA save area the handler used, based on the trap
	 * vector in r10: > 0x200 uses EXGEN, == 0x200 (machine check) uses
	 * EXMC, < 0x200 (system reset) uses EXNMI.
	 */
7569fdd674SNicholas Piggin	li	r11,PACA_EXGEN
7669fdd674SNicholas Piggin	cmpdi	r10,0x200
77e2762743SNicholas Piggin	bgt+	.Lgot_save_area
7869fdd674SNicholas Piggin	li	r11,PACA_EXMC
79e2762743SNicholas Piggin	beq	.Lgot_save_area
8069fdd674SNicholas Piggin	li	r11,PACA_EXNMI
81e2762743SNicholas Piggin.Lgot_save_area:
	/* r11 = pointer to the save area in this CPU's PACA */
82e2762743SNicholas Piggin	add	r11,r11,r13
8369fdd674SNicholas PigginBEGIN_FTR_SECTION
8469fdd674SNicholas Piggin	ld	r12,EX_CFAR(r11)
8569fdd674SNicholas Piggin	std	r12,HSTATE_CFAR(r13)
8669fdd674SNicholas PigginEND_FTR_SECTION_IFSET(CPU_FTR_CFAR)
8769fdd674SNicholas Piggin	ld	r12,EX_CTR(r11)
8869fdd674SNicholas Piggin	mtctr	r12
8969fdd674SNicholas PigginBEGIN_FTR_SECTION
9069fdd674SNicholas Piggin	ld	r12,EX_PPR(r11)
9169fdd674SNicholas Piggin	std	r12,HSTATE_PPR(r13)
9269fdd674SNicholas PigginEND_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
9369fdd674SNicholas Piggin	ld	r12,EX_R12(r11)
9469fdd674SNicholas Piggin	std	r12,HSTATE_SCRATCH0(r13)
	/* Pack guest CR (r9) and trap vector (r10) into r12, free r9-r11 */
9569fdd674SNicholas Piggin	sldi	r12,r9,32
9669fdd674SNicholas Piggin	or	r12,r12,r10
9769fdd674SNicholas Piggin	ld	r9,EX_R9(r11)
9869fdd674SNicholas Piggin	ld	r10,EX_R10(r11)
9969fdd674SNicholas Piggin	ld	r11,EX_R11(r11)
10069fdd674SNicholas Piggin
101f3601156SNicholas Piggin	/*
10269fdd674SNicholas Piggin	 * Hcalls and other interrupts come here after normalising register
10369fdd674SNicholas Piggin	 * contents and save locations:
10469fdd674SNicholas Piggin	 *
105f3601156SNicholas Piggin	 * R12		= (guest CR << 32) | interrupt vector
106f3601156SNicholas Piggin	 * R13		= PACA
10769fdd674SNicholas Piggin	 * guest R12 saved in shadow HSTATE_SCRATCH0
108f3601156SNicholas Piggin	 * guest R13 saved in SPRN_SCRATCH0
109f3601156SNicholas Piggin	 */
110f3601156SNicholas Piggin	std	r9,HSTATE_SCRATCH2(r13)
111f3601156SNicholas Piggin	lbz	r9,HSTATE_IN_GUEST(r13)
112f33e0702SNicholas Piggin	cmpwi	r9,KVM_GUEST_MODE_SKIP
113f33e0702SNicholas Piggin	beq-	.Lmaybe_skip
114f33e0702SNicholas Piggin.Lno_skip:
	/* Dispatch to the HV or PR KVM interrupt handler as configured */
115f33e0702SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
116f3601156SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
117f3601156SNicholas Piggin	cmpwi	r9,KVM_GUEST_MODE_GUEST
118f3601156SNicholas Piggin	beq	kvmppc_interrupt_pr
119f3601156SNicholas Piggin#endif
120f3601156SNicholas Piggin	b	kvmppc_interrupt_hv
121f3601156SNicholas Piggin#else
122f3601156SNicholas Piggin	b	kvmppc_interrupt_pr
123f3601156SNicholas Piggin#endif
124f33e0702SNicholas Piggin
125f33e0702SNicholas Piggin/*
126f33e0702SNicholas Piggin * "Skip" interrupts are part of a trick KVM uses with hash guests to load
12787c78b61SMichael Ellerman * the faulting instruction in guest memory from the hypervisor without
128f33e0702SNicholas Piggin * walking page tables.
129f33e0702SNicholas Piggin *
130f33e0702SNicholas Piggin * When the guest takes a fault that requires the hypervisor to load the
131f33e0702SNicholas Piggin * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
132f33e0702SNicholas Piggin * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
133f33e0702SNicholas Piggin * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
134f33e0702SNicholas Piggin * but loads and stores will access the guest context. This is used to load
135f33e0702SNicholas Piggin * the faulting instruction using the faulting guest effective address.
136f33e0702SNicholas Piggin *
137f33e0702SNicholas Piggin * However the guest context may not be able to translate, or it may cause a
138f33e0702SNicholas Piggin * machine check or other issue, which results in a fault in the host
139f33e0702SNicholas Piggin * (even with KVM-HV).
140f33e0702SNicholas Piggin *
141f33e0702SNicholas Piggin * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
142f33e0702SNicholas Piggin * are (or are likely) caused by that load, the instruction is skipped by
143f33e0702SNicholas Piggin * just returning with the PC advanced +4, where it is noticed the load did
144f33e0702SNicholas Piggin * not execute and it goes to the slow path which walks the page tables to
145f33e0702SNicholas Piggin * read guest memory.
146f33e0702SNicholas Piggin */
147f33e0702SNicholas Piggin.Lmaybe_skip:
	/* Only faults plausibly caused by the guest-context load are skipped */
148f33e0702SNicholas Piggin	cmpwi	r12,BOOK3S_INTERRUPT_MACHINE_CHECK
149f33e0702SNicholas Piggin	beq	1f
150f33e0702SNicholas Piggin	cmpwi	r12,BOOK3S_INTERRUPT_DATA_STORAGE
151f33e0702SNicholas Piggin	beq	1f
152f33e0702SNicholas Piggin	cmpwi	r12,BOOK3S_INTERRUPT_DATA_SEGMENT
153f33e0702SNicholas Piggin	beq	1f
154f33e0702SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
155f33e0702SNicholas Piggin	/* HSRR interrupts get 2 added to interrupt number */
156f33e0702SNicholas Piggin	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
157f33e0702SNicholas Piggin	beq	2f
158f33e0702SNicholas Piggin#endif
159f33e0702SNicholas Piggin	b	.Lno_skip
	/*
	 * SRR case: skip the faulting instruction by advancing SRR0 by 4,
	 * restore the registers clobbered above, and return.
	 */
160f33e0702SNicholas Piggin1:	mfspr	r9,SPRN_SRR0
161f33e0702SNicholas Piggin	addi	r9,r9,4
162f33e0702SNicholas Piggin	mtspr	SPRN_SRR0,r9
163f33e0702SNicholas Piggin	ld	r12,HSTATE_SCRATCH0(r13)
164f33e0702SNicholas Piggin	ld	r9,HSTATE_SCRATCH2(r13)
165f33e0702SNicholas Piggin	GET_SCRATCH0(r13)
166f33e0702SNicholas Piggin	RFI_TO_KERNEL
167f33e0702SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* HSRR case: same as above but via HSRR0 and hrfid */
168f33e0702SNicholas Piggin2:	mfspr	r9,SPRN_HSRR0
169f33e0702SNicholas Piggin	addi	r9,r9,4
170f33e0702SNicholas Piggin	mtspr	SPRN_HSRR0,r9
171f33e0702SNicholas Piggin	ld	r12,HSTATE_SCRATCH0(r13)
172f33e0702SNicholas Piggin	ld	r9,HSTATE_SCRATCH2(r13)
173f33e0702SNicholas Piggin	GET_SCRATCH0(r13)
174f33e0702SNicholas Piggin	HRFI_TO_KERNEL
175f33e0702SNicholas Piggin#endif
17689d35b23SNicholas Piggin
17789d35b23SNicholas Piggin#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
17889d35b23SNicholas Piggin
17989d35b23SNicholas Piggin/* Stack frame offsets for kvmppc_p9_enter_guest */
18089d35b23SNicholas Piggin#define SFS			(144 + STACK_FRAME_MIN_SIZE)
18189d35b23SNicholas Piggin#define STACK_SLOT_NVGPRS	(SFS - 144)	/* 18 gprs */
18289d35b23SNicholas Piggin
18389d35b23SNicholas Piggin/*
18489d35b23SNicholas Piggin * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
18589d35b23SNicholas Piggin *
1860bf7e1b2SNicholas Piggin * Enter the guest on a ISAv3.0 or later system.
18789d35b23SNicholas Piggin */
18889d35b23SNicholas Piggin.balign	IFETCH_ALIGN_BYTES
18989d35b23SNicholas Piggin_GLOBAL(kvmppc_p9_enter_guest)
19089d35b23SNicholas PigginEXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
	/* Set up a stack frame; record host r1 so the exit path can find it */
19189d35b23SNicholas Piggin	mflr	r0
19289d35b23SNicholas Piggin	std	r0,PPC_LR_STKOFF(r1)
19389d35b23SNicholas Piggin	stdu	r1,-SFS(r1)
19489d35b23SNicholas Piggin
19589d35b23SNicholas Piggin	std	r1,HSTATE_HOST_R1(r13)
19689d35b23SNicholas Piggin
	/* Save host CR in the caller's frame (restored by the exit path) */
19789d35b23SNicholas Piggin	mfcr	r4
19889d35b23SNicholas Piggin	stw	r4,SFS+8(r1)
19989d35b23SNicholas Piggin
	/* Save host non-volatile GPRs r14-r31 on the stack */
20089d35b23SNicholas Piggin	reg = 14
20189d35b23SNicholas Piggin	.rept	18
20289d35b23SNicholas Piggin	std	reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
20389d35b23SNicholas Piggin	reg = reg + 1
20489d35b23SNicholas Piggin	.endr
20589d35b23SNicholas Piggin
	/* Load guest LR, CTR, XER from the vcpu (r3) */
20689d35b23SNicholas Piggin	ld	r4,VCPU_LR(r3)
20789d35b23SNicholas Piggin	mtlr	r4
20889d35b23SNicholas Piggin	ld	r4,VCPU_CTR(r3)
20989d35b23SNicholas Piggin	mtctr	r4
21089d35b23SNicholas Piggin	ld	r4,VCPU_XER(r3)
21189d35b23SNicholas Piggin	mtspr	SPRN_XER,r4
21289d35b23SNicholas Piggin
	/*
	 * Stage guest CR in r1 (host r1 is already saved in HSTATE_HOST_R1),
	 * leaving r4 free for the loads below; mtcr happens after the
	 * secure-guest check.
	 */
21389d35b23SNicholas Piggin	ld	r1,VCPU_CR(r3)
21489d35b23SNicholas Piggin
21589d35b23SNicholas PigginBEGIN_FTR_SECTION
21689d35b23SNicholas Piggin	ld	r4,VCPU_CFAR(r3)
21789d35b23SNicholas Piggin	mtspr	SPRN_CFAR,r4
21889d35b23SNicholas PigginEND_FTR_SECTION_IFSET(CPU_FTR_CFAR)
21989d35b23SNicholas PigginBEGIN_FTR_SECTION
22089d35b23SNicholas Piggin	ld	r4,VCPU_PPR(r3)
22189d35b23SNicholas Piggin	mtspr	SPRN_PPR,r4
22289d35b23SNicholas PigginEND_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
22389d35b23SNicholas Piggin
	/* Restore guest r4-r31 (r4 is reloaded again after the check below) */
22489d35b23SNicholas Piggin	reg = 4
22589d35b23SNicholas Piggin	.rept	28
22689d35b23SNicholas Piggin	ld	reg,__VCPU_GPR(reg)(r3)
22789d35b23SNicholas Piggin	reg = reg + 1
22889d35b23SNicholas Piggin	.endr
22989d35b23SNicholas Piggin
	/* Secure (ultravisor) guests must be re-entered via UV_RETURN */
23089d35b23SNicholas Piggin	ld	r4,VCPU_KVM(r3)
23189d35b23SNicholas Piggin	lbz	r4,KVM_SECURE_GUEST(r4)
23289d35b23SNicholas Piggin	cmpdi	r4,0
23389d35b23SNicholas Piggin	ld	r4,VCPU_GPR(R4)(r3)
23489d35b23SNicholas Piggin	bne	.Lret_to_ultra
23589d35b23SNicholas Piggin
	/* r1 still holds guest CR from above */
23689d35b23SNicholas Piggin	mtcr	r1
23789d35b23SNicholas Piggin
	/* Restore the last guest GPRs and enter the guest */
23889d35b23SNicholas Piggin	ld	r0,VCPU_GPR(R0)(r3)
23989d35b23SNicholas Piggin	ld	r1,VCPU_GPR(R1)(r3)
24089d35b23SNicholas Piggin	ld	r2,VCPU_GPR(R2)(r3)
24189d35b23SNicholas Piggin	ld	r3,VCPU_GPR(R3)(r3)
24289d35b23SNicholas Piggin
24389d35b23SNicholas Piggin	HRFI_TO_GUEST
24489d35b23SNicholas Piggin	b	.
24589d35b23SNicholas Piggin
24689d35b23SNicholas Piggin	/*
24789d35b23SNicholas Piggin	 * Use UV_RETURN ultracall to return control back to the Ultravisor
24889d35b23SNicholas Piggin	 * after processing an hypercall or interrupt that was forwarded
24989d35b23SNicholas Piggin	 * (a.k.a. reflected) to the Hypervisor.
25089d35b23SNicholas Piggin	 *
25189d35b23SNicholas Piggin	 * All registers have already been reloaded except the ucall requires:
25289d35b23SNicholas Piggin	 *   R0 = hcall result
25389d35b23SNicholas Piggin	 *   R2 = SRR1, so UV can detect a synthesized interrupt (if any)
25489d35b23SNicholas Piggin	 *   R3 = UV_RETURN
25589d35b23SNicholas Piggin	 */
25689d35b23SNicholas Piggin.Lret_to_ultra:
25789d35b23SNicholas Piggin	mtcr	r1
25889d35b23SNicholas Piggin	ld	r1,VCPU_GPR(R1)(r3)
25989d35b23SNicholas Piggin
26089d35b23SNicholas Piggin	ld	r0,VCPU_GPR(R3)(r3)
26189d35b23SNicholas Piggin	mfspr	r2,SPRN_SRR1
26289d35b23SNicholas Piggin	LOAD_REG_IMMEDIATE(r3, UV_RETURN)
	/* sc 2 = ultracall */
26389d35b23SNicholas Piggin	sc	2
26489d35b23SNicholas Piggin
26589d35b23SNicholas Piggin/*
26689d35b23SNicholas Piggin * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
26789d35b23SNicholas Piggin * above if the interrupt was taken for a guest that was entered via
26889d35b23SNicholas Piggin * kvmppc_p9_enter_guest().
26989d35b23SNicholas Piggin *
27089d35b23SNicholas Piggin * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
27189d35b23SNicholas Piggin * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
27289d35b23SNicholas Piggin * establishes the host stack and registers to return from the
27389d35b23SNicholas Piggin * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
27489d35b23SNicholas Piggin * (SPRs and FP, VEC, etc).
27589d35b23SNicholas Piggin */
27689d35b23SNicholas Piggin.balign	IFETCH_ALIGN_BYTES
27789d35b23SNicholas Pigginkvmppc_p9_exit_hcall:
	/* Fake up an 0xc00 interrupt frame, then fall through to the
	 * common interrupt exit below. */
27889d35b23SNicholas Piggin	mfspr	r11,SPRN_SRR0
27989d35b23SNicholas Piggin	mfspr	r12,SPRN_SRR1
28089d35b23SNicholas Piggin	li	r10,0xc00
28189d35b23SNicholas Piggin	std	r10,HSTATE_SCRATCH0(r13)
28289d35b23SNicholas Piggin
28389d35b23SNicholas Piggin.balign	IFETCH_ALIGN_BYTES
28489d35b23SNicholas Pigginkvmppc_p9_exit_interrupt:
28589d35b23SNicholas Piggin	/*
2860bf7e1b2SNicholas Piggin	 * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
28789d35b23SNicholas Piggin	 * hypervisor, that means we can't return from the entry stack.
28889d35b23SNicholas Piggin	 */
	/* Test MSR[HV] in r12 ((H)SRR1) */
28989d35b23SNicholas Piggin	rldicl. r10,r12,64-MSR_HV_LG,63
29089d35b23SNicholas Piggin	bne-	kvmppc_p9_bad_interrupt
29189d35b23SNicholas Piggin
	/* Stash guest r1/r3, recover host stack and vcpu pointer */
29289d35b23SNicholas Piggin	std     r1,HSTATE_SCRATCH1(r13)
29389d35b23SNicholas Piggin	std     r3,HSTATE_SCRATCH2(r13)
29489d35b23SNicholas Piggin	ld	r1,HSTATE_HOST_R1(r13)
29589d35b23SNicholas Piggin	ld	r3,HSTATE_KVM_VCPU(r13)
29689d35b23SNicholas Piggin
29789d35b23SNicholas Piggin	std	r9,VCPU_CR(r3)
29889d35b23SNicholas Piggin
29989d35b23SNicholas Piggin1:
	/* Save guest NIA ((H)SRR0) and MSR ((H)SRR1) */
30089d35b23SNicholas Piggin	std	r11,VCPU_PC(r3)
30189d35b23SNicholas Piggin	std	r12,VCPU_MSR(r3)
30289d35b23SNicholas Piggin
	/* Save guest non-volatile GPRs r14-r31 to the vcpu */
30389d35b23SNicholas Piggin	reg = 14
30489d35b23SNicholas Piggin	.rept	18
30589d35b23SNicholas Piggin	std	reg,__VCPU_GPR(reg)(r3)
30689d35b23SNicholas Piggin	reg = reg + 1
30789d35b23SNicholas Piggin	.endr
30889d35b23SNicholas Piggin
30989d35b23SNicholas Piggin	/* r1, r3, r9-r13 are saved to vcpu by C code */
31089d35b23SNicholas Piggin	std	r0,VCPU_GPR(R0)(r3)
31189d35b23SNicholas Piggin	std	r2,VCPU_GPR(R2)(r3)
31289d35b23SNicholas Piggin	reg = 4
31389d35b23SNicholas Piggin	.rept	5
31489d35b23SNicholas Piggin	std	reg,__VCPU_GPR(reg)(r3)
31589d35b23SNicholas Piggin	reg = reg + 1
31689d35b23SNicholas Piggin	.endr
31789d35b23SNicholas Piggin
	/* Restore the host kernel TOC pointer (r2) */
3188e93fb33SNicholas Piggin	LOAD_PACA_TOC()
31989d35b23SNicholas Piggin
	/* Save guest LR and XER to the vcpu */
32089d35b23SNicholas Piggin	mflr	r4
32189d35b23SNicholas Piggin	std	r4,VCPU_LR(r3)
32289d35b23SNicholas Piggin	mfspr	r4,SPRN_XER
32389d35b23SNicholas Piggin	std	r4,VCPU_XER(r3)
32489d35b23SNicholas Piggin
	/* Restore host non-volatile GPRs from the stack */
32589d35b23SNicholas Piggin	reg = 14
32689d35b23SNicholas Piggin	.rept	18
32789d35b23SNicholas Piggin	ld	reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
32889d35b23SNicholas Piggin	reg = reg + 1
32989d35b23SNicholas Piggin	.endr
33089d35b23SNicholas Piggin
	/* Restore host CR saved by kvmppc_p9_enter_guest */
33189d35b23SNicholas Piggin	lwz	r4,SFS+8(r1)
33289d35b23SNicholas Piggin	mtcr	r4
33389d35b23SNicholas Piggin
33489d35b23SNicholas Piggin	/*
33589d35b23SNicholas Piggin	 * Flush the link stack here, before executing the first blr on the
33689d35b23SNicholas Piggin	 * way out of the guest.
33789d35b23SNicholas Piggin	 *
33889d35b23SNicholas Piggin	 * The link stack won't match coming out of the guest anyway so the
33989d35b23SNicholas Piggin	 * only cost is the flush itself. The call clobbers r0.
34089d35b23SNicholas Piggin	 */
34189d35b23SNicholas Piggin1:	nop
34289d35b23SNicholas Piggin	patch_site 1b patch__call_kvm_flush_link_stack_p9
34389d35b23SNicholas Piggin
	/* Tear down the frame and return to kvmppc_p9_enter_guest's caller */
34489d35b23SNicholas Piggin	addi	r1,r1,SFS
34589d35b23SNicholas Piggin	ld	r0,PPC_LR_STKOFF(r1)
34689d35b23SNicholas Piggin	mtlr	r0
34789d35b23SNicholas Piggin	blr
34889d35b23SNicholas Piggin
34989d35b23SNicholas Piggin/*
35089d35b23SNicholas Piggin * Took an interrupt somewhere right before HRFID to guest, so registers are
35189d35b23SNicholas Piggin * in a bad way. Return things hopefully enough to run host virtual code and
35289d35b23SNicholas Piggin * run the Linux interrupt handler (SRESET or MCE) to print something useful.
35389d35b23SNicholas Piggin *
35489d35b23SNicholas Piggin * We could be really clever and save all host registers in known locations
35589d35b23SNicholas Piggin * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
35689d35b23SNicholas Piggin * return address to a fixup that sets them up again. But that's a lot of
35789d35b23SNicholas Piggin * effort for a small bit of code. Lots of other things to do first.
35889d35b23SNicholas Piggin */
35989d35b23SNicholas Pigginkvmppc_p9_bad_interrupt:
3600bf7e1b2SNicholas PigginBEGIN_MMU_FTR_SECTION
3610bf7e1b2SNicholas Piggin	/*
3620bf7e1b2SNicholas Piggin	 * Hash host doesn't try to recover MMU (requires host SLB reload)
3630bf7e1b2SNicholas Piggin	 */
3640bf7e1b2SNicholas Piggin	b	.
3650bf7e1b2SNicholas PigginEND_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
36689d35b23SNicholas Piggin	/*
36789d35b23SNicholas Piggin	 * Clean up guest registers to give host a chance to run.
36889d35b23SNicholas Piggin	 */
36989d35b23SNicholas Piggin	li	r10,0
37089d35b23SNicholas Piggin	mtspr	SPRN_AMR,r10
37189d35b23SNicholas Piggin	mtspr	SPRN_IAMR,r10
37289d35b23SNicholas Piggin	mtspr	SPRN_CIABR,r10
37389d35b23SNicholas Piggin	mtspr	SPRN_DAWRX0,r10
37489d35b23SNicholas PigginBEGIN_FTR_SECTION
37589d35b23SNicholas Piggin	mtspr	SPRN_DAWRX1,r10
37689d35b23SNicholas PigginEND_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
37789d35b23SNicholas Piggin
37889d35b23SNicholas Piggin	/*
379cf3b16cfSNicholas Piggin	 * Switch to host MMU mode (don't have the real host PID but we aren't
380cf3b16cfSNicholas Piggin	 * going back to userspace).
38189d35b23SNicholas Piggin	 */
382cf3b16cfSNicholas Piggin	hwsync
383cf3b16cfSNicholas Piggin	isync
384cf3b16cfSNicholas Piggin
385cf3b16cfSNicholas Piggin	mtspr	SPRN_PID,r10
386cf3b16cfSNicholas Piggin
	/* Restore host LPID and LPCR from the kvm struct */
38789d35b23SNicholas Piggin	ld	r10, HSTATE_KVM_VCPU(r13)
38889d35b23SNicholas Piggin	ld	r10, VCPU_KVM(r10)
38989d35b23SNicholas Piggin	lwz	r10, KVM_HOST_LPID(r10)
39089d35b23SNicholas Piggin	mtspr	SPRN_LPID,r10
39189d35b23SNicholas Piggin
39289d35b23SNicholas Piggin	ld	r10, HSTATE_KVM_VCPU(r13)
39389d35b23SNicholas Piggin	ld	r10, VCPU_KVM(r10)
39489d35b23SNicholas Piggin	ld	r10, KVM_HOST_LPCR(r10)
39589d35b23SNicholas Piggin	mtspr	SPRN_LPCR,r10
39689d35b23SNicholas Piggin
397cf3b16cfSNicholas Piggin	isync
398cf3b16cfSNicholas Piggin
39989d35b23SNicholas Piggin	/*
40089d35b23SNicholas Piggin	 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
40189d35b23SNicholas Piggin	 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
40289d35b23SNicholas Piggin	 */
40389d35b23SNicholas Piggin	li	r10,KVM_GUEST_MODE_NONE
40489d35b23SNicholas Piggin	stb	r10,HSTATE_IN_GUEST(r13)
40589d35b23SNicholas Piggin	li	r10,MSR_RI
40689d35b23SNicholas Piggin	andc	r12,r12,r10
40789d35b23SNicholas Piggin
40889d35b23SNicholas Piggin	/*
40989d35b23SNicholas Piggin	 * Go back to interrupt handler. MCE and SRESET have their specific
41089d35b23SNicholas Piggin	 * PACA save area so they should be used directly. They set up their
41189d35b23SNicholas Piggin	 * own stack. The other handlers all use EXGEN. They will use the
41289d35b23SNicholas Piggin	 * guest r1 if it looks like a kernel stack, so just load the
41389d35b23SNicholas Piggin	 * emergency stack and go to program check for all other interrupts.
41489d35b23SNicholas Piggin	 */
41589d35b23SNicholas Piggin	ld	r10,HSTATE_SCRATCH0(r13)
41689d35b23SNicholas Piggin	cmpwi	r10,BOOK3S_INTERRUPT_MACHINE_CHECK
417af41d286SChristophe Leroy	beq	.Lcall_machine_check_common
41889d35b23SNicholas Piggin
41989d35b23SNicholas Piggin	cmpwi	r10,BOOK3S_INTERRUPT_SYSTEM_RESET
420af41d286SChristophe Leroy	beq	.Lcall_system_reset_common
42189d35b23SNicholas Piggin
42289d35b23SNicholas Piggin	b	.
423af41d286SChristophe Leroy
	/*
	 * Local trampolines for the conditional branches above — presumably
	 * because the common handlers may be out of direct conditional
	 * branch range. NOTE(review): confirm against linker layout.
	 */
424af41d286SChristophe Leroy.Lcall_machine_check_common:
425af41d286SChristophe Leroy	b	machine_check_common
426af41d286SChristophe Leroy
427af41d286SChristophe Leroy.Lcall_system_reset_common:
428af41d286SChristophe Leroy	b	system_reset_common
42989d35b23SNicholas Piggin#endif
430