/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

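/*
 * Illustrative sketch, not part of this header's API: the bank offsets
 * above combine with enum exception_type to form the final offset from
 * VBAR_ELx when an exception is emulated. For example, an IRQ taken
 * from the current EL while using SP_ELx is vectored at:
 *
 *	CURRENT_EL_SP_ELx_VECTOR + except_type_irq == 0x200 + 0x80 == 0x280
 *
 * which matches the ARMv8 exception vector table layout.
 */
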
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
			       &kvm->arch.flags));

	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until the
		 * M and C bits are set in SCTLR_EL1, so that we can detect
		 * when the guest enables its MMU and perform the necessary
		 * cache maintenance at that point.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

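/*
 * Illustrative sketch, not taken from this file: callers that need to
 * tweak a trap bit at run time go through vcpu_hcr() rather than caching
 * the value themselves, e.g. re-enabling VM register traps:
 *
 *	*vcpu_hcr(vcpu) |= HCR_TVM;
 *
 * The change is then picked up by the world-switch code, which loads
 * vcpu->arch.hcr_el2 into HCR_EL2 on the next guest entry.
 */
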
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, they may give the wrong result
 * on AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

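/*
 * Illustrative sketch, mirroring what the MMIO emulation code does: the
 * register number is always taken from the ESR_EL2 syndrome, never from
 * a decoded instruction. For a trapped data abort:
 *
 *	u64 esr = kvm_vcpu_get_esr(vcpu);
 *	int rt = (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 *	unsigned long data = vcpu_get_reg(vcpu, rt);
 *
 * Reading XZR (reg_num == 31) yields 0 and writes to it are discarded,
 * matching the architectural behaviour of the zero register.
 */
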
static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * both the E2H and TGE bits are set. The latter means we are in
	 * the user space of the VHE kernel. The ARMv8.1 ARM describes this
	 * as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
	 * rest of the KVM code, and will result in a misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

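/*
 * Worked example of the transformation described above (illustrative
 * only): for a guest in AArch32 ABT mode (mode bits 0x17) with DIT set,
 * the AArch64 view carries DIT at bit 24, so:
 *
 *	host_spsr_to_spsr32(0x01000017) == 0x00200017
 *
 * i.e. DIT moves from bit 24 (AArch64 view) to bit 21 (AArch32 view),
 * and bits 24 and 21 of the input are otherwise discarded.
 */
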
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

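/*
 * Illustrative example: for a trapped conditional AArch32 instruction,
 * ESR_ELx.CV (bit 24) says whether the COND field (bits 23:20) is valid.
 * With CV set and COND == 0xe ("always"):
 *
 *	esr == BIT(24) | (0xe << 20)	->  kvm_vcpu_get_condition() == 0xe
 *
 * A return value of -1 means no condition was captured, and the caller
 * (e.g. kvm_condition_valid32()) falls back to deriving the condition
 * from PSTATE/ITSTATE.
 */
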
static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

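/*
 * Illustrative sketch of how the accessors above combine when emulating
 * an MMIO data abort (the real logic lives in the MMIO handling code):
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu)) {
 *		bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *		unsigned int len = kvm_vcpu_dabt_get_as(vcpu);
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		...
 *	}
 *
 * Note the S1PTW check: a stage-1 page table walk hitting MMIO must not
 * be treated as an ordinary read or write.
 */
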
/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

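/*
 * Worked example (illustrative): a big-endian guest stores the 16-bit
 * value 0x1234 to an emulated device while the host runs little-endian.
 * The trapped register holds the guest's view of the value, so:
 *
 *	vcpu_data_guest_to_host(vcpu, 0x1234, 2) == 0x3412
 *
 * i.e. the bytes are swapped into host byte order before the MMIO
 * emulation code interprets them; vcpu_data_host_to_guest() below
 * performs the inverse conversion on the read path.
 */
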
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

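/*
 * Illustrative sketch: INCREMENT_PC and PENDING_EXCEPTION are mutually
 * exclusive, which is what the WARN_ONs above enforce. Fault injection
 * code pends an exception with, for instance:
 *
 *	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 *
 * and the pending PC adjustment or exception is folded into the guest
 * state on the next entry, before the vcpu runs again.
 */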

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */