/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
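
/*
 * The macros and enum above mirror the architectural vector table layout:
 * an entry's offset is the base for the source EL/register width plus the
 * exception type. For example, an IRQ taken from a lower EL running
 * AArch64 vectors to VBAR_ELx + LOWER_EL_AArch64_VECTOR + except_type_irq
 * (0x400 + 0x80 = 0x480).
 */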

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

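/*
 * vcpu_el1_is_32bit() has two definitions: hypervisor code (VHE or nVHE)
 * can trust the in-context HCR_EL2.RW bit directly, while host code
 * derives the answer from the VM-wide register width flag, which is only
 * meaningful once userspace has configured it (hence the WARN below).
 */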
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
			       &kvm->arch.flags));

	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
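
/*
 * Register 31 is the zero register in this encoding: reads return 0 and
 * writes are discarded. An illustrative use is completing an emulated
 * MMIO load by writing back into the register named by the data abort's
 * ISS:
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
 *		     vcpu_data_host_to_guest(vcpu, data, len));
 */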

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
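
/*
 * Worked example: only the two overlapping bits change. DIT is sampled
 * from the incoming value, bits 24 and 21 are both cleared, and DIT is
 * then re-inserted at bit 21, its AArch32 position; every other bit
 * passes through unchanged.
 */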

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
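
/*
 * HPFAR_EL2.FIPA records the faulting IPA at page granularity, so the
 * value returned above is page-aligned; when the exact byte address is
 * needed, the low 12 bits come from FAR_EL2 (see kvm_vcpu_get_hfar()).
 */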

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
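
/*
 * The two-bit SAS field encodes the access size as a power of two, so
 * the helper above returns the size in bytes:
 * 0b00 -> 1, 0b01 -> 2, 0b10 -> 4, 0b11 -> 8.
 */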

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
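
/*
 * These two helpers are each other's inverse, keyed off the guest's
 * current data endianness. An illustrative MMIO store path:
 *
 *	data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *
 * while the matching load completion goes through
 * vcpu_data_host_to_guest() before the result is written back with
 * vcpu_set_reg().
 */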

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
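
/*
 * INCREMENT_PC and PENDING_EXCEPTION are mutually exclusive: a trap
 * handler either skips the emulated instruction or makes the vcpu take
 * an exception, never both. The flags are resolved (and the PC/PSTATE
 * actually updated) before the next guest entry.
 */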

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */