/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

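/* EL1 runs AArch32 when HCR_EL2.RW is clear for this vcpu. */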
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

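/* Pointer to the vcpu's in-memory copy of HCR_EL2. */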
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

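/*
 * Stop trapping WFE. WFI is left untrapped when interrupts can be delivered
 * directly to the vcpu (mapped vLPIs or vSGIs requested), and trapped
 * otherwise.
 */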
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

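/*
 * Setting HCR_EL2.{API,APK} disables the trapping of pointer authentication
 * instructions and key registers, i.e. it enables ptrauth for the guest.
 */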
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

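/*
 * While the vcpu's sysregs are loaded on the CPU, EL1 state lives in the
 * hardware registers and must be accessed through the sysreg accessors
 * rather than the in-memory context.
 */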
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_ELR);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_ELR);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

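/* True if the vcpu is executing at a privileged level (anything above EL0/USR). */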
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

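/*
 * HPFAR_EL2 reports the faulting IPA right-shifted by 12, starting at bit 4,
 * so shifting the masked register left by 8 yields the page-aligned IPA.
 */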
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

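/*
 * True if the fault status encodes a synchronous external abort or a
 * parity/ECC error, including those taken on translation table walks.
 */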
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

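/*
 * Convert MMIO data written by the guest into host-native byte order,
 * honouring the vcpu's current endianness and the access width.
 */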
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

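/*
 * Skip past the instruction that trapped: advance the PC (via the 32-bit
 * helper for AArch32 guests), clear PSTATE.BTYPE, and advance the
 * single-step state machine.
 */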
static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		kvm_skip_instr32(vcpu, is_wide_instr);
	} else {
		*vcpu_pc(vcpu) += 4;
		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
	}

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */