/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

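/*
 * An injected exception is vectored to the base offset for the source
 * EL/register-width pair plus the exception type below, e.g. a synchronous
 * exception taken from a lower EL running AArch64 lands at
 * VBAR_EL1 + 0x400 (LOWER_EL_AArch64_VECTOR + except_type_sync).
 */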
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

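/* The guest's EL1 is AArch32 when HCR_EL2.RW is clear. */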
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

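/*
 * Relax WFx trapping: WFE no longer traps, and WFI trapping is also dropped
 * when interrupts may be delivered to the vcpu without KVM's involvement
 * (GICv4 vLPIs mapped, or GICv4.1 direct SGI injection requested), where an
 * exit on the guest's idle loop buys nothing.
 */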
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

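/*
 * HCR_EL2.API/APK are trap-disable bits: with both set, the guest can use
 * pointer authentication instructions and key registers without trapping
 * to EL2.
 */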
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

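/*
 * VSESR_EL2 provides the syndrome reported to the guest (in ESR_EL1 or
 * DISR_EL1) when a virtual SError is injected via HCR_EL2.VSE.
 */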
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, they may give the wrong result
 * on AArch32 with banked registers.
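 *
 * Register number 31 encodes XZR/WZR: reads return zero and writes are
 * discarded.
 *
 * Illustrative use, completing an emulated MMIO load by writing the value
 * read back into the register named in the fault syndrome:
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
 *		     vcpu_data_host_to_guest(vcpu, data, len));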
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
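 *
 * For example, an AArch64 SPSR value with DIT (bit 24) set maps to an
 * AArch32 view with bit 24 clear (J, RES0) and bit 21 (DIT) set.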
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

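/*
 * HPFAR_EL2 holds the faulting IPA right-shifted by 12 in its FIPA field,
 * which starts at bit 4; masking and shifting left by 8 rebuilds the
 * page-aligned IPA.
 */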
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

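/* Size in bytes (1, 2, 4 or 8) of the access that took the data abort. */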
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

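/*
 * True when the abort was a synchronous external abort or a synchronous
 * parity/ECC error, either on the access itself or on a translation table
 * walk.
 */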
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

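/*
 * A stage 2 fault taken on a stage 1 page table walk is treated as a write,
 * as the walker may be trying to update access/dirty bits in the guest's
 * page tables.
 */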
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

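/*
 * MMIO data is kept in the host's native (little-endian) byte order; these
 * helpers byte-swap it to and from the guest's current data endianness for
 * the given access width.
 */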
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

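/*
 * Queue a PC increment past the emulated instruction; the adjustment is
 * applied, together with any pending exception state, before the vcpu runs
 * again.
 */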
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */