/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

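/* EL1 is AArch32 when HCR_EL2.RW is clear; RW set selects AArch64. */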
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}
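
/*
 * Illustrative example, derived from the code above: on a VHE host with
 * the RAS and FWB capabilities, a 64-bit vcpu leaves vcpu_reset_hcr()
 * with
 *
 *	HCR_GUEST_FLAGS | HCR_E2H | HCR_TEA | HCR_TERR | HCR_FWB | HCR_TID3
 *
 * plus HCR_TID2 on systems with mismatched cache types.
 */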

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

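/*
 * Toggle the vcpu's WFE/WFI traps (HCR_EL2.TWE/TWI). Note that even when
 * "clearing" the traps, WFI keeps trapping unless direct interrupt
 * injection (GICv4 vLPIs or vSGIs) is in use, in which case the GIC can
 * wake the vcpu without KVM's involvement.
 */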
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

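/*
 * HCR_EL2.API/APK disable the pointer authentication traps when set, so
 * "enable" below means letting the guest use ptrauth without trapping,
 * while "disable" re-arms the traps.
 */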
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
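
/*
 * Illustrative sketch of typical use in MMIO emulation (not the exact
 * kernel code): a trapped load completes with
 * vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data), and a trapped
 * store fetches its data with vcpu_get_reg() on the same register.
 * Register 31 is XZR: reads return zero and writes are discarded.
 */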

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return __vcpu_sys_reg(vcpu, SPSR_EL1);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	/* The input is an AArch64 view, where DIT lives at bit 24 */
	unsigned long dit = !!(spsr & PSR_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
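
/*
 * Worked example: if the AArch64 view has DIT (bit 24) set and SS (bit 21)
 * clear, both overlap bits are first cleared, then DIT is re-inserted at
 * bit 21, where AArch32 expects it; bit 24 (J) is left RES0.
 */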

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

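/*
 * HPFAR_EL2 reports the faulting IPA pre-shifted: its FIPA field starts
 * at bit 4 and holds the address from bit 12 upwards. Masking off the
 * low bits and shifting left by 8 thus reconstructs the page-aligned IPA.
 */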
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

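/* ESR_ELx.SAS encodes log2(access size); this returns 1, 2, 4 or 8 bytes. */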
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

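/*
 * A stage-1 page table walk (S1PTW) abort is treated as a write: the
 * hardware walker may need to update access/dirty flags, so mapping the
 * page read-only would simply fault again.
 */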
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE: EL1 data endianness */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	/* SCTLR_EL1.EE */
	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

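/*
 * Endianness helpers for MMIO emulation: convert a value of the given
 * access width between the guest's current data endianness and the
 * host's native little-endian layout.
 */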
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* unreachable: both branches return */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* unreachable: both branches return */
}
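
/*
 * Illustrative sketch (not the exact MMIO return path): a completed
 * guest load is written back with
 * vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data, len));
 * the store side applies vcpu_data_guest_to_host() to the value read
 * from the source register.
 */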
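
/*
 * Flag the PC to be advanced past the current instruction. The increment
 * is deferred: KVM applies it when it next commits the vcpu's PC state
 * (along with any pending exception), instead of patching the PC here.
 */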
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

#endif /* __ARM64_KVM_EMULATE_H__ */