/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
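
/*
 * The base offsets above select the vector table quadrant for the
 * exception source (current EL with SP_EL0 or SP_ELx, lower EL using
 * AArch64 or AArch32), and an enum exception_type value selects the
 * slot within that quadrant. When an exception is emulated, the guest
 * vectors to VBAR_ELx + base + type; e.g. an IRQ taken from a lower
 * AArch64 EL lands at VBAR_ELx + 0x400 + 0x80.
 */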

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

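/*
 * Two definitions of vcpu_el1_is_32bit: in hypervisor code (either
 * flavour), HCR_EL2 has already been configured for the vcpu and
 * HCR_EL2.RW is the authoritative view of the guest's register width.
 * Elsewhere, fall back to the KVM_ARM_VCPU_EL1_32BIT feature bit
 * requested at vcpu creation, from which hcr_el2 is derived in the
 * first place.
 */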
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif

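/*
 * Compute the baseline HCR_EL2 trap configuration for a guest: start
 * from HCR_GUEST_FLAGS, then add or remove traps according to the CPU
 * features available (RAS, stage-2 FWB, EVT, MTE) and the vcpu's
 * register width.
 */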
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

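/*
 * vcpu_clear_wfx_traps() lets WFE run in the guest, and also lets WFI
 * run natively when GICv4 direct injection (vLPIs or vSGIs) is in use
 * for this vcpu, where wake-up interrupts are delivered by the GIC
 * without the host's involvement. vcpu_set_wfx_traps() restores
 * trapping of both, so the host can block a waiting vcpu and schedule
 * something else.
 */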
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

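/*
 * The helpers below inspect the state of a vcpu that presents a
 * (virtual) EL2 to its guest, i.e. with nested virtualization. "Is
 * EL2" and the E2H/TGE views are derived from the vcpu's PSTATE and
 * the in-memory copy of its HCR_EL2 system register.
 */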
static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * the E2H and TGE bits are both set. The latter means we are in
	 * the user space of the VHE kernel. ARMv8.1 ARM describes this
	 * as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} case isn't really handled
	 * in the rest of the KVM code, and will result in a misbehaving
	 * guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

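/*
 * Returns the condition code the trapped (AArch32) instruction was
 * predicated on, as reported in ESR_ELx.COND when ESR_ELx.CV is set,
 * or -1 when no valid condition code was provided.
 */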
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

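/*
 * HPFAR_EL2 reports the page number of the faulting IPA in its FIPA
 * field, which starts at bit 4 of the register: masking off the low
 * bits and shifting left by 8 turns register bit 4 into address bit
 * 12, reconstructing the page-aligned IPA of the stage-2 fault.
 */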
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

/* Access size of the faulting data abort, in bytes (1, 2, 4 or 8). */
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

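/*
 * True for any synchronous external abort: one taken on the access
 * itself (EXTABT), on a translation table walk (SEA_TTWn), or their
 * parity/ECC error equivalents (SECC, SECC_TTWn).
 */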
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

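/* Rt of the trapped MRS/MSR instruction, decoded from the ESR. */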
static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

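/*
 * Convert MMIO data between the guest's current byte order (CPSR.E
 * for AArch32, SCTLR_ELx.EE/E0E for AArch64) and the host's native
 * one, for the given access width. On the usual little-endian host,
 * this byte-swaps accesses made by a big-endian guest and leaves
 * little-endian ones untouched.
 */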
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

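/*
 * PC increments and exception injection are not applied immediately:
 * they are recorded as vcpu flags and resolved on the next entry to
 * the guest, once the world-switch code can safely update the target
 * context. The two are mutually exclusive, hence the WARN_ONs below.
 */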
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

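/*
 * Compute the CPTR_EL2 value that restores the default FP/SIMD, SVE
 * and SME trap configuration once the guest's state has been put
 * away. With VHE (and hVHE) the register follows the CPACR_EL1
 * layout, where the xEN fields enable access (i.e. disable trapping);
 * without it, the nVHE layout is used, where set bits enable traps.
 */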
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}
#endif /* __ARM64_KVM_EMULATE_H__ */