/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
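
/*
 * Illustrative example (not an in-tree helper): when KVM emulates an
 * exception for the guest, the vector entry offset is the base for the
 * exception's origin (one of the *_VECTOR defines above) plus the offset for
 * its type (enum exception_type). For instance, an IRQ taken from a lower
 * exception level running AArch64 vectors to:
 *
 *	offset = LOWER_EL_AArch64_VECTOR + except_type_irq;	(0x400 + 0x80 = 0x480)
 *	new_pc = vbar + offset;
 *
 * where vbar stands for the guest's VBAR_ELx value; both names are used here
 * purely for illustration.
 */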

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}
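
/*
 * Illustrative sketch only (assumed policy, not a definitive description of
 * the in-tree vcpu load/put code): the HCR_EL2 helpers above are typically
 * exercised when a vCPU is loaded onto a physical CPU, e.g. to decide
 * whether guest WFI/WFE should trap to the host so it can reschedule:
 *
 *	if (single_task_running())
 *		vcpu_clear_wfx_traps(vcpu);	// let WFI idle in hardware
 *	else
 *		vcpu_set_wfx_traps(vcpu);	// trap WFx, pick another task
 *
 * single_task_running() is used here purely for illustration.
 */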

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
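
/*
 * Illustrative sketch (hypothetical MMIO emulation snippet, not the in-tree
 * decoder): the register number normally comes from the ESR_EL2 ISS, e.g.
 * via kvm_vcpu_dabt_get_rd() defined further down in this header. Register
 * number 31 follows XZR/WZR semantics: reads return 0 and writes are
 * discarded.
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long data = vcpu_get_reg(vcpu, rt);	// guest store data
 *	...
 *	vcpu_set_reg(vcpu, rt, result);			// guest load result
 */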

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return __vcpu_sys_reg(vcpu, SPSR_EL1);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
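
/*
 * Worked example (descriptive only, derived from the table above):
 * host_spsr_to_spsr32() extracts the DIT state (via PSR_AA32_DIT_BIT),
 * clears bits 24 and 21 of the incoming AArch64 view, and re-inserts DIT at
 * bit 21, its AArch32 position. Bit 24 of the result is left clear because
 * J is RES0 in ARMv8; every other bit is passed through unchanged.
 */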

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}
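
/*
 * Illustrative sketch (assumed abort-handling usage, not the in-tree fault
 * handler): kvm_vcpu_get_fault_ipa() yields the faulting IPA with bits
 * [11:0] zero, since HPFAR_EL2 only records the faulting page. When FAR_EL2
 * is valid (ESR_EL2.FnV clear), a full faulting address can be reconstructed
 * by adding the page offset from kvm_vcpu_get_hfar():
 *
 *	phys_addr_t ipa = kvm_vcpu_get_fault_ipa(vcpu);
 *	ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 */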

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}
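
/*
 * Illustrative sketch (hypothetical decode step, not the in-tree MMIO code):
 * when a data abort reports a valid ISS (kvm_vcpu_dabt_isvalid()), the
 * accessors above describe the access to emulate:
 *
 *	bool is_write = kvm_vcpu_dabt_iswrite(vcpu);	// check S1PTW first
 *	unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	// 1, 2, 4 or 8 bytes
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);		// transfer register
 *	bool sign_extend = kvm_vcpu_dabt_issext(vcpu);	// sign-extend the load
 */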

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}
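
/*
 * Illustrative example (assumed MMIO write-emulation usage, not the in-tree
 * code): vcpu_data_guest_to_host() converts the register value of a trapped
 * guest store into the host's native representation, byte-swapping when the
 * guest's data endianness differs from the host's, so that the bytes placed
 * in the MMIO data buffer match what the guest's store would have put in
 * memory:
 *
 *	data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *
 * rt and len are hypothetical locals. vcpu_data_host_to_guest() below
 * performs the inverse conversion when completing a guest load.
 */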

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

#endif /* __ARM64_KVM_EMULATE_H__ */