/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
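
	/*
	 * Note: with ARMv8.4-S2FWB, HCR_EL2.FWB forces guest memory to a
	 * combined cacheable stage-1/stage-2 memory type, so no cache
	 * maintenance (and hence no TVM trapping, see the else branch
	 * below) is needed around the guest turning its MMU on.
	 */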
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}
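
/*
 * With VHE, the guest's EL1 system registers may be resident on the CPU
 * while the vcpu is loaded (sysregs_loaded_on_cpu). The accessors below
 * therefore read/write the live hardware register in that case, and the
 * in-memory context otherwise; e.g. (illustrative):
 *
 *	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
 *
 * works regardless of where ELR_EL1 currently lives.
 */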
static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_ELR);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_ELR);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
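
/*
 * Register 31 is XZR/WZR here: it reads as zero and writes to it are
 * discarded. An illustrative MMIO-load completion using these helpers
 * would be:
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
 *		     vcpu_data_host_to_guest(vcpu, data, len));
 */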

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
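
/*
 * Worked example: given that PSR_AA32_DIT_BIT names the AArch64-view DIT
 * position (bit 24), host_spsr_to_spsr32(BIT(24)) == BIT(21): DIT moves
 * to its AArch32 position and bit 24 (J, RES0) is cleared.
 */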

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}
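
/*
 * The kvm_vcpu_dabt_*() helpers below decode the remaining ISS fields of
 * a data abort's ESR_EL2: sign extension (SSE), 64-bit register width
 * (SF), target register (SRT), stage-1 page-table walk (S1PTW),
 * write-not-read (WNR), cache maintenance (CM) and access size (SAS).
 */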

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
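
/*
 * Synchronous external aborts (FSC_SEA*) and parity/ECC errors (FSC_SECC*),
 * whether on the access itself or on a translation table walk (TTW0-3),
 * are not ordinary stage-2 translation/permission faults; the helper below
 * lets the abort handler route them to the external-abort path instead of
 * treating them as stage-2 faults to be resolved.
 */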

static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
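
/*
 * MMIO data is passed around in host byte order (bit 25 tested above is
 * SCTLR_EL1.EE, the EL1 data endianness bit). Illustrative example: for a
 * big-endian guest doing a 4-byte MMIO store of 0x11223344, on a
 * little-endian host vcpu_data_guest_to_host() returns the byte-swapped
 * value 0x44332211, so the device sees the byte sequence the guest wrote.
 */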

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}
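
/*
 * Clearing DBG_SPSR_SS (PSTATE.SS) above ensures that when userspace
 * single-steps the guest (KVM_GUESTDBG_SINGLESTEP), a trapped-and-emulated
 * instruction still counts as exactly one step: the pending step is
 * consumed here rather than by the hardware step state machine.
 */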

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */