// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
	if (!guest_owns_fp_regs(vcpu)) {
		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		__activate_traps_fpsimd32(vcpu);
	}
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 cptr;

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	cptr = CPTR_EL2_DEFAULT;
	if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
		cptr |= CPTR_EL2_TZ;
	if (cpus_have_final_cap(ARM64_SME))
		cptr &= ~CPTR_EL2_TSM;

	write_sysreg(cptr, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled, and don't
 * trust the host to spot or fix it. The check below is based on the one in
 * kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the exit code is
 * rewritten to ARM_EXCEPTION_IL so that the guest run loop exits back to
 * the host instead of continuing.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu->arch.target = -1;
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}