// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Release the hyp mapping of the parent task's FPSIMD state and drop the
 * task_struct reference taken in kvm_arch_vcpu_run_map_fp(). This is a
 * no-op unless protected KVM is enabled and a parent task was recorded.
 */
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
        struct task_struct *p = vcpu->arch.parent_task;
        struct user_fpsimd_state *fpsimd;

        if (!is_protected_kvm_enabled() || !p)
                return;

        fpsimd = &p->thread.uw.fpsimd_state;
        kvm_unshare_hyp(fpsimd, fpsimd + 1);
        put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
        int ret;

        struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

        kvm_vcpu_unshare_task_fp(vcpu);

        /* Make sure the host task fpsimd state is visible to hyp: */
        ret = kvm_share_hyp(fpsimd, fpsimd + 1);
        if (ret)
                return ret;

        vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

        /*
         * We need to keep current's task_struct pinned until its data has
         * been unshared with the hypervisor to make sure it is not re-used
         * by the kernel and donated to someone else while already shared --
         * see kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
         */
        if (is_protected_kvm_enabled()) {
                get_task_struct(current);
                vcpu->arch.parent_task = current;
        }

        return 0;
}
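
/*
 * For reference, the share/pin lifecycle set up above pairs as follows
 * (an illustrative sketch pieced together from this file; the unshare
 * side also runs from call sites outside this file, e.g. vcpu teardown):
 *
 *   kvm_arch_vcpu_run_map_fp():
 *           kvm_share_hyp(fpsimd, fpsimd + 1);
 *           get_task_struct(current);               // pKVM only
 *   ...
 *   kvm_vcpu_unshare_task_fp():
 *           kvm_unshare_hyp(fpsimd, fpsimd + 1);    // pKVM only
 *           put_task_struct(p);                     // pKVM only
 */
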
/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
        BUG_ON(!current->mm);
        BUG_ON(test_thread_flag(TIF_SVE));

        vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;

        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}

/*
 * Mirror current's TIF_FOREIGN_FPSTATE into vcpu->arch.flags so that it
 * is visible to hyp on the next guest entry.
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
        if (test_thread_flag(TIF_FOREIGN_FPSTATE))
                vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
        else
                vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
}

/*
 * If the guest FPSIMD state was loaded, update the host's context
 * tracking data to mark the CPU FPSIMD regs as dirty and belonging to
 * vcpu, so that they will be written back if the kernel clobbers them
 * due to kernel-mode NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
        WARN_ON_ONCE(!irqs_disabled());

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
                                         vcpu->arch.sve_state,
                                         vcpu->arch.sve_max_vl);

                clear_thread_flag(TIF_FOREIGN_FPSTATE);
                update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
        }
}
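
/*
 * Summary of the vcpu->arch.flags protocol used in this file (a reading
 * aid distilled from the code above, not a definition of the flags'
 * wider semantics):
 *
 *   KVM_ARM64_FP_HOST            - the CPU FPSIMD regs hold current's
 *                                  (host) state; set at vcpu load.
 *   KVM_ARM64_FP_ENABLED         - guest FPSIMD state was loaded by the
 *                                  hyp access trap; checked at ctxsync
 *                                  and vcpu put.
 *   KVM_ARM64_HOST_SVE_ENABLED   - EL0 SVE access was enabled in
 *                                  CPACR_EL1 at load time; the trap
 *                                  state is restored at vcpu put.
 *   KVM_ARM64_FP_FOREIGN_FPSTATE - snapshot of TIF_FOREIGN_FPSTATE
 *                                  taken by kvm_arch_vcpu_ctxflush_fp().
 */
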
/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct user_fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        local_irq_save(flags);

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                if (vcpu_has_sve(vcpu)) {
                        __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

                        /* Restore the VL that was saved when bound to the CPU */
                        if (!has_vhe())
                                sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
                                                       SYS_ZCR_EL1);
                }

                fpsimd_save_and_flush_cpu_state();
        } else if (has_vhe() && system_supports_sve()) {
                /*
                 * The FPSIMD/SVE state in the CPU has not been touched, and we
                 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
                 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
                 * for EL0. To avoid spurious traps, restore the trap state
                 * seen by kvm_arch_vcpu_load_fp():
                 */
                if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
                        sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
                else
                        sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }

        update_thread_flag(TIF_SVE, 0);

        local_irq_restore(flags);
}
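
/*
 * Illustrative ordering of the helpers above across one KVM_RUN pass,
 * pieced together from the comments in this file (the actual call sites
 * live in the generic vcpu run path, outside this file):
 *
 *   kvm_arch_vcpu_run_map_fp(vcpu);  // first run, or run from a new task
 *   kvm_arch_vcpu_load_fp(vcpu);     // vcpu load: regs belong to host
 *   kvm_arch_vcpu_ctxflush_fp(vcpu); // before guest entry
 *       ...guest runs; hyp may load guest FP state on an access trap...
 *   kvm_arch_vcpu_ctxsync_fp(vcpu);  // after guest exit, IRQs disabled
 *   kvm_arch_vcpu_put_fp(vcpu);      // vcpu put: write back / clean up
 */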