// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
	struct task_struct *p = vcpu->arch.parent_task;
	struct user_fpsimd_state *fpsimd;

	if (!is_protected_kvm_enabled() || !p)
		return;

	fpsimd = &p->thread.uw.fpsimd_state;
	kvm_unshare_hyp(fpsimd, fpsimd + 1);
	put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);
	BUG_ON(test_thread_flag(TIF_SVE));

	if (!system_supports_fpsimd())
		return;

	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;

	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	/*
	 * We don't currently support SME guests, but if we leave things
	 * in streaming mode then when the guest starts running FPSIMD or
	 * SVE code it may generate SME traps. As a special case, if we
	 * are in streaming mode we therefore force the host state to be
	 * saved now and exit streaming mode, so that we don't have to
	 * handle any SME traps for valid guest operations. Do the same
	 * for ZA for now, for simplicity.
	 */
	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
			vcpu->arch.fp_state = FP_STATE_FREE;
			fpsimd_save_and_flush_cpu_state();
		}
	}
}

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest owns the FP hardware (and it was
 * the responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		vcpu->arch.fp_state = FP_STATE_FREE;
}

/*
 * Called just after exiting the guest. If the guest FPSIMD state was
 * loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu, so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
		/*
		 * Currently we do not support SME guests so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl,
					 NULL, 0, &vcpu->arch.svcr);

		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
	 * CPACR_EL1_DEFAULT and we need to re-enable SME.
	 */
	if (has_vhe() && system_supports_sme()) {
		/* Also restore EL0 state seen on entry */
		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0,
					 CPACR_EL1_SMEN_EL0EN |
					 CPACR_EL1_SMEN_EL1EN);
		else
			sysreg_clear_set(CPACR_EL1,
					 CPACR_EL1_SMEN_EL0EN,
					 CPACR_EL1_SMEN_EL1EN);
	}

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
		if (vcpu_has_sve(vcpu)) {
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	update_thread_flag(TIF_SVE, 0);

	local_irq_restore(flags);
}
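
/*
 * For orientation, a rough sketch of how the hooks above are sequenced
 * around a guest run. This is illustrative only, not part of this file:
 * the real call sites live elsewhere in arch/arm64/kvm (the vcpu
 * load/put and KVM_RUN paths), and the simplified caller below is a
 * hypothetical outline rather than verbatim kernel code:
 *
 *	kvm_arch_vcpu_run_map_fp(vcpu);	// on entry to KVM_RUN from a new task
 *	kvm_arch_vcpu_load_fp(vcpu);	// vcpu load: mark FP as host-owned
 *	for (;;) {
 *		local_irq_disable();
 *		kvm_arch_vcpu_ctxflush_fp(vcpu);  // drop stale FP ownership
 *		// ... run the guest; an FPSIMD trap to hyp may load its state ...
 *		kvm_arch_vcpu_ctxsync_fp(vcpu);	  // track guest-owned CPU regs
 *		local_irq_enable();
 *	}
 *	kvm_arch_vcpu_put_fp(vcpu);	// vcpu put: write back, restore traps
 */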