// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 cptr;

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	cptr = CPTR_EL2_DEFAULT;
	if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
		cptr |= CPTR_EL2_TZ;

	write_sysreg(cptr, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/**
 * Disable host events, enable guest events
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}
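
/*
 * Illustrative note, not part of the original file: a minimal sketch of how
 * __kvm_vcpu_run() above is reached on non-VHE systems. The host kernel runs
 * at EL1 and cannot call EL2 code directly, so it goes through the HVC-based
 * hyp call interface; the host-side call site is assumed to look roughly like
 * the one in arch/arm64/kvm/arm.c (exact location may vary by kernel version):
 *
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * The value returned is the exit_code computed in the run loop above, which
 * the host then feeds into its exit handling.
 */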