// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

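/*
 * A note on the CPTR_EL2 bits assembled in __activate_traps() above
 * (summarised from the architectural trap descriptions; the lazy FP
 * switching helpers live in the shared hyp/switch.h code):
 *
 *  - CPTR_EL2_TTA traps trace system register accesses to EL2.
 *  - CPTR_EL2_TZ traps SVE instructions and ZCR accesses to EL2.
 *  - CPTR_EL2_TAM traps Activity Monitor (AMU) register accesses to EL2.
 *  - CPTR_EL2_TFP traps FP/SIMD accesses; it is only set while the
 *    guest's FP state is not resident (update_fp_enabled() returned
 *    false), so the guest's first FP/SIMD use traps to EL2 and the
 *    state can be switched lazily.
 */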

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u64 mdcr_el2;

	___deactivate_traps(vcpu);

	mdcr_el2 = read_sysreg(mdcr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

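/*
 * The PMU switch helpers below rely on PMCNTENSET_EL0 and PMCNTENCLR_EL0
 * being write-one-to-set/write-one-to-clear: writing the relevant counter
 * bitmap enables or disables exactly those counters and leaves the rest
 * untouched. events_host/events_guest are expected to be maintained by
 * the host PMU driver (see kvm_set_pmu_events()) for perf events created
 * with the exclude_guest/exclude_host attributes, e.g.
 *
 *	perf stat -e cycles:H ...	(count in host only)
 *	perf stat -e cycles:G ...	(count in guest only)
 */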
/*
 * Disable host events, enable guest events
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);
	struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
	unsigned long str_va;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
	unreachable();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}
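
/*
 * Usage sketch: on non-VHE systems the host is not expected to call
 * __kvm_vcpu_run() directly; it is reached through the kvm_call_hyp
 * machinery, roughly:
 *
 *	int ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * The resulting HVC traps to the EL2 vectors, which dispatch to this
 * function with the vcpu pointer still being a kernel VA, which is why
 * __kvm_vcpu_run() starts with a kern_hyp_va() conversion.
 */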