// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		write_sysreg(0, pmselr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
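
/* Undo the effects of __activate_traps_common() on the way back to the host. */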
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3())
		write_sysreg(0, pmuserenr_el0);
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar, far;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	fault->far_el2 = far;
	fault->hpfar_el2 = hpfar;
	return true;
}
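
/*
 * Read the faulting FAR (and, if needed, HPFAR) for data and instruction
 * aborts. Returns false if the IPA couldn't be resolved, in which case the
 * guest is re-entered and the faulting access replayed.
 */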
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	return __get_fault_info(esr, &vcpu->arch.fault);
}

static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
{
	struct thread_struct *thread;

	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
			      uw.fpsimd_state);

	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool sve_guest, sve_host;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
	} else {
		sve_guest = false;
		sve_host = false;
	}

	esr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
	    esr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	/* Valid trap. Switch the context: */
	if (has_vhe()) {
		reg = CPACR_EL1_FPEN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		if (sve_host)
			__hyp_sve_save_host(vcpu);
		else
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}
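
/*
 * Check whether the trap was due to a pointer authentication instruction
 * (PAC) or to a direct access to one of the ptrauth key registers.
 */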
static inline bool esr_is_ptrauth_trap(u32 esr)
{
	u32 ec = ESR_ELx_EC(esr);

	if (ec == ESR_ELx_EC_PAC)
		return true;

	if (ec != ESR_ELx_EC_SYS64)
		return false;

	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while(0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu) ||
	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}
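
/*
 * A rough sketch of how fixup_guest_exit() below is expected to be driven
 * by the mode-specific world-switch loop (illustrative only, not the
 * actual VHE/nVHE implementation):
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */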

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code)) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC exits already have an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		goto guest;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		goto guest;

	if (__hyp_handle_ptrauth(vcpu))
		goto guest;

	if (!__populate_fault_info(vcpu))
		goto guest;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				goto guest;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			goto guest;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */