/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit
	 * is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
	 * Otherwise, the guest's EL1 register width has not yet been
	 * determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4

	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};
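/*
 * Illustrative sketch (not part of the interface proper): the
 * KVM_ARCH_FLAG_* values above are bit numbers into the 'flags'
 * bitmap, so they are manipulated with the standard atomic bitops,
 * e.g. when userspace enables MTE for the VM:
 *
 *	set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
 *	...
 *	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags))
 *		...
 */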
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication registers, in strictly increasing order */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
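/*
 * Illustrative sketch: PMEVCNTR0_EL0..PMEVCNTR30_EL0 (and their
 * PMEVTYPER counterparts) are laid out contiguously, so event counter
 * n can be addressed by arithmetic on the enum, e.g.:
 *
 *	u64 cnt = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);
 *
 * (__vcpu_sys_reg() is defined further down in this header.)
 */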
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to use when debugging the guest. This is set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
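/*
 * Illustrative sketch of the debug_ptr scheme described above (the
 * actual selection lives in the debug setup code): debug_ptr normally
 * points at vcpu_debug_state, and is redirected to external_debug_state
 * while userspace debugs the guest with KVM_GUESTDBG_USE_HW:
 *
 *	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
 *		vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
 *	else
 *		vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 */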
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
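/*
 * Illustrative sketch: vcpu_sve_state_size() sizes the buffer that
 * sve_state points to, e.g. when SVE is finalized for a vcpu:
 *
 *	void *buf = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL_ACCOUNT);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	vcpu->arch.sve_state = buf;
 */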
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
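/*
 * Illustrative sketch: pending an exception combines one type, one
 * target EL and the PENDING flag, e.g. to inject a synchronous
 * exception into AArch64 EL1:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */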
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
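/*
 * Illustrative sketch of how the two helpers above are meant to be
 * used: prefer the hardware copy while the vcpu's sysregs are loaded
 * on the CPU, and fall back to the memory-backed copy otherwise:
 *
 *	u64 val;
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *	return __vcpu_sys_reg(vcpu, reg);
 */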
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
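/*
 * Illustrative sketch: callers use one and the same invocation on VHE
 * (direct function call plus isb()) and nVHE (HVC into the hyp code),
 * e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */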
void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
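/*
 * Illustrative sketch: callers gate stolen-time accounting on the
 * guest having established a base address, e.g. on vcpu load:
 *
 *	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 *		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 */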
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */