/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64 vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Memory Tagging Extension enabled for the guest */
	bool mte_enabled;
};
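
/*
 * Illustrative sizing for the pmu_filter bitmap above; a sketch assuming
 * the ARMv8.1+ event space, not the real allocation site (which lives in
 * the PMU emulation code and sizes the bitmap according to pmuver):
 *
 *	unsigned long *filter = bitmap_zalloc(1 << 16, GFP_KERNEL_ACCOUNT);
 *	...
 *	allowed = test_bit(eventsel, filter);
 */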

struct kvm_vcpu_fault_info {
	u32 esr_el2;	/* Hyp Syndrome Register */
	u64 far_el2;	/* Hyp Fault Address Register */
	u64 hpfar_el2;	/* Hyp IPA Fault Address Register */
	u64 disr_el1;	/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in strictly increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
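
/*
 * The PMU event counter/type registers above are laid out as ranges on
 * purpose: counter n (0 <= n <= 30) lives at PMEVCNTR0_EL0 + n, so the
 * emulation code can index them directly. An illustrative access using
 * the accessor defined further down in this file:
 *
 *	u64 cnt = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);
 */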

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64 spsr_abt;
	u64 spsr_und;
	u64 spsr_irq;
	u64 spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values we
	 * want to use when debugging the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;		/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32 mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
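
/*
 * Worked example (illustrative): sve_max_vl is expressed in bytes, so
 * for a 512-bit vector length sve_max_vl == 64, vcpu_sve_max_vq() ==
 * sve_vq_from_vl(64) == 4, and vcpu_sve_state_size() returns
 * SVE_SIG_REGS_SIZE(4): room for the Z, P and FFR registers in
 * signal-frame layout. This is roughly how the SVE finalization code
 * sizes the backing storage:
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu),
 *				       GFP_KERNEL_ACCOUNT);
 */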

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
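
/*
 * For example, the fault injection code queues a synchronous exception
 * for the guest's EL1 by combining the target/type encodings above with
 * the pending flag:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */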

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
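
/*
 * Sketch of how the two helpers above combine with the memory-backed
 * accessors; the real vcpu_read_sys_reg()/vcpu_write_sys_reg() live in
 * sys_regs.c:
 *
 *	u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 *	{
 *		u64 val;
 *
 *		if (vcpu->arch.sysregs_loaded_on_cpu &&
 *		    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *			return val;
 *
 *		return __vcpu_sys_reg(vcpu, reg);
 *	}
 */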

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() calls below guarantee the same behaviour on VHE as on !VHE,
 * where the eret to EL1 acts as a context synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
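
/*
 * Usage sketch (illustrative; both callees are existing hyp entry
 * points declared in kvm_asm.h):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr_el2 = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 *
 * On VHE these expand to direct function calls followed by an isb();
 * on nVHE they become an HVC into the hypervisor via kvm_call_hyp_nvhe().
 */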

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */