/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64	vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Memory Tagging Extension enabled for the guest */
	bool mte_enabled;
};
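
/*
 * Usage note (illustrative sketch, not part of this header): a VMM that
 * wants no-syndrome data aborts forwarded to it would opt in per VM
 * with KVM_ENABLE_CAP; the vm_fd variable below is hypothetical:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_NISV_TO_USER,
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */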

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
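
/*
 * Illustrative sketch: the PMEVCNTRn/PMEVTYPERn ranges are laid out
 * contiguously so that counter n can be addressed by arithmetic on the
 * enum, e.g. (with a hypothetical index idx in [0, 30], using the
 * __vcpu_sys_reg() accessor defined further down):
 *
 *	u64 counter = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + idx);
 */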

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
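
/*
 * Illustrative sketch of how the PSCI configuration above might be
 * consumed (not the canonical implementation): a caller picking the
 * CPU_ON function ID would branch on the host's PSCI version, using
 * the v0.1 IDs only when they were actually discovered:
 *
 *	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
 *		if (!kvm_host_psci_config.psci_0_1_cpu_on_implemented)
 *			return -ENOSYS;
 *		fn = kvm_host_psci_config.function_ids_0_1.cpu_on;
 *	} else {
 *		fn = PSCI_0_2_FN64_CPU_ON;
 *	}
 */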

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values we
	 * want to use when debugging the guest; it is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
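
/*
 * Illustrative sketch (loosely mirroring how the SVE register storage
 * could be sized once the vector length is finalized; error handling
 * is elided and the allocation flags are an assumption):
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu),
 *				       GFP_KERNEL);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 */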

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
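
/*
 * Illustrative sketch: queueing a synchronous exception for the guest
 * at (v)EL1 combines the pending bit with a target EL and exception
 * type from the EXCEPT field, roughly as follows (not the canonical
 * helper):
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */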

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
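
/*
 * Illustrative sketch of the intended calling pattern (not the
 * canonical implementation of vcpu_read_sys_reg): try the CPU copy
 * while the vcpu's sysregs are loaded, and fall back to the
 * memory-backed copy otherwise:
 *
 *	u64 val;
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 */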

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
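
/*
 * Illustrative usage sketch (the hyp function names below are
 * assumptions drawn from typical entry points declared in
 * asm/kvm_asm.h; exact signatures may vary by kernel version):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 *
 * On VHE the function is called directly (followed by an isb()); on
 * !VHE the call is turned into an HVC to the nVHE hypervisor.
 */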

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
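
/*
 * Illustrative sketch: steal-time accounting is gated on the base
 * address having been set by userspace (GPA_INVALID means "disabled"),
 * so a caller on the vcpu-load path would typically do something like
 * the following (loosely mirroring the intended use, not the
 * canonical code):
 *
 *	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 *		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 */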

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)	(system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */