/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;
};
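
/*
 * Illustrative sketch only, not part of this header: a filter bitmap like
 * pmu_filter above is meant to be consulted with the generic bitmap
 * helpers, one bit per event number. The helper name and the "bit set
 * means allowed" polarity below are assumptions made for the example:
 *
 *	static bool pmu_event_allowed(struct kvm *kvm, u64 eventsel)
 *	{
 *		if (!kvm->arch.pmu_filter)
 *			return true;
 *		return test_bit(eventsel, kvm->arch.pmu_filter);
 *	}
 */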

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
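
/*
 * Illustrative sketch only: the enum above is the index space of the
 * memory-backed sys_regs[] array in kvm_cpu_context (defined below).
 * Ranged entries such as the PMU event counters are contiguous, so
 * counter n (0..30) can be addressed by offsetting the base enumerator.
 * The helper below is hypothetical and exists only for the example:
 *
 *	static u64 get_pmevcntr(struct kvm_cpu_context *ctxt, unsigned int n)
 *	{
 *		return ctxt->sys_regs[PMEVCNTR0_EL0 + n];
 *	}
 */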

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
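
/*
 * Illustrative sketch only (hypothetical code; the real logic lives in the
 * PSCI and vcpu reset handling): vcpu_reset_state carries the entry point
 * requested by, for example, PSCI CPU_ON until the target vcpu handles its
 * reset request:
 *
 *	vcpu->arch.reset_state.pc = entry_point;
 *	vcpu->arch.reset_state.r0 = context_id;
 *	vcpu->arch.reset_state.be = big_endian_entry;
 *	vcpu->arch.reset_state.reset = true;
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 */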

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values we want to use while debugging the guest. This is set via
	 * the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
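
/*
 * Illustrative sketch only (not the actual debug setup code): debug_ptr
 * selects which register set gets loaded onto the hardware, so switching
 * between guest-owned and userspace-provided debug state is conceptually:
 *
 *	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
 *		vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
 *	else
 *		vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 *	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 */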

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)

#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
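
/*
 * Illustrative sketch only, restating the rule in the comment above; it is
 * not taken from kernel code. Emulation that must observe the value a
 * running vcpu currently sees goes through the accessors, while userspace
 * access can use the memory-backed copy directly:
 *
 *	u64 live   = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 *	u64 backed = __vcpu_sys_reg(vcpu, SCTLR_EL1);
 *	vcpu_write_sys_reg(vcpu, live, SCTLR_EL1);
 */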

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
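
/*
 * Illustrative usage sketch: callers do not care whether they run on VHE
 * or nVHE; the wrappers above either call the hyp function directly (VHE)
 * or issue the corresponding HVC (nVHE). The two calls below mirror
 * typical callers elsewhere in KVM/arm64 and are shown here only as an
 * example:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 *	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
 */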

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented. The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}
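
/*
 * Illustrative sketch only (not the actual io_mem_abort()/MMIO exit path):
 * the MMIO helpers declared above marshal guest register contents to and
 * from the byte buffer handed to userspace in kvm_run:
 *
 *	unsigned long data = vcpu_get_reg(vcpu, rt);
 *	kvm_mmio_write_buf(run->mmio.data, len, data);
 *	data = kvm_mmio_read_buf(run->mmio.data, len);
 */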

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#endif /* __ARM64_KVM_HOST_H__ */