/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK
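/*
 * Illustrative sketch (not part of this header): the KVM_REQ_* values
 * above are consumed through the generic vcpu request API. A caller
 * that wants a vCPU to refresh its steal-time accounting might do:
 *
 *	kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and the vcpu run loop picks the request up with kvm_check_request()
 * before re-entering the guest.
 */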
/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};

#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
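/*
 * Illustrative sketch of the memcache helpers above, assuming
 * host-side callbacks (the callback names here are hypothetical):
 *
 *	static void *host_alloc(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	// Guarantee at least 5 pages are stashed for the hypervisor:
 *	ret = __topup_hyp_memcache(&mc, 5, host_alloc, host_pa, NULL);
 *
 * The cache is an intrusive singly-linked list: the first word of each
 * stashed page stores the physical address of the next page, so no
 * extra tracking structure is needed.
 */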
struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64 vtcr;

	/* Interrupt controller */
	struct vgic_dist vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit
	 * is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
	 * Otherwise, the guest's EL1 register width has not yet been
	 * determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			6
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		7
	/* SMCCC filter initialized for the VM */
#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		8
	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
	struct {
		u8 imp:4;
		u8 unimp:4;
	} dfr0_pmuver;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
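/*
 * Illustrative sketch (it mirrors how this header's own kvm_has_mte()
 * macro below consumes these bits): the KVM_ARCH_FLAG_* values are
 * plain bit numbers into kvm->arch.flags, so they are queried and set
 * with the regular bitops, e.g.:
 *
 *	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags))
 *		...
 */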
struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	TCR2_EL1,	/* Extended Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* Permission Indirection Extension registers */
	PIR_EL1,	/* Permission Indirection Register 1 (EL1) */
	PIRE0_EL1,	/* Permission Indirection Register 0 (EL1) */

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	VPIDR_EL2,	/* Virtualization Processor ID Register */
	VMPIDR_EL2,	/* Virtualization Multiprocessor ID Register */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	HCR_EL2,	/* Hypervisor Configuration Register */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HSTR_EL2,	/* Hypervisor System Trap Register */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	VTTBR_EL2,	/* Virtualization Translation Table Base Register */
	VTCR_EL2,	/* Virtualization Translation Control Register */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	TPIDR_EL2,	/* EL2 Software Thread ID Register */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	NR_SYS_REGS	/* Nothing after this line! */
};
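/*
 * Illustrative sketch: PMEVCNTR0_EL0..PMEVCNTR30_EL0 (and likewise the
 * PMEVTYPER range) are laid out contiguously, so the n-th event counter
 * can be addressed by arithmetic on the enum, e.g. with the accessor
 * defined later in this header:
 *
 *	u64 val = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);	// n in [0, 30]
 */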
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
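/*
 * A note on the kvm_nvhe_sym()/CHOOSE_NVHE_SYM() pattern above (see
 * asm/kvm_asm.h for the authoritative definitions): nVHE hypervisor
 * objects live in their own symbol namespace, so a host-side reference
 * such as:
 *
 *	s64 off = hyp_physvirt_offset;
 *
 * resolves to the hypervisor's copy of the variable rather than to a
 * same-named kernel symbol.
 */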
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE. These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers. When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;
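	/*
	 * Illustrative sketch of the fp_state life cycle (see
	 * kvm_arch_vcpu_load_fp()/kvm_arch_vcpu_put_fp() for the real
	 * logic): the regs start out FP_STATE_FREE, are marked
	 * FP_STATE_HOST_OWNED at vcpu_load time, and flip to
	 * FP_STATE_GUEST_OWNED once the guest traps on its first FP
	 * access, after which a world switch must save them, e.g.:
	 *
	 *	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
	 *		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
	 */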
	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values we want installed while debugging the guest; it is set via
	 * the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
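/*
 * Illustrative expansion (no extra API involved): because each flag is
 * a (flagset, value, mask) triplet, a call such as
 *
 *	vcpu_set_flag(vcpu, INCREMENT_PC);
 *
 * expands to __vcpu_set_flag(vcpu, iflags, BIT(1), BIT(1)), i.e. the
 * flag names both the byte to poke (vcpu->arch.iflags) and the bit(s)
 * to set, with the BUILD_BUG_ON()s validating the mask at compile time.
 */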
/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
/* vcpu running in HYP context */
#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
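/*
 * Illustrative numbers (assuming a valid vector length): sve_max_vl is
 * in bytes, so a 512-bit VL gives sve_max_vl = 64 and
 * vcpu_sve_max_vq() = 64 / 16 = 4 quadwords; vcpu_sve_state_size()
 * then sizes the buffer for the 32 Z regs, 16 P regs and FFR at that
 * VL, e.g.:
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
 */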
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |		\
				 KVM_GUESTDBG_USE_SW_BP |	\
				 KVM_GUESTDBG_USE_HW |		\
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&		\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)					\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||	\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&	\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)				\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)			\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)			\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
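/*
 * Illustrative sketch of how the helper above is typically consumed
 * (the real logic lives in vcpu_read_sys_reg() in sys_regs.c): when
 * the vcpu's registers are resident on the CPU, read the hardware,
 * otherwise fall back to the memory-backed copy:
 *
 *	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *	return __vcpu_sys_reg(vcpu, reg);
 */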
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
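/*
 * Illustrative call sites (see the KVM host code for real ones): the
 * same spelling works on VHE, where it becomes a direct function call
 * plus an isb(), and on nVHE, where it is marshalled into an SMCCC
 * hypercall, e.g.:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	u64 cfg = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
 */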
void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}
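/*
 * Illustrative sketch of the stolen-time plumbing (the real consumer
 * is the KVM_REQ_RECORD_STEAL handling in the vcpu run loop): steal.base
 * stays INVALID_GPA until userspace configures it via the
 * KVM_ARM_VCPU_PVTIME_CTRL device attribute, after which:
 *
 *	if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 *		kvm_update_stolen_time(vcpu);
 *
 * updates the shared structure; kvm_update_stolen_time() itself bails
 * out early when the steal.base sentinel is still INVALID_GPA.
 */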
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */