/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
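
/*
 * Illustrative note (not from this header): each CR*_RESERVED_BITS mask is
 * the complement of every bit KVM knows how to handle, so a proposed
 * control-register value is acceptable only if it has no bits in the mask.
 * A minimal sketch of the kind of check kvm_set_cr4() performs:
 *
 *	if (cr4 & CR4_RESERVED_BITS)
 *		return 1;	reject: a reserved bit is set
 *
 * At runtime KVM further rejects bits the host or the guest's CPUID does
 * not support (e.g. X86_CR4_PKE without PKU), so the static mask is only
 * the first gate.
 */
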
#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
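
/*
 * Worked example for the hugepage macros above (illustrative only, assuming
 * the usual x86 PAGE_SHIFT of 12):
 *
 *	KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL) = (2 - 1) * 9 = 9
 *	KVM_HPAGE_SHIFT(PT_DIRECTORY_LEVEL)     = 12 + 9      = 21
 *	KVM_HPAGE_SIZE(PT_DIRECTORY_LEVEL)      = 1UL << 21   = 2 MiB
 *	KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) = 2 MiB / 4 KiB = 512
 *
 * Likewise PT_PDPE_LEVEL yields a 1 GiB page spanning 262144 small pages.
 * gfn_to_index() returns how many level-sized pages into a memslot a gfn
 * falls, e.g. gfn_to_index(0x600, 0x400, PT_DIRECTORY_LEVEL) ==
 * (0x600 >> 9) - (0x400 >> 9) == 3 - 2 == 1.
 */
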
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = __VCPU_REGS_R8,
	VCPU_REGS_R9 = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI value in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently, we use 14 bits: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};

union kvm_mmu_extended_role {
/*
 * This structure complements kvm_mmu_page_role, caching everything needed
 * for MMU configuration. If nothing in either structure changed, MMU
 * re-configuration can be skipped. The @valid bit is set on first usage so
 * that an all-zero structure is never treated as valid data.
 */
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};
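
/*
 * Illustrative note (not from this header): packing the role bits into a
 * u32 means "same kind of shadow page?" becomes a single integer compare,
 * and the smm byte can be peeled off with a shift. A minimal sketch under
 * those assumptions:
 *
 *	union kvm_mmu_page_role a, b;
 *
 *	if (a.word == b.word)
 *		...			pages are interchangeable
 *
 *	as_id = a.word >> 24;		same value as a.smm, since smm
 *					occupies the top byte of the word
 *
 * This is the trick the kvm_memslots_for_spte_role() macro later in this
 * header relies on.
 */
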
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3
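
/*
 * Illustrative note (not from this header): KVM_MMU_ROOT_INFO_INVALID is a
 * compound literal, so a cached-root slot can be invalidated with a single
 * assignment. A minimal sketch, modeled on how mmu.c initializes a vcpu's
 * root caches:
 *
 *	uint i;
 *
 *	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 *		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 *
 * A hit in prev_roots lets kvm_mmu_new_cr3() switch page-table roots
 * without rebuilding them from scratch.
 */
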
/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits on shadow page table entries; these bits include
	 * not only hardware-reserved bits but also bits that sptes never use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};
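
/*
 * Illustrative note (not from this header): the permissions bitmap in
 * struct kvm_mmu above turns a permission check into one table lookup. The
 * byte is selected by page-fault error code bits [4:1], the bit by the
 * pte's ACC_* access mask. A minimal sketch, modeled on permission_fault()
 * in mmu.h and ignoring its SMAP/PKU refinements:
 *
 *	u8 fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 *
 * A non-zero result means this combination of error code and pte
 * permissions is a permission fault.
 */
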
struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};
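
/*
 * Illustrative note (not from this header): reprogram_pmi above is a bitmap
 * of counters whose perf events must be recreated before the next entry.
 * A minimal sketch of the deferral pattern, modeled on pmu.c:
 *
 *	__set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi);
 *	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 *
 * The actual reprogramming then happens in vcpu context when the request is
 * serviced, rather than in the overflow interrupt itself.
 */
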
struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the L1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle L2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;     /* unprocessed asynchronous NMIs */
	unsigned nmi_pending;    /* NMI queued after currently running handler */
	bool nmi_injected;       /* Trying to inject an NMI this entry */
	bool smi_pending;        /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on the guest's own page
	 * tables; set when KVM fixes such a page fault, and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* exit qualification, recorded on an EPT violation */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* was the vCPU preempted while in kernel mode (CPL == 0)? */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};
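
/*
 * Illustrative note (not from this header): the per-slot lpage_info arrays
 * are indexed by hugepage level, with index 0 holding the 2 MiB metadata
 * (hence KVM_NR_PAGE_SIZES - 1 entries: PT_PAGE_TABLE_LEVEL needs none).
 * A minimal lookup sketch, modeled on lpage_info_slot() in mmu.c:
 *
 *	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);
 *
 *	return &slot->arch.lpage_info[level - 2][idx];
 *
 * A non-zero disallow_lpage there forbids mapping the gfn with a page of
 * that level.
 */
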
/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/* Hash table of struct kvm_mmu_page. */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
};
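
/*
 * Illustrative note (not from this header): irqchip_mode is usually
 * consumed through small predicates. A minimal sketch, modeled on the
 * helpers in arch/x86/kvm/irq.h (the real ones also add a memory barrier
 * pairing with the ioctl that sets the mode):
 *
 *	bool split     = kvm->arch.irqchip_mode == KVM_IRQCHIP_SPLIT;
 *	bool in_kernel = kvm->arch.irqchip_mode != KVM_IRQCHIP_NONE;
 *
 * In split mode the PIC/IOAPIC live in userspace and only the local APIC
 * is emulated in the kernel.
 */
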
struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};
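
/*
 * Illustrative note (not from this header): struct msr_data bundles the
 * arguments of an MSR access so one callback signature serves both guest
 * WRMSR and host-side KVM_SET_MSRS. A minimal sketch of filling it in:
 *
 *	struct msr_data msr = {
 *		.index          = MSR_IA32_TSC,
 *		.data           = value,
 *		.host_initiated = false,	userspace ioctls set true
 *	};
 *
 *	r = kvm_set_msr_common(vcpu, &msr);
 *
 * host_initiated lets setters relax checks (e.g. accept values the guest's
 * CPUID would forbid) when the VMM restores saved state.
 */
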
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void); /* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int (*tlb_remote_flush)(struct kvm *kvm);
	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
					   struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has a hardware-accelerated dirty
	 * logging mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means the vCPU cannot be blocked because an event arrived
	 *      in the meantime, e.g. the 'ON' bit in the posted-interrupt
	 *      descriptor was set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
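
/*
 * Illustrative note (not from this header): the slot-wide helpers above are
 * what the memslot-update path uses to start dirty logging. A minimal
 * sketch, modeled on kvm_mmu_slot_apply_flags() in x86.c:
 *
 *	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 *		if (kvm_x86_ops->slot_enable_log_dirty)
 *			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
 *		else
 *			kvm_mmu_slot_remove_write_access(kvm, new);
 *	}
 *
 * i.e. hardware-assisted logging (PML) is preferred, with software
 * write-protection as the fallback.
 */
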
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;
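
/*
 * Illustrative note (not from this header): the TSC scaling ratio is a
 * fixed-point number with kvm_tsc_scaling_ratio_frac_bits fractional bits,
 * so kvm_default_tsc_scaling_ratio (1ull << frac_bits) means "1.0", i.e.
 * no scaling. Worked example, assuming 48 fractional bits as on VMX:
 *
 *	guest at 2000000 kHz on a 1000000 kHz host:
 *	ratio  = (2000000ULL << 48) / 1000000 = 2ULL << 48   (i.e. 2.0)
 *
 *	scaled = mul_u64_u64_shr(host_tsc, ratio, 48);       twice host rate
 *
 * kvm_scale_tsc(), declared later in this header, performs this
 * multiply-and-shift when a non-default ratio is in use.
 */
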
/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
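
/*
 * Illustrative note (not from this header): a level-triggered line stays
 * asserted while *any* source drives it, which is why the helper above ORs
 * per-source bits and returns the aggregate. A minimal usage sketch under
 * that assumption:
 *
 *	level = __kvm_irq_line_state(&ioapic->irq_states[pin],
 *				     irq_source_id, level);
 *
 * The line is treated as high while the resulting state is non-zero, and
 * only drops once every registered source has deasserted it.
 */
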
#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)
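
/*
 * Illustrative note (not from this header): the HF_* bits live in
 * vcpu->arch.hflags and are usually read through tiny predicates. A
 * minimal sketch, modeled on the is_guest_mode()/is_smm() style helpers
 * elsewhere in KVM:
 *
 *	static inline bool vcpu_in_smm(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.hflags & HF_SMM_MASK;
 *	}
 *
 * HF_SMM_MASK is also what switches the vCPU onto the second memslot
 * address space, as the macros below encode.
 */
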
#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);
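
/*
 * Illustrative note (not from this header): the two macros above are the
 * payoff of keeping smm in the top byte of kvm_mmu_page_role. Worked
 * example: while a vCPU is in SMM, hflags has HF_SMM_MASK set, so
 * kvm_arch_vcpu_memslots_id() selects address space 1 of the 2 declared by
 * KVM_ADDRESS_SPACE_NUM; shadow pages created there carry role.smm == 1,
 * and kvm_memslots_for_spte_role() hands back the matching memslots with
 * nothing more than a field extraction (a shift of role.word by 24 bits).
 */
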
static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		    \
	(*(type *)((buf) + (offset) - 0x7e00))

#endif /* _ASM_X86_KVM_HOST_H */