/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 400000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		8
#define KVM_REQ_REPORT_TPR_ACCESS	9
#define KVM_REQ_TRIPLE_FAULT		10
#define KVM_REQ_MMU_SYNC		11
#define KVM_REQ_CLOCK_UPDATE		12
#define KVM_REQ_DEACTIVATE_FPU		13
#define KVM_REQ_EVENT			14
#define KVM_REQ_APF_HALT		15
#define KVM_REQ_STEAL_UPDATE		16
#define KVM_REQ_NMI			17
#define KVM_REQ_PMU			18
#define KVM_REQ_PMI			19
#define KVM_REQ_SMI			20
#define KVM_REQ_MASTERCLOCK_UPDATE	21
#define KVM_REQ_MCLOCK_INPROGRESS	22
#define KVM_REQ_SCAN_IOAPIC		23
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	24
#define KVM_REQ_APIC_PAGE_RELOAD	25
#define KVM_REQ_HV_CRASH		26
#define KVM_REQ_IOAPIC_EOI_EXIT		27
#define KVM_REQ_HV_RESET		28
#define KVM_REQ_HV_EXIT			29
#define KVM_REQ_HV_STIMER		30

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD		 BIT_64(63)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
			  | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
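
/*
 * Worked example (illustrative only): with 4KB base pages (PAGE_SHIFT == 12),
 * KVM_HPAGE_GFN_SHIFT() is 0, 9 and 18 for levels 1..3, so KVM_HPAGE_SIZE()
 * gives 4KB, 2MB and 1GB, and KVM_PAGES_PER_HPAGE() gives 1, 512 and 262144.
 * gfn_to_index() then returns how many level-sized pages into a slot a gfn
 * lies, e.g. for base_gfn == 0x1000 and gfn == 0x1405 at level 2 (2MB pages):
 * (0x1405 >> 9) - (0x1000 >> 9) == 0xa - 0x8 == 2.
 */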

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI flag in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
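
/*
 * Usage sketch (the helpers live in arch/x86/kvm/mmu.c; shown here only to
 * illustrate the pattern): the cache is topped up while sleeping allocations
 * are still allowed, so that later, under mmu_lock, objects can be taken
 * without any allocation that could fail:
 *
 *	mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, ...);
 *	...
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */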

/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by indirect shadow pages can not be more than 15 bits.
 *
 * Currently, we use 14 bits: @level, @cr4_pae, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned :8;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */

	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
	unsigned long mmu_valid_gen;

	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][4];
	u64 bad_mt_xwr;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
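
	/*
	 * Worked example (illustrative only): a user-mode write fault sets
	 * PFERR.WRITE (bit 1) and PFERR.USER (bit 2), so error code bits
	 * [4:1] select byte index 0b0011 == 3; the pte's ACC_* permission
	 * bits pick a bit within permissions[3], and a set bit means the
	 * access is reported as a permission fault.
	 */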

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;
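
	/*
	 * Illustrative only: on a fault, the two mask bits for the domain
	 * selected by the error code (with PFEC.RSVD replaced by
	 * ACC_USER_MASK) are ANDed with the AD/WD bits PKRU holds for the
	 * page's protection key; a non-zero result means a protection-key
	 * fault.
	 */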

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits on shadow page table entries; these bits include
	 * not only hardware-reserved bits but also bits that sptes never
	 * use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	u64 config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;
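
	/*
	 * Sketch of the lazy caching those accessors implement (see
	 * arch/x86/kvm/kvm_cache_regs.h): kvm_register_read() checks the
	 * register's bit in regs_avail and calls kvm_x86_ops->cache_reg()
	 * to fill regs[] only on a miss; kvm_register_write() sets the bit
	 * in both regs_avail and regs_dirty so the value is written back
	 * to hardware state before the next vmentry.
	 */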

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	bool tpr_access_reporting;
	u64 ia32_xss;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging, this still
	 * saves the paging mode of the L1 guest.  This context is always
	 * used to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context saves all the information needed to walk the page
	 * tables of an L2 guest.  It is only initialized for page table
	 * walking and not for faulting, since we never handle L2 page
	 * faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;     /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	/*
	 * Indicates whether the access faulted on its guest page table;
	 * set when fixing a page fault and used to detect unhandleable
	 * instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
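
/*
 * Illustrative only: with mode == KVM_APIC_MODE_XAPIC_FLAT each of the
 * eight LDR bits maps one vCPU, so a logical destination of 0x3 fans out
 * to xapic_flat_map[0] and xapic_flat_map[1].  If vCPUs are configured
 * for different modes, mode ends up with multiple bits set and the map
 * (and kvm_irq_delivery_to_apic_fast) is not used.
 */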

/* Hyper-V emulation context */
struct kvm_hv {
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-V based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	unsigned long mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	unsigned int tss_addr;
	bool apic_access_page_done;

	gpa_t wall_clock;

	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	cycle_t master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif

	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	bool irqchip_split;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	/* Struct members for AVIC */
	u32 ldr_mode;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_poll_invalid;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*cpu_has_high_real_mode_segbase)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	u32 (*get_pkru)(struct kvm_vcpu *vcpu);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(void);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks.  These hooks are only supposed
	 * to be valid if the specific arch has a hardware-accelerated dirty
	 * logging mechanism.  Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
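
	/*
	 * Rough call flow (sketch): userspace enabling dirty logging on a
	 * slot triggers slot_enable_log_dirty(); KVM_GET_DIRTY_LOG calls
	 * flush_log_dirty() before copying the bitmap out; once bits are
	 * cleared in slot->dirty_bitmap, enable_log_dirty_pt_masked() is
	 * called for the affected GFNs; disabling logging (or creating a
	 * slot with logging off) triggers slot_disable_log_dirty().
	 */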

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to the
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *	happened during this period, such as the 'ON' bit of the
	 *	posted-interrupts descriptor being set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);
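
	/*
	 * Pairing sketch (see the vCPU halt path in x86.c): if pre_block()
	 * returns non-zero the vCPU is kept runnable; otherwise KVM blocks
	 * it and calls post_block() once it is woken up again.
	 */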

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level triggered interrupts */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
	return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;
#endif
}
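
/*
 * Illustrative only: get_canonical() sign-extends bit 47 into the upper
 * 16 bits (48-bit virtual addresses), so e.g. la == 0x0000800000000000
 * is noncanonical: sign extension yields 0xffff800000000000 != la.
 */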

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t"                           \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t"		      \
	"cmpb $0, kvm_rebooting \n\t"	      \
	"jne 668b \n\t"			      \
	__ASM_SIZE(push) " $666b \n\t"	      \
	"call kvm_spurious_fault \n\t"	      \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")
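
/*
 * Usage sketch (illustrative, modeled on how vmx.c wraps VMX
 * instructions):
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmclear %0")
 *		     : : "m"(phys_addr) : "cc", "memory");
 *
 * If the instruction faults because a reboot disabled virtualization,
 * the fixup code runs cleanup_insn (if any) and, when kvm_rebooting is
 * set, resumes after the instruction instead of calling
 * kvm_spurious_fault().
 */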

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
					   unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return __default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#endif /* _ASM_X86_KVM_HOST_H */