/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>

#define KVM_MAX_VCPUS 16
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK				   \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON						\
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS	\
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK						\
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* shadow tables are PAE even on non-PAE hosts */
#define KVM_HPAGE_SHIFT 21
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))

#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>
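
/*
 * A minimal illustration of how the SELECTOR_* masks above decompose a
 * segment selector (bits 0-1 are the RPL, bit 2 the table indicator).
 * The example_* helpers are hypothetical and exist only to show the
 * bit layout.
 */
static inline int example_selector_rpl(u16 selector)
{
	return selector & SELECTOR_RPL_MASK;	/* requested privilege level */
}

static inline bool example_selector_in_ldt(u16 selector)
{
	return (selector & SELECTOR_TI_MASK) != 0;	/* 1 = LDT, 0 = GDT */
}
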
#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
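
/*
 * Sketch of the consume side of the cache above, mirroring the
 * mmu_memory_cache_alloc() pattern in mmu.c; the example_ name is
 * hypothetical.  The cache is topped up with ordinary GFP allocations
 * *before* mmu_lock is taken, so the page-fault path can then pop
 * objects without sleeping or failing.
 */
static inline void *example_mmu_memory_cache_pop(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);	/* a prior top-up must guarantee this */
	return mc->objects[--mc->nobjs];
}
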
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - direct mapping of virtual to physical mapping at gfn
 *              used for real mode and two-dimensional paging
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned cr4_pge:1;
		unsigned nxe:1;
	};
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	struct list_head oos_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* holds the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	bool unsync;
	unsigned int unsync_children;
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);
};

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	gva_t guest_gva;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;

	u64 *pae_root;
	u64 rsvd_bits_mask[2][4];
};

struct kvm_vcpu_arch {
	u64 host_tsc;
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u32 hflags;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	struct kvm_mmu mmu;
	/*
	 * Only needed in the kvm_pv_mmu_op() path, but it's hot, so keep it
	 * here to avoid an allocation.
	 */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	gfn_t last_pte_gfn;

	struct {
		gfn_t gfn;	/* presumed gfn during guest pte update */
		pfn_t pfn;	/* pfn corresponding to that gfn */
		int largepage;
		unsigned long mmu_seq;
	} update_pte;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	struct {
		int vm86_active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hv_clock_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	bool singlestep; /* guest is single stepped by KVM */
	bool nmi_pending;
	bool nmi_injected;

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long host_db[KVM_NR_DB_REGS];
	unsigned long host_dr6;
	unsigned long host_dr7;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
};
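
/*
 * Note on regs/regs_avail/regs_dirty above: regs[] is a lazy cache of the
 * hardware register file.  A minimal sketch of the read-side accessor
 * (the real helpers live in kvm_cache_regs.h):
 *
 *	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 *		kvm_x86_ops->cache_reg(vcpu, reg);	// fetch from hardware
 *	return vcpu->arch.regs[reg];
 *
 * The write side sets the corresponding bit in regs_dirty instead, and
 * dirty registers are flushed back to hardware before the next guest entry.
 */
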
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	struct hlist_head irq_ack_notifier_list;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
	u64 vm_init_tsc;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
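
/*
 * descriptor_table is packed because sgdt/sidt store a 16-bit limit
 * followed immediately by the base address; natural alignment would
 * insert a two-byte hole after 'limit' and corrupt that layout.
 * Typical use, via kvm_get_gdt() below:
 *
 *	struct descriptor_table dt;
 *
 *	kvm_get_gdt(&dt);	// asm("sgdt %0" : "=m"(dt))
 *	// dt.base/dt.limit now describe the current GDT
 */
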
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);	/* __init */
	int (*disabled_by_bios)(void);		/* __init */
	void (*hardware_enable)(void *dummy);	/* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);		/* __init */
	void (*hardware_unsetup)(void);		/* __exit */
	bool (*cpu_has_accelerated_tpr)(void);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
			   u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
#define EMULTYPE_SKIP		(1 << 2)
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int emulation_type);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);
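
/*
 * Sketch of the usual caller contract for emulate_instruction() and
 * enum emulation_result above (illustrative only; exit handlers follow
 * this shape):
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;	// handled in kernel, re-enter the guest
 *	case EMULATE_DO_MMIO:
 *		return 0;	// kvm_run holds an mmio request for userspace
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, "example");
 *		return 0;
 *	}
 */
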
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void kvm_load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void kvm_get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void kvm_get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}

static inline void kvm_fx_finit(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK	(1 << 0)
#define HF_HIF_MASK	(1 << 1)
#define HF_VINTR_MASK	(1 << 2)
#define HF_NMI_MASK	(1 << 3)
#define HF_IRET_MASK	(1 << 4)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn) \
	"666: " insn "\n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"jmp kvm_handle_fault_on_reboot \n\t" \
	".popsection \n\t" \
	".pushsection __ex_table, \"a\" \n\t" \
	_ASM_PTR " 666b, 667b \n\t" \
	".popsection"
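
/*
 * Usage sketch for __kvm_handle_fault_on_reboot() (illustrative): vendor
 * code wraps each virtualization instruction so that a fault taken after
 * a reboot has disabled VMX/SVM lands in the fixup section instead of
 * oopsing.  The vmx/svm sources shorten it to something like:
 *
 *	#define __ex(x) __kvm_handle_fault_on_reboot(x)
 *
 *	asm volatile(__ex("vmxoff"));
 *
 * On a fault, the faulting instruction's address is pushed and control
 * jumps to kvm_handle_fault_on_reboot(), which ignores the instruction
 * while the reboot completes.
 */
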
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */