/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask
 * bits 52-62 to indicate an error pfn, and bit 63 to indicate a noslot
 * pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the translation to a
 * host pfn failed.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a pfn:
 * either the gfn is not in any slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
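
/*
 * Illustrative sketch (not part of this header): how a caller is expected
 * to classify the result of a gfn_to_pfn()-style lookup using the helpers
 * above:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		// gfn is not backed by any memslot (e.g. emulated MMIO)
 *	else if (is_error_pfn(pfn))
 *		// gfn is in a slot, but the host translation failed
 *	else
 *		// valid host pfn
 *
 * is_error_noslot_pfn() covers both of the failure cases at once.
 */
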
/*
 * Architectures whose KVM_HVA_ERR_BAD is other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and their own kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
#define KVM_REQUEST_NO_ACTION	BIT(10)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD		(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK		2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

/*
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
 * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
 * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
 * guarantee the vCPU received an IPI and has actually exited guest mode.
 */
#define KVM_REQ_OUTSIDE_GUEST_MODE	(KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)
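
/*
 * Illustrative sketch (hypothetical request names, not defined by any
 * architecture): arch headers allocate their request bits on top of
 * KVM_REQUEST_ARCH_BASE like so:
 *
 *	#define KVM_REQ_EXAMPLE_FLUSH \
 *		KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 *	#define KVM_REQ_EXAMPLE_EVENT	KVM_ARCH_REQ(1)
 *
 * and raise them with e.g. kvm_make_all_cpus_request(kvm, KVM_REQ_EXAMPLE_FLUSH).
 * The BUILD_BUG_ON() in KVM_ARCH_REQ_FLAGS() rejects request numbers that do
 * not fit in vcpu->requests once offset by KVM_REQUEST_ARCH_BASE.
 */
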
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);
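
/*
 * Illustrative sketch (hypothetical device, simplified error handling):
 * registering an in-kernel device on the MMIO bus is done under slots_lock,
 * e.g.:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *				      &mydev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 *
 * where mydev->dev is a struct kvm_io_device initialized with
 * kvm_iodevice_init() and a struct kvm_io_device_ops providing the
 * read/write callbacks (see virt/kvm/iodevice.h).
 */
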
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there
	 * is a 'struct page' for it; when using the mem= kernel parameter,
	 * some memory can be used as guest memory without being managed by
	 * the host kernel).
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index in kvm->vcpus array */
	int ____srcu_idx; /* Don't use this directly.  You've been warned. */
#ifdef CONFIG_PROVE_RCU
	int srcu_depth;
#endif
	int mode;
	u64 requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
	struct rcuwait wait;
#endif
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU-relax intercept or pause-loop-exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit or a cpu
	 * relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for directed
	 * yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_stat stat;
	char stats_id[KVM_STATS_NAME_SIZE];
	struct kvm_dirty_ring dirty_ring;

	/*
	 * The most recently used memslot by this vCPU and the slots generation
	 * for which it is valid.
	 * No wraparound protection is needed since generations won't overflow
	 * in thousands of years, even assuming 1M memslot operations per
	 * second.
	 */
	struct kvm_memory_slot *last_used_slot;
	u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode.  In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view.  In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice).  Let's treat guest mode as a quiescent state, just
	 * like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

/*
 * Deprecated.  Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
	guest_timing_enter_irqoff();
	guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is
 * interrupts disabled when this is invoked.  Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	guest_context_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
	context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

/*
 * Deprecated.  Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
	guest_context_exit_irqoff();
	guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled.  Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	guest_context_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
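
/*
 * Illustrative sketch (hypothetical arch code, heavily simplified): per the
 * ordering requirements documented above, the timing and state helpers are
 * expected to nest as follows in an architecture's vcpu run loop:
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *
 *	arch_enter_guest(vcpu);		// hypothetical arch-specific entry
 *
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */
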
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

/*
 * Since at idle each memslot belongs to two memslot sets it has to contain
 * two embedded nodes for each data structure that it forms a part of.
 *
 * Two memslot sets (one active and one inactive) are necessary so the VM
 * continues to run on one memslot set while the other is being modified.
 *
 * These two memslot sets normally point to the same set of memslots.
 * They can, however, be desynchronized when performing a memslot management
 * operation by replacing the memslot to be modified by its copy.
 * After the operation is complete, both memslot sets once again point to
 * the same, common set of memslot data.
 *
 * The memslots themselves are independent of each other so they can be
 * individually added or deleted.
 */
struct kvm_memory_slot {
	struct hlist_node id_node[2];
	struct interval_tree_node hva_node[2];
	struct rb_node gfn_node[2];
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_xen_evtchn {
	u32 port;
	u32 vcpu_id;
	int vcpu_idx;
	u32 priority;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
		struct kvm_xen_evtchn xen_evtchn;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains the list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

struct kvm_memslots {
	u64 generation;
	atomic_long_t last_used_slot;
	struct rb_root_cached hva_tree;
	struct rb_root gfn_tree;
	/*
	 * The mapping table from slot id to memslot.
	 *
	 * 7-bit bucket count matches the size of the old id to index array
	 * for 512 slots, while giving good performance with this slot count.
	 * Higher bucket counts bring only small performance improvements but
	 * always result in higher memory usage (even for lower memslot
	 * counts).
	 */
	DECLARE_HASHTABLE(id_hash, 7);
	int node_idx;
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

	struct mutex slots_lock;

	/*
	 * Protects the arch-specific fields of struct kvm_memory_slots in
	 * use by the VM.  To be used under the slots_lock (above) or in a
	 * kvm->srcu critical section where acquiring the slots_lock would
	 * lead to deadlock with the synchronize_srcu in
	 * install_new_memslots.
	 */
	struct mutex slots_arch_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	unsigned long nr_memslot_pages;
	/* The two memslot sets - active and inactive (per address space) */
	struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
	/* The current active memslot set for each address space */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct xarray vcpu_array;

	/* Used to wait for completion of MMU notifiers. */
	spinlock_t mn_invalidate_lock;
	unsigned long mn_active_invalidate_count;
	struct rcuwait mn_memslots_update_rcuwait;

	/* For management / invalidation of gfn_to_pfn_caches */
	spinlock_t gpc_lock;
	struct list_head gpc_list;

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int max_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
	bool vm_bugged;
	bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
	struct notifier_block pm_notifier;
#endif
	char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
		      (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,		\
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
	kvm->vm_dead = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
	kvm->vm_bugged = true;
	kvm_vm_dead(kvm);
}


#define KVM_BUG(cond, kvm, fmt...)				\
({								\
	int __ret = (cond);					\
								\
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

#define KVM_BUG_ON(cond, kvm)					\
({								\
	int __ret = (cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})
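
/*
 * Illustrative sketch (hypothetical condition): KVM_BUG_ON() is used like
 * WARN_ON_ONCE(), except that it also marks the VM as bugged and evaluates
 * to the truth value of the condition so the caller can bail out:
 *
 *	if (KVM_BUG_ON(vcpu->mode == IN_GUEST_MODE, kvm))
 *		return -EIO;
 */
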
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
	WARN_ONCE(vcpu->srcu_depth++,
		  "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
#endif
	vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);

#ifdef CONFIG_PROVE_RCU
	WARN_ONCE(--vcpu->srcu_depth,
		  "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
#endif
}
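
/*
 * Illustrative sketch: these wrappers stash the SRCU index in the vCPU so
 * that callers never touch ____srcu_idx directly.  A typical critical
 * section around a memslot lookup looks like:
 *
 *	kvm_vcpu_srcu_read_lock(vcpu);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	...
 *	kvm_vcpu_srcu_read_unlock(vcpu);
 *
 * The lock/unlock calls must be strictly paired; with CONFIG_PROVE_RCU,
 * nesting them on the same vCPU triggers the WARN_ONCE() above.
 */
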
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return xa_load(&kvm->vcpu_array, i);
}

#define kvm_for_each_vcpu(idx, vcpup, kvm)		   \
	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
			  (atomic_read(&kvm->online_vcpus) - 1))

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}
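
/*
 * Illustrative sketch (hypothetical loop body): iterating over all online
 * vCPUs requires an unsigned long index and a kvm_vcpu pointer:
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 *
 * Note that kvm_get_vcpu_by_id() looks a vCPU up by the userspace-assigned
 * vcpu_id, which need not match the vcpu_idx used by kvm_get_vcpu().
 */
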
void kvm_destroy_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
	return RB_EMPTY_ROOT(&slots->gfn_tree);
}

#define kvm_for_each_memslot(memslot, bkt, slots)			       \
	hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
		if (WARN_ON_ONCE(!memslot->npages)) {			       \
		} else

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	struct kvm_memory_slot *slot;
	int idx = slots->node_idx;

	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
		if (slot->id == id)
			return slot;
	}

	return NULL;
}

/* Iterator used for walking memslots that overlap a gfn range. */
struct kvm_memslot_iter {
	struct kvm_memslots *slots;
	struct rb_node *node;
	struct kvm_memory_slot *slot;
};

static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{
	iter->node = rb_next(iter->node);
	if (!iter->node)
		return;

	iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}

static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
					  struct kvm_memslots *slots,
					  gfn_t start)
{
	int idx = slots->node_idx;
	struct rb_node *tmp;
	struct kvm_memory_slot *slot;

	iter->slots = slots;

	/*
	 * Find the so called "upper bound" of a key - the first node that has
	 * its key strictly greater than the searched one (the start gfn in
	 * our case).
	 */
	iter->node = NULL;
	for (tmp = slots->gfn_tree.rb_node; tmp; ) {
		slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
		if (start < slot->base_gfn) {
			iter->node = tmp;
			tmp = tmp->rb_left;
		} else {
			tmp = tmp->rb_right;
		}
	}

	/*
	 * Find the slot with the lowest gfn that can possibly intersect with
	 * the range, so we'll ideally have slot start <= range start.
	 */
	if (iter->node) {
		/*
		 * A NULL previous node means that the very first slot
		 * already has a higher start gfn.
		 * In this case slot start > range start.
		 */
		tmp = rb_prev(iter->node);
		if (tmp)
			iter->node = tmp;
	} else {
		/* a NULL node below means no slots */
		iter->node = rb_last(&slots->gfn_tree);
	}

	if (iter->node) {
		iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);

		/*
		 * It is possible in the slot start < range start case that the
		 * found slot ends before or at range start (slot end <= range
		 * start) and so it does not overlap the requested range.
		 *
		 * In such non-overlapping case the next slot (if it exists)
		 * will already have slot start > range start, otherwise the
		 * logic above would have found it instead of the current slot.
		 */
		if (iter->slot->base_gfn + iter->slot->npages <= start)
			kvm_memslot_iter_next(iter);
	}
}

static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{
	if (!iter->node)
		return false;

	/*
	 * If this slot starts beyond or at the end of the range so does
	 * every next one.
	 */
	return iter->slot->base_gfn < end;
}

/* Iterate over each memslot at least partially intersecting the [start, end) range */
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)	\
	for (kvm_memslot_iter_start(iter, slots, start);		\
	     kvm_memslot_iter_is_valid(iter, end);			\
	     kvm_memslot_iter_next(iter))
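
/*
 * Illustrative sketch (hypothetical handler body): walking the slots that
 * intersect a gfn range uses an on-stack iterator:
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *
 *		// process the overlap of [start, end) with this slot
 *	}
 *
 * The first slot returned may begin before @start; callers are expected to
 * clamp the processed range to the slot boundaries themselves.
 */
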
/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})
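
/*
 * Illustrative sketch (hypothetical gpa and flag): kvm_get_guest() and
 * kvm_put_guest() read or write a single scalar at a guest physical
 * address, with the access size inferred from the type of 'v':
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val |= FLAG_PROCESSED;	// hypothetical flag
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 *
 * kvm_put_guest() also marks the backing page dirty on success; both
 * macros return 0 on success and -EFAULT if the gpa has no valid hva.
 */
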
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
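
/*
 * Illustrative sketch (hypothetical buffer access): kvm_vcpu_map() gives a
 * kernel mapping of the guest page containing @gpa that can be accessed
 * through map.hva until it is torn down:
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa, &map))
 *		return -EFAULT;
 *
 *	memcpy(map.hva + offset_in_page(gpa), buf, n);	// hypothetical access
 *
 *	kvm_vcpu_unmap(vcpu, &map, true);	// 'true' marks the page dirty
 */
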
/**
 * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
 *                             given guest physical address.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
 *		   invalidation.
 * @usage:	   indicates if the resulting host physical PFN is used while
 *		   the @vcpu is IN_GUEST_MODE (in which case invalidation of
 *		   the cache from MMU notifiers---but not for KVM memslot
 *		   changes!---will also force @vcpu to exit the guest and
 *		   refresh the cache); and/or if the PFN is used directly
 *		   by KVM (and thus needs a kernel virtual mapping).
 * @gpa:	   guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit within a
 *		   single page.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
 * invalidations to be processed.  Callers are required to use
 * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
 * accessing the target page.
 */
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
			      gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   current guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit within a
 *		   single page.
 *
 * @return:	   %true if the cache is still valid and the address matches.
 *		   %false if the cache is not valid.
 *
 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
 * while calling this function, and then continue to hold the lock until the
 * access is complete.
 *
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   updated guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit within a
 *		   single page.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This will attempt to refresh a gfn_to_pfn_cache.  Note that a successful
 * return from this function does not mean the page can be immediately
 * accessed because it may have raced with an invalidation.  Callers must
 * still lock and check the cache status, as this function does not return
 * with the lock still held to permit access.
 */
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 gpa_t gpa, unsigned long len);
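
/*
 * Illustrative sketch (simplified, without the retry limits real callers
 * may want): the documented init/check/refresh protocol chains together
 * as follows for a caller outside IN_GUEST_MODE context:
 *
 *	kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_HOST_USES_PFN,
 *				  gpa, len);
 *	...
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
 *		read_unlock(&gpc->lock);
 *		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len))
 *			return -EFAULT;
 *		read_lock(&gpc->lock);
 *	}
 *	// access the mapped page while holding gpc->lock
 *	read_unlock(&gpc->lock);
 */
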
/**
 * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This unmaps the referenced page.  The cache is left in the invalid state
 * but at least the mapping from GPA to userspace HVA will remain cached
 * and can be reused on a subsequent refresh.
 */
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

/**
 * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This removes a cache from the @kvm's list to be processed on MMU notifier
 * invocation.
 */
void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end);
void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#else
static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}
#endif

static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
	kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	__kvm_arch_free_vm(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

/*
 * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
 * true if the vCPU was blocking and was awakened, false otherwise.
 */
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
	return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
}

static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs.  On some architectures the virtual
 * interrupt controller is dynamically instantiated and this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
void kvm_unregister_perf_callbacks(void);
#else
static inline void kvm_register_perf_callbacks(void *ign) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif /* CONFIG_GUEST_PERF_EVENTS */

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
bool kvm_is_zone_device_page(struct page *page);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	if (!slot)
		return NULL;

	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;
	else
		return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn.  Otherwise returns
 * NULL.
 *
 * With "approx" set, a memslot is returned even when the address falls in
 * a hole; in that case, one of the memslots bordering the hole is returned.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;
	struct rb_node *node;
	int idx = slots->node_idx;

	slot = NULL;
	for (node = slots->gfn_tree.rb_node; node; ) {
		slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
		if (gfn >= slot->base_gfn) {
			if (gfn < slot->base_gfn + slot->npages)
				return slot;
			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	return approx ? slot : NULL;
}

static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;

	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
	slot = try_get_memslot(slot, gfn);
	if (slot)
		return slot;

	slot = search_memslots(slots, gfn, approx);
	if (slot) {
		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
		return slot;
	}

	return NULL;
}

/*
 * __gfn_to_memslot() and its descendants are here to allow arch code to
 * inline the lookups in hot paths.  gfn_to_memslot() itself isn't here as
 * an inline because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, false);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	const struct _kvm_stats_desc *desc;
	enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
	struct kvm_stats_desc desc;
	char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)		       \
	.flags = type | unit | base |					       \
		 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |	       \
		 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |	       \
		 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),	       \
	.exponent = exp,						       \
	.size = sz,							       \
	.bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vm_stat, generic.stat)   \
		},							       \
		.name = #stat,						       \
	}
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
		},							       \
		.name = #stat,						       \
	}
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vm_stat, stat)	       \
		},							       \
		.name = #stat,						       \
	}
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vcpu_stat, stat)	       \
		},							       \
		.name = #stat,						       \
	}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        struct kvm *kvm;
        const struct _kvm_stats_desc *desc;
        enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
        struct kvm_stats_desc desc;
        char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)                     \
        .flags = type | unit | base |                                         \
                 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |             \
                 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |             \
                 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),              \
        .exponent = exp,                                                      \
        .size = sz,                                                           \
        .bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)           \
        {                                                                     \
                {                                                             \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),    \
                        .offset = offsetof(struct kvm_vm_stat, generic.stat)  \
                },                                                            \
                .name = #stat,                                                \
        }
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)         \
        {                                                                     \
                {                                                             \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),    \
                        .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
                },                                                            \
                .name = #stat,                                                \
        }
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                   \
        {                                                                     \
                {                                                             \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),    \
                        .offset = offsetof(struct kvm_vm_stat, stat)          \
                },                                                            \
                .name = #stat,                                                \
        }
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                 \
        {                                                                     \
                {                                                             \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),    \
                        .offset = offsetof(struct kvm_vcpu_stat, stat)        \
                },                                                            \
                .name = #stat,                                                \
        }
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)               \
        SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)              \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,                    \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)                 \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,                       \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)                    \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,                          \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz)    \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,                   \
                   unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)            \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,                      \
                   unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)                                       \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,               \
                KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)                                      \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,                  \
                KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)                                      \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,                     \
                KVM_STATS_BASE_POW10, 0)

/* Instantaneous boolean value, read only */
#define STATS_DESC_IBOOLEAN(SCOPE, name)                                      \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,               \
                KVM_STATS_BASE_POW10, 0)
/* Peak (sticky) boolean value, read/write */
#define STATS_DESC_PBOOLEAN(SCOPE, name)                                      \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,                  \
                KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanoseconds */
#define STATS_DESC_TIME_NSEC(SCOPE, name)                                     \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,            \
                KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanoseconds */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)                    \
        STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,           \
                KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanoseconds */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)                         \
        STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,              \
                KVM_STATS_BASE_POW10, -9, sz)
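
/*
 * Illustrative sketch only: how an architecture might combine the
 * convenience macros above into a descriptor table.  "mmu_flushes",
 * "dirty_pages", and "flush_wait_hist" are hypothetical fields of
 * struct kvm_vm_stat, used here purely for illustration.
 */
#if 0
static const struct _kvm_stats_desc example_vm_stats_desc[] = {
        STATS_DESC_COUNTER(VM, mmu_flushes),    /* cumulative counter */
        STATS_DESC_ICOUNTER(VM, dirty_pages),   /* instantaneous value */
        STATS_DESC_LOGHIST_TIME_NSEC(VM, flush_wait_hist, 32),
};
#endif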

#define KVM_GENERIC_VM_STATS()                                                \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),                     \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()                                              \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),               \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),                \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),                  \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),                        \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),             \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),                \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),                     \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist,    \
                        HALT_POLL_HIST_COUNT),                                \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,       \
                        HALT_POLL_HIST_COUNT),                                \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,            \
                        HALT_POLL_HIST_COUNT),                                \
        STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
                       const struct _kvm_stats_desc *desc,
                       void *stats, size_t size_stats,
                       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
                                                u64 value, size_t bucket_size)
{
        size_t index = div64_u64(value, bucket_size);

        index = min(index, size - 1);
        ++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
        size_t index = fls64(value);

        index = min(index, size - 1);
        ++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)                     \
        kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)                               \
        kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)

extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
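
/*
 * Illustrative sketch only: recording a measured halt-poll duration in the
 * generic log-2 histogram via the helper above.  A value lands in bucket
 * fls64(value), clamped to the last bucket, so e.g. a 1000ns poll
 * increments bucket 10.
 */
#if 0
static void example_record_halt_poll(struct kvm_vcpu *vcpu, u64 poll_ns)
{
        KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist,
                                  poll_ns);
}
#endif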

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         *
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so it can't rely on
         * kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
                                         unsigned long mmu_seq,
                                         unsigned long hva)
{
        lockdep_assert_held(&kvm->mmu_lock);
        /*
         * If mmu_notifier_count is non-zero, then the range maintained by
         * kvm_mmu_notifier_invalidate_range_start contains all addresses
         * that might be being invalidated.  Note that it may include some
         * false positives, due to shortcuts when handling concurrent
         * invalidations.
         */
        if (unlikely(kvm->mmu_notifier_count) &&
            hva >= kvm->mmu_notifier_range_start &&
            hva < kvm->mmu_notifier_range_end)
                return 1;
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
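
/*
 * Illustrative sketch only: the canonical pattern for using
 * mmu_notifier_retry() in a page-fault path.  The sequence count is
 * snapshotted before the pfn lookup; if an invalidation ran in between,
 * the fault is retried instead of installing a stale mapping.
 * example_install_mapping() is hypothetical, and the write_lock() calls
 * assume KVM_HAVE_MMU_RWLOCK (use spin_lock() otherwise).
 */
#if 0
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq;
        kvm_pfn_t pfn;

        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();      /* pairs with the write side in the MMU notifier */

        /* May sleep; must be called without mmu_lock held. */
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        write_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                write_unlock(&kvm->mmu_lock);
                return -EAGAIN; /* invalidation raced with us; retry */
        }
        example_install_mapping(vcpu, gfn, pfn);
        write_unlock(&kvm->mmu_lock);
        return 0;
}
#endif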

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Requests that don't require vCPU action should never be logged in
         * vcpu->requests.  The vCPU won't clear the request, so it will stay
         * logged indefinitely and prevent the vCPU from entering the guest.
         */
        BUILD_BUG_ON(!__builtin_constant_p(req) ||
                     (req & KVM_REQUEST_NO_ACTION));

        __kvm_make_request(req, vcpu);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to
                 * kvm_check_request's caller.  Paired with the smp_wmb in
                 * kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}
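
/*
 * Illustrative sketch only: the producer/consumer pairing of the request
 * API above.  Everything here is hypothetical: KVM_REQ_EXAMPLE stands in
 * for an arch request (numbered from KVM_REQUEST_ARCH_BASE via
 * KVM_ARCH_REQ), and example_payload/example_consume for arch-private
 * data and its handler.  Data written before kvm_make_request() is
 * visible once kvm_check_request() observes the bit, thanks to the
 * smp_wmb()/smp_mb__after_atomic() pairing.
 */
#if 0
#define KVM_REQ_EXAMPLE         KVM_ARCH_REQ(0)

/* Producer, possibly running on another CPU: */
static void example_post_work(struct kvm_vcpu *vcpu, u64 data)
{
        vcpu->arch.example_payload = data;      /* published by smp_wmb() */
        kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
        kvm_vcpu_kick(vcpu);                    /* force a guest exit */
}

/* Consumer, in the vCPU run loop, before entering the guest: */
static void example_handle_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
                example_consume(vcpu->arch.example_payload);
}
#endif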

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock; any operations not suitable
         * for doing while holding the lock should be deferred to init
         * (see below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create if create is successful and is
         * called outside of holding kvm->lock.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        /*
         * Release is an alternative method to free the device.  It is
         * called when the device file descriptor is closed.  Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM.  kvm->lock is held.
         */
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
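
/*
 * Illustrative sketch only: the minimal shape of a kvm_device_ops
 * implementation and its registration.  The device type and callbacks
 * are hypothetical; create, destroy, and name are the mandatory members.
 */
#if 0
static int example_dev_create(struct kvm_device *dev, u32 type)
{
        /* Runs with kvm->lock held; defer sleeping setup to .init. */
        dev->private = NULL;
        return 0;
}

static void example_dev_destroy(struct kvm_device *dev)
{
        kfree(dev);     /* destroy must free dev itself */
}

static struct kvm_device_ops example_dev_ops = {
        .name = "kvm-example",
        .create = example_dev_create,
        .destroy = example_dev_destroy,
};

/* Typically called from arch init code, with a hypothetical type id: */
/* kvm_register_device_ops(&example_dev_ops, KVM_DEV_TYPE_MAX + 1); */
#endif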

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                                     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                                      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
                                  struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_INTR;
        vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vCPU out to userspace to avoid the dirty ring becoming
 * full.  This value can be tuned higher if e.g. PML is enabled on
 * the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

#endif