/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID (1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM 1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 52 ~ 62 to indicate an error pfn,
 * and bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but failed to
 * translate to a pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to
 * a pfn: it is either not in a slot or failed to translate.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
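
/*
 * Illustrative usage (sketch only, not part of the API proper): callers
 * that translate a gfn are expected to check for error/noslot pfns before
 * using the result, e.g.
 *
 *        kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *        if (is_error_noslot_pfn(pfn))
 *                return -EFAULT;
 *        ...
 *        kvm_release_pfn_clean(pfn);
 */
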
/*
 * Architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER 2
#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)

#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
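
/*
 * A minimal sketch of how an architecture is expected to define its own
 * requests on top of KVM_REQUEST_ARCH_BASE (the request names below are
 * made up for illustration only):
 *
 *        #define KVM_REQ_ARCH_EXAMPLE       KVM_ARCH_REQ(0)
 *        #define KVM_REQ_ARCH_EXAMPLE_SYNC  \
 *                KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 */
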
extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                       unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. there
         * is a 'struct page' for it; when using the mem= kernel parameter,
         * some memory can be used as guest memory without being managed by
         * the host kernel).
         * If 'pfn' is not managed by the host kernel, this field is
         * initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index in kvm->vcpus array */
        int srcu_idx;
        int mode;
        u64 requests;
        unsigned long guest_debug;

        int pre_pcpu;
        struct list_head blocked_vcpu_list;

        struct mutex mutex;
        struct kvm_run *run;

        struct rcuwait wait;
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or a cpu relax intercept.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
        atomic_t lru_slot;
        int used_slots;
        struct kvm_memory_slot memslots[];
};
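
/*
 * Illustrative lookup pattern (sketch only): because memslots[] is not
 * ordered by id, a slot must be found through the id_to_index table,
 * which is what id_to_memslot() (defined further down) does:
 *
 *        struct kvm_memory_slot *slot = id_to_memslot(slots, id);
 *
 *        if (!slot)
 *                return -ENOENT;
 */
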
struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */
        atomic_t online_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        unsigned int max_halt_poll_ns;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
                      (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);
        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
        return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + slots->used_slots; memslot++) \
                if (WARN_ON_ONCE(!memslot->npages)) { \
                } else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        if (index < 0)
                return NULL;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
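
/*
 * A hedged sketch of in-kernel slot setup (illustrative only; the values
 * below are placeholders): a caller fills a kvm_userspace_memory_region
 * and hands it to kvm_set_memory_region(), which takes slots_lock itself:
 *
 *        struct kvm_userspace_memory_region m = {
 *                .slot = id,
 *                .guest_phys_addr = gpa,
 *                .memory_size = size,
 *                .userspace_addr = hva,
 *        };
 *
 *        r = kvm_set_memory_region(kvm, &m);
 */
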
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
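
/*
 * Illustrative access patterns (sketch only, error handling abbreviated):
 *
 *        u32 val;
 *
 *        if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *                return -EFAULT;
 *
 * For a location that is accessed repeatedly, a gfn_to_hva_cache avoids the
 * memslot lookup on every access:
 *
 *        struct gfn_to_hva_cache ghc;
 *
 *        if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *                return -EFAULT;
 *        ...
 *        kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */
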
#define __kvm_put_guest(kvm, gfn, offset, value, type) \
({ \
        unsigned long __addr = gfn_to_hva(kvm, gfn); \
        type __user *__uaddr = (type __user *)(__addr + offset); \
        int __ret = -EFAULT; \
\
        if (!kvm_is_error_hva(__addr)) \
                __ret = put_user(value, __uaddr); \
        if (!__ret) \
                mark_page_dirty(kvm, gfn); \
        __ret; \
})

#define kvm_put_guest(kvm, gpa, value, type) \
({ \
        gpa_t __gpa = gpa; \
        struct kvm *__kvm = kvm; \
        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
                        offset_in_page(__gpa), (value), type); \
})

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
                struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
                  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
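
/*
 * Illustrative kvm_host_map usage (sketch; real callers also deal with the
 * offset within the page and with failure paths):
 *
 *        struct kvm_host_map map;
 *
 *        if (kvm_vcpu_map(vcpu, gpa, &map))
 *                return -EFAULT;
 *
 *        ... access the page through map.hva ...
 *
 *        kvm_vcpu_unmap(vcpu, &map, true);    'true' marks the page dirty
 */
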
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 struct kvm_vcpu *except,
                                 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
                                      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
                                unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif
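
/*
 * A hedged sketch of such an override (illustrative only): an architecture
 * defining __KVM_HAVE_ARCH_VM_ALLOC in its asm/kvm_host.h could provide
 *
 *        static inline struct kvm *kvm_arch_alloc_vm(void)
 *        {
 *                return vzalloc(sizeof(struct kvm));
 *        }
 *
 *        static inline void kvm_arch_free_vm(struct kvm *kvm)
 *        {
 *                vfree(kvm);
 *        }
 */
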
#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.waitp;
#else
        return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id,
                              int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        int start = 0, end = slots->used_slots;
        int slot = atomic_read(&slots->lru_slot);
        struct kvm_memory_slot *memslots = slots->memslots;

        if (unlikely(!slots->used_slots))
                return NULL;

        if (gfn >= memslots[slot].base_gfn &&
            gfn < memslots[slot].base_gfn + memslots[slot].npages)
                return &memslots[slot];

        while (start < end) {
                slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
            gfn < memslots[start].base_gfn + memslots[start].npages) {
                atomic_set(&slots->lru_slot, start);
                return &memslots[start];
        }

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
                                                gpa_t gpa)
{
        return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        return kvm_is_error_hva(hva);
}
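
/*
 * Putting the helpers above together, an illustrative gpa -> hva walk
 * (sketch only; the caller still has to validate the results):
 *
 *        gfn_t gfn = gpa_to_gfn(gpa);
 *        struct kvm_memory_slot *slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
 *        unsigned long hva;
 *
 *        if (!slot)
 *                return -EFAULT;
 *        hva = __gfn_to_hva_memslot(slot, gfn) + offset_in_page(gpa);
 */
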
enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        struct kvm *kvm;
        struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item) \
        ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

#define VM_STAT(n, x, ...) \
        { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...) \
        { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to kvm_check_request's
                 * caller.  Paired with the smp_wmb in kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}
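
/*
 * Illustrative request flow (sketch): a producer posts a request and kicks
 * the vCPU, while the vCPU run loop consumes it before entering the guest:
 *
 *        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *        kvm_vcpu_kick(vcpu);
 *
 *        ...
 *
 *        if (kvm_request_pending(vcpu)) {
 *                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *                        ... flush the TLB ...
 *        }
 */
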
extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock and any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create if create is successful and is called
         * outside of holding kvm->lock.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        /*
         * Release is an alternative method to free the device. It is
         * called when the device file descriptor is closed. Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM. kvm->lock is held.
         */
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                                     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                                      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we woke up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

#endif