/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>

#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param. If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
 * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
 * i.e. effectively makes the message mandatory. Many KVM selftests #define
 * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE. As
 * a result, static_assert() behavior is non-deterministic and may or may not
 * require a message depending on #include order.
 */
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
	struct kvm_userspace_memory_region region;
	struct sparsebit *unused_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t gdt;
	vm_vaddr_t tss;
	vm_vaddr_t idt;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};
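/*
 * Usage sketch (illustrative only, not part of this header): page allocators
 * resolve a region type to its backing memslot via vm_get_mem_region(),
 * declared below, rather than reading vm->memslots[] directly, e.g.:
 *
 *	struct userspace_mem_region *region;
 *
 *	region = vm_get_mem_region(vm, MEM_REGION_PT);
 */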
struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)				\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)		\
		if (!((vcpu) = vm->vcpus[i]))			\
			continue;				\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}
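/*
 * Usage sketch (illustrative only): tests typically gate on a capability with
 * TEST_REQUIRE() from test_util.h so unsupported setups are skipped, e.g.:
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING));
 */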
#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)


#define _kvm_ioctl(kvm_fd, cmd, name, arg)			\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
	_kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

#define _vm_ioctl(vm, cmd, name, arg)				\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vm_ioctl(vm, cmd, arg) \
	_vm_ioctl(vm, cmd, #cmd, arg)


static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)			\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vcpu_ioctl(vcpu, cmd, arg) \
	_vcpu_ioctl(vcpu, cmd, #cmd, arg)

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
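/*
 * Usage sketch (illustrative only; "bitmap", TEST_SLOT and TEST_NPAGES are
 * hypothetical): harvest dirty state for a memslot, then clear the harvested
 * pages so the next iteration only observes new guest writes:
 *
 *	kvm_vm_get_dirty_log(vm, TEST_SLOT, bitmap);
 *	kvm_vm_clear_dirty_log(vm, TEST_SLOT, bitmap, 0, TEST_NPAGES);
 */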
static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							   int index,
							   struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
				 uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
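/*
 * Usage sketch (illustrative only; "gva" is a guest virtual address obtained
 * elsewhere, e.g. from vm_vaddr_alloc_page()): the host can poke guest memory
 * by translating the guest address to a host pointer first:
 *
 *	uint64_t *hva = addr_gva2hva(vm, gva);
 *
 *	*hva = 0xdeadbeefull;
 */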
void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
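/*
 * Usage sketch (illustrative only; REG_ID stands for an arch-specific
 * KVM_REG_* encoding): the double-underscore variants return the raw ioctl()
 * result for tests that expect failure, the plain variants assert success:
 *
 *	uint64_t val;
 *
 *	vcpu_get_reg(vcpu, REG_ID, &val);
 *	vcpu_set_reg(vcpu, REG_ID, val + 1);
 */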
#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose entry point receives the arguments
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
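/*
 * Usage sketch (illustrative only; guest_code() taking two parameters and the
 * values passed are hypothetical): hand arguments to the guest's entry point
 * before the first KVM_RUN:
 *
 *	vcpu_args_set(vcpu, 2, test_gva, nr_pages);
 */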
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_MODE_DEFAULT);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}
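/*
 * Usage sketch (illustrative only; NR_VCPUS and guest_code() are
 * hypothetical): create a VM whose vCPUs all start at the same entry point:
 *
 *	struct kvm_vcpu *vcpus[NR_VCPUS];
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
 */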
/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}
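/*
 * Usage sketch (illustrative only; guest_code() is hypothetical): add an
 * extra vCPU, with a default stack, to an already created VM:
 *
 *	struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 1, guest_code);
 */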
/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}


/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}


static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

#endif /* SELFTEST_KVM_UTIL_BASE_H */