// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN	2

/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
	size_t mask = size - 1;
	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return (void *) (((size_t) x + mask) & ~mask);
}

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the Value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap.  On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
		"  rc: %i errno: %i", ret, errno);

	close(kvm_fd);

	return ret;
}

/* VM Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VM.
 */
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
	int ret;

	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
		"  rc: %i errno: %i", ret, errno);

	return ret;
}

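/*
 * Example usage (illustrative sketch, not part of this file): probe a
 * capability on the host and enable a VM-scoped capability.  The
 * specific capability and ring size below are assumptions chosen for
 * illustration only.
 *
 *	if (!kvm_check_cap(KVM_CAP_DIRTY_LOG_RING))
 *		print_skip("dirty ring not available");
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_DIRTY_LOG_RING,
 *		.args[0] = 4096,
 *	};
 *	vm_enable_cap(vm, &cap);
 */
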
/* VCPU Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - VCPU
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VCPU.
 */
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
		    struct kvm_enable_cap *cap)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
	int r;

	TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);

	r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
			"  rc: %i, errno: %i", r, errno);

	return r;
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	struct kvm_enable_cap cap = { 0 };

	cap.cap = KVM_CAP_DIRTY_LOG_RING;
	cap.args[0] = ring_size;
	vm_enable_cap(vm, &cap);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		"rc: %i errno: %i", vm->fd, errno);
}

const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0.  The file
 * descriptor to control the created VM is created with the permissions
 * given by perm (e.g. O_RDWR).
 */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	INIT_LIST_HEAD(&vm->userspace_mem_regions);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless a CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}

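/*
 * Example usage (illustrative sketch, not part of this file): create a
 * bare VM in the default mode backed by the default number of physical
 * pages, then tear it down.
 *
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 *	...
 *	kvm_vm_free(vm);
 */
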
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
				    void *guest_code, uint32_t vcpuids[])
{
	/* The maximum page table size for a memory region will be when the
	 * smallest pages are used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
	 * than N/x*2.
	 */
	uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
	uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	pages = vm_adjust_num_guest_pages(mode, pages);
	vm = vm_create(mode, pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif

	for (i = 0; i < nr_vcpus; ++i) {
		uint32_t vcpuid = vcpuids ? vcpuids[i] : i;

		vm_vcpu_add_default(vm, vcpuid, guest_code);

#ifdef __x86_64__
		vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
#endif
	}

	return vm;
}

struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
					    uint32_t num_percpu_pages, void *guest_code,
					    uint32_t vcpuids[])
{
	return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
				    num_percpu_pages, guest_code, vcpuids);
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
					    (uint32_t []){ vcpuid });
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated to the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
	int ret;

	ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
					    .first_page = first_page,
					    .num_pages = num_pages };
	int ret;

	ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
}

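/*
 * Example usage (illustrative sketch, not part of this file): fetch and
 * then clear the dirty bitmap for a memslot.  The slot index,
 * host_num_pages, and the bitmap_alloc() helper are assumptions chosen
 * for illustration.
 *
 *	unsigned long *bmap = bitmap_alloc(host_num_pages);
 *
 *	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
 *	kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap,
 *			       0, host_num_pages);
 *	free(bmap);
 */
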
/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive.  If multiple overlapping regions exist, a pointer to any
 * of the regions is returned.  Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;
	}

	return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find. Allows tests to look up
 * the memslot data structure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}

/*
 * VCPU Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to VCPU structure
 *
 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
 * returns a pointer to it.  Returns NULL if the VM doesn't contain a VCPU
 * for the specified vcpuid.
 */
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpuid)
			return vcpu;
	}

	return NULL;
}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
			    "rc: %i errno: %i", ret, errno);
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->state, sizeof(*vcpu->state));
	TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
		"errno: %i", ret, errno);
	ret = close(vcpu->fd);
	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
		"errno: %i", ret, errno);

	list_del(&vcpu->list);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
		"  vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
		"  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	int ret;

	list_del(&region->list);

	region->region.memory_size = 0;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	struct userspace_mem_region *region, *tmp;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
		__vm_mem_region_delete(vmp, region);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

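/*
 * Example usage (illustrative sketch, not part of this file): the usual
 * lifetime of a test VM, ending with kvm_vm_free() which deletes the
 * memory regions, releases the file descriptors and frees the tracking
 * structure.  guest_code is a placeholder for the test's guest entry
 * point.
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *
 *	... run the test ...
 *
 *	kvm_vm_free(vm);
 */
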
/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva.  Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison.  If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found.  Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region (e.g. VM_MEM_SRC_ANONYMOUS
 *              for anonymous memory)
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr.  The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "test_malloc failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	/* Align host address */
	region->host_mem = align(region->mmap_start, alignment);

	/* As needed perform madvise */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		struct stat statbuf;

		ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
		TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
			    "stat /sys/kernel/mm/transparent_hugepage");

		TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP,
			    "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel");

		if (ret == 0) {
			ret = madvise(region->host_mem, npages * vm->page_size,
				      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
			TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x",
				    region->host_mem, npages * vm->page_size, src_type);
		}
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to linked-list of memory regions. */
	list_add(&region->list, &vm->userspace_mem_regions);
}

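/*
 * Example usage (illustrative sketch, not part of this file): add a
 * 64 MiB anonymous region with dirty logging enabled.  The guest
 * physical address and slot number are assumptions chosen for
 * illustration.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1,
 *				    (64 << 20) / vm_get_page_size(vm),
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 */
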
/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describe memory region
 *   using kvm memory slot ID given by memslot.  TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot == memslot)
			return region;
	}

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to modify
 *   flags - Flags to set for the memory region
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i slot: %u flags: 0x%x",
		ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot));
}

/*
 * VCPU mmap Size
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return:
 *   Size of VCPU state
 *
 * Returns the size of the structure pointed to by the return value
 * of vcpu_state().
 */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (dev_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
		    __func__, ret, errno);

	close(dev_fd);

	return ret;
}

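/*
 * Example usage (illustrative sketch, not part of this file): toggle
 * dirty logging on an existing memslot; slot 1 is an assumption chosen
 * for illustration.
 *
 *	vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
 *	...
 *	vm_mem_region_set_flags(vm, 1, 0);
 */
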
/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_FAIL("vcpu with the specified id "
			"already exists,\n"
			"  requested vcpuid: %u\n"
			"  existing vcpuid: %u state: %p",
			vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		    vcpu->fd, errno);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		"vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);
}

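/*
 * Example usage (illustrative sketch, not part of this file): add a
 * bare VCPU with no extra setup; vcpuid 0 is an assumption chosen for
 * illustration.
 *
 *	vm_vcpu_add(vm, 0);
 *
 * Most tests instead call vm_vcpu_add_default(), which additionally
 * sets up the guest stack and entry point.
 */
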
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
				  pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
						     pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
						       pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
					  pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
					 pgidx_start, pages),
		    "Unexpected, invalid virtual page index range,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages),
		    "Unexpected, pages already mapped,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm.  The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm, pgd_memslot);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm,
				KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped,
			      vaddr >> vm->page_shift);
	}

	return vaddr_start;
}

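/*
 * Example usage (illustrative sketch, not part of this file): allocate
 * one page of guest virtual memory and zero it from the host side.
 * The minimum vaddr and the use of memslot 0 for both data and page
 * tables are assumptions chosen for illustration.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, vm_get_page_size(vm),
 *					0x10000, 0, 0);
 *
 *	memset(addr_gva2hva(vm, gva), 0, vm_get_page_size(vm));
 */
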
/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages, uint32_t pgd_memslot)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
		vaddr += page_size;
		paddr += page_size;
	}
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm.  When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((gpa >= region->region.guest_phys_addr)
			&& (gpa <= (region->region.guest_phys_addr
				+ region->region.memory_size - 1)))
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - region->region.guest_phys_addr));
	}

	TEST_FAIL("No vm physical memory at 0x%lx", gpa);
	return NULL;
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm.  When found, the equivalent
 * VM physical address is returned.  A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((hva >= region->host_mem)
			&& (hva <= (region->host_mem
				+ region->region.memory_size - 1)))
			return (vm_paddr_t) ((uintptr_t)
				region->region.guest_phys_addr
				+ (hva - (uintptr_t) region->host_mem));
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}

/*
 * VM Create IRQ Chip
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates an interrupt controller chip for the VM specified by vm.
 */
void vm_create_irqchip(struct kvm_vm *vm)
{
	int ret;

	ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
	TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
		"rc: %i errno: %i", ret, errno);

	vm->has_irqchip = true;
}

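/*
 * Example usage (illustrative sketch, not part of this file): write to
 * guest physical memory from the host through the gpa-to-hva mapping;
 * the guest physical address is an assumption chosen for illustration.
 *
 *	uint64_t *hva = addr_gpa2hva(vm, 0x10000);
 *
 *	*hva = 0xdeadbeef;
 */
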
/*
 * VM VCPU State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to structure that describes the state of the VCPU.
 *
 * Locates and returns a pointer to a structure that describes the
 * state of the VCPU with the given vcpuid.
 */
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->state;
}

/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Switch to executing the code for the VCPU given by vcpuid, within the VM
 * given by vm.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int ret = _vcpu_run(vm, vcpuid);
	TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	do {
		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vm, vcpuid);

	return rc;
}

int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->fd;
}

void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_guest_debug *debug)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);

	TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
}

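/*
 * Example usage (illustrative sketch, not part of this file): the
 * common run loop pattern of entering the guest and checking the exit
 * reason afterwards.  VCPU_ID is an assumption; tests typically define
 * it themselves.
 *
 *	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
 *
 *	vcpu_run(vm, VCPU_ID);
 *	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 *		    "Unexpected exit reason: %u (%s)",
 *		    run->exit_reason,
 *		    exit_reason_str(run->exit_reason));
 */
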
/*
 * VM VCPU Set MP State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   mp_state - mp_state to be set
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the MP state of the VCPU given by vcpuid, to the state given
 * by mp_state.
 */
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
	TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

/*
 * VM VCPU Get Reg List
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   None
 *
 * Return:
 *   A pointer to an allocated struct kvm_reg_list
 *
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}

/*
 * VM VCPU Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   regs - current state of VCPU regs
 *
 * Return: None
 *
 * Obtains the current register state for the VCPU specified by vcpuid
 * and stores it at the location given by regs.
 */
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
		ret, errno);
}

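/*
 * Example usage (illustrative sketch, not part of this file): walk the
 * list of registers supported for KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 * VCPU_ID is an assumption chosen for illustration.
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vm, VCPU_ID);
 *	__u64 i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_info("reg id: 0x%llx\n", list->reg[i]);
 *	free(list);
 */
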
/*
 * VM VCPU Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   regs - Values to set VCPU regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the regs of the VCPU specified by vcpuid to the values
 * given by regs.
 */
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
		ret, errno);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
		ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
		ret, errno);
}
#endif

#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
	TEST_ASSERT(ret == 0,
		"KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
		ret, errno);
}

int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
	if (!ignore_error) {
		TEST_ASSERT(ret == 0,
			"KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
			ret, errno);
	}

	return ret;
}
#endif

/*
 * VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state for the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		ret, errno);
}

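/*
 * Example usage (illustrative sketch, not part of this file): a
 * read-modify-write of the general purpose registers, e.g. to skip
 * over an instruction on x86.  The 2-byte advance is an assumption
 * about the instruction being skipped.
 *
 *	struct kvm_regs regs;
 *
 *	vcpu_regs_get(vm, VCPU_ID, &regs);
 *	regs.rip += 2;
 *	vcpu_regs_set(vm, VCPU_ID, &regs);
 */
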
/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
	TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, cmd, arg);

	return ret;
}

void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;
	uint32_t size = vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);

	if (!vcpu->dirty_gfns) {
		void *addr;

		addr = mmap(NULL, size, PROT_READ,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}

/*
 * VM Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VM fd.
 */
void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->fd, cmd, arg);
	TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

/*
 * KVM system ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a KVM fd.
 */
void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->kvm_fd, cmd, arg);
	TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	return ioctl(vm->kvm_fd, cmd, arg);
}

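/*
 * Example usage (illustrative sketch, not part of this file): map a
 * vcpu's dirty ring after enabling it on the VM.  The ring size in
 * bytes is an assumption; it must match what was passed to
 * vm_enable_dirty_ring().
 *
 *	vm_enable_dirty_ring(vm, 4096);
 *	...
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vm, VCPU_ID);
 */
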
/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
	{KVM_EXIT_X86_RDMSR, "RDMSR"},
	{KVM_EXIT_X86_WRMSR, "WRMSR"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason.  If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

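/*
 * Example usage (illustrative sketch, not part of this file): grab two
 * contiguous guest physical pages from memslot 0; the minimum physical
 * address and slot are assumptions chosen for illustration.
 *
 *	vm_paddr_t gpa;
 *
 *	gpa = vm_phy_pages_alloc(vm, 2,
 *				 KVM_UTIL_MIN_PFN * vm_get_page_size(vm), 0);
 */
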
/*
 * Is Unrestricted Guest
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
 *
 * Check if the unrestricted guest flag is enabled.
 */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	char val = 'N';
	size_t count;
	FILE *f;

	if (vm == NULL) {
		/* Ensure that the KVM vendor-specific module is loaded. */
		f = fopen(KVM_DEV_PATH, "r");
		TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
			    errno);
		fclose(f);
	}

	f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
	if (f) {
		count = fread(&val, sizeof(char), 1, f);
		TEST_ASSERT(count == 1, "Unable to read from param file.");
		fclose(f);
	}

	return val == 'Y';
}

unsigned int vm_get_page_size(struct kvm_vm *vm)
{
	return vm->page_size;
}

unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int vm_get_fd(struct kvm_vm *vm)
{
	return vm->fd;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Compute the divisor only on this path; when converting to a
	 * larger page shift, new_page_shift - page_shift would wrap and
	 * the shift would be undefined.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;
	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}