// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN	2

static int vcpu_mmap_sz(void);

/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
	size_t mask = size - 1;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return (void *) (((size_t) x + mask) & ~mask);
}

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
		    "  rc: %i errno: %i", ret, errno);

	close(kvm_fd);

	return ret;
}

/*
 * VM Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VM.
 */
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
	int ret;

	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
		    "  rc: %i errno: %i", ret, errno);

	return ret;
}
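/*
 * Example (illustrative sketch, not compiled into the library): callers
 * typically gate a test on kvm_check_cap() and then opt in to optional
 * behavior with vm_enable_cap() on a previously created 'vm'.
 * KVM_CAP_HALT_POLL is just one VM-scope capability used for illustration:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_HALT_POLL,
 *		.args[0] = 0,
 *	};
 *
 *	if (!kvm_check_cap(KVM_CAP_HALT_POLL))
 *		print_skip("KVM_CAP_HALT_POLL not available");
 *	else
 *		vm_enable_cap(vm, &cap);
 */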
/*
 * VCPU Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - VCPU
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VCPU.
 */
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
		    struct kvm_enable_cap *cap)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
	int r;

	TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);

	r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
		    "  rc: %i, errno: %i", r, errno);

	return r;
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	struct kvm_enable_cap cap = { 0 };

	cap.cap = KVM_CAP_DIRTY_LOG_RING;
	cap.args[0] = ring_size;
	vm_enable_cap(vm, &cap);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		    "rc: %i errno: %i", vm->fd, errno);
}

const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");
/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0. The file
 * descriptor to control the created VM is created with the permissions
 * given by perm (e.g. O_RDWR).
 */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	INIT_LIST_HEAD(&vm->userspace_mem_regions);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}
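/*
 * Example (illustrative sketch, not compiled into the library): create a
 * bare VM with the default mode and the usual baseline of physical pages,
 * then tear it down.  DEFAULT_GUEST_PHY_PAGES comes from kvm_util.h:
 *
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 *	...
 *	kvm_vm_free(vm);
 */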
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
				    void *guest_code, uint32_t vcpuids[])
{
	/*
	 * The maximum page table size for a memory region will be when the
	 * smallest pages are used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
	 * than N/x*2.
	 */
	uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
	uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	pages = vm_adjust_num_guest_pages(mode, pages);
	vm = vm_create(mode, pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif

	for (i = 0; i < nr_vcpus; ++i) {
		uint32_t vcpuid = vcpuids ? vcpuids[i] : i;

		vm_vcpu_add_default(vm, vcpuid, guest_code);

#ifdef __x86_64__
		vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
#endif
	}

	return vm;
}

struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
					    uint32_t num_percpu_pages, void *guest_code,
					    uint32_t vcpuids[])
{
	return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
				    num_percpu_pages, guest_code, vcpuids);
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
					    (uint32_t []){ vcpuid });
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated to the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
	int ret;

	ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
					    .first_page = first_page,
					    .num_pages = num_pages };
	int ret;

	ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
}
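/*
 * Example (illustrative sketch, not compiled into the library): fetch the
 * dirty bitmap of a slot that was created with KVM_MEM_LOG_DIRTY_PAGES.
 * Sizing the bitmap (one bit per page, rounded up to a multiple of 64)
 * is the caller's responsibility:
 *
 *	unsigned long *bitmap;
 *
 *	bitmap = calloc(DIV_ROUND_UP(num_pages, 64), sizeof(*bitmap));
 *	TEST_ASSERT(bitmap, "Insufficient Memory");
 *	kvm_vm_get_dirty_log(vm, slot, bitmap);
 *	...
 *	free(bitmap);
 */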
/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;
	}

	return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find. Allows tests to look up
 * the memslot data structure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}

/*
 * VCPU Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to VCPU structure
 *
 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
 * for the specified vcpuid.
 */
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpuid)
			return vcpu;
	}

	return NULL;
}
/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
			    "rc: %i errno: %i", ret, errno);
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->state, vcpu_mmap_sz());
	TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);
	ret = close(vcpu->fd);
	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);

	list_del(&vcpu->list);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
		    "  vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
		    "  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	int ret;

	list_del(&region->list);

	region->region.memory_size = 0;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	struct userspace_mem_region *region, *tmp;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
		__vm_mem_region_delete(vmp, region);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}
/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}
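/*
 * Example (illustrative sketch, not compiled into the library): verify
 * that a buffer the guest wrote at 'gva' matches a host-side reference
 * copy 'expected' of 'size' bytes:
 *
 *	TEST_ASSERT(!kvm_memcmp_hva_gva(expected, vm, gva, size),
 *		    "guest data does not match the expected pattern");
 */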
/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region.
 *              Use VM_MEM_SRC_ANONYMOUS for anonymous memory.
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr. The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		    "Number of guest pages is not compatible with the host. "
		    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		    "address not on a page boundary.\n"
		    "  guest_paddr: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		    <= vm->max_gfn, "Physical range beyond maximum "
		    "supported physical address,\n"
		    "  guest_paddr: 0x%lx npages: 0x%lx\n"
		    "  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			  "exists\n"
			  "  requested guest_paddr: 0x%lx npages: 0x%lx "
			  "page_size: 0x%x\n"
			  "  existing guest_paddr: 0x%lx size: 0x%lx",
			  guest_paddr, npages, vm->page_size,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			  "already exists.\n"
			  "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			  "  existing slot: %u paddr: 0x%lx size: 0x%lx",
			  slot, guest_paddr, npages,
			  region->region.slot,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "test_malloc failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	/* Align host address */
	region->host_mem = align(region->mmap_start, alignment);

	/* As needed perform madvise */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		struct stat statbuf;

		ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
		TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
			    "stat /sys/kernel/mm/transparent_hugepage");

		TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP,
			    "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel");

		if (ret == 0) {
			ret = madvise(region->host_mem, npages * vm->page_size,
				      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
			TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x",
				    region->host_mem, npages * vm->page_size, src_type);
		}
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		    "  rc: %i errno: %i\n"
		    "  slot: %u flags: 0x%x\n"
		    "  guest_phys_addr: 0x%lx size: 0x%lx",
		    ret, errno, slot, flags,
		    guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to linked-list of memory regions. */
	list_add(&region->list, &vm->userspace_mem_regions);
}
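/*
 * Example (illustrative sketch, not compiled into the library): add a
 * 64-page dirty-logged slot at 1 GiB.  The slot number (1) is arbitrary;
 * it must simply be unused:
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    1ul << 30, 1, 64,
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 */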
/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describes the memory region
 *   using kvm memory slot ID given by memslot. TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot == memslot)
			return region;
	}

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to modify
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		    "  rc: %i errno: %i slot: %u flags: 0x%x",
		    ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot));
}
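/*
 * Example (illustrative sketch, not compiled into the library): toggle
 * dirty logging on an existing slot, then drop it again:
 *
 *	vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);
 *	...
 *	vm_mem_region_set_flags(vm, slot, 0);
 */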
/*
 * VCPU mmap Size
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return:
 *   Size of VCPU state
 *
 * Returns the size of the structure pointed to by the return value
 * of vcpu_state().
 */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (dev_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
		    __func__, ret, errno);

	close(dev_fd);

	return ret;
}

/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_FAIL("vcpu with the specified id "
			  "already exists,\n"
			  "  requested vcpuid: %u\n"
			  "  existing vcpuid: %u state: %p",
			  vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		    vcpu->fd, errno);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		    "vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);
}
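/*
 * Example (illustrative sketch, not compiled into the library): most
 * tests use the arch-aware wrapper vm_vcpu_add_default() (declared in
 * kvm_util.h), which also sets up a stack and entry point, rather than
 * calling vm_vcpu_add() directly:
 *
 *	vm_vcpu_add_default(vm, 0, guest_code);
 *	vcpu_run(vm, 0);
 */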
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min. Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm, pgd_memslot);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm,
			KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
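/*
 * Example (illustrative sketch, not compiled into the library): carve out
 * one page of guest virtual memory and fill it from the host side.
 * KVM_UTIL_MIN_VADDR and memslot 0 are the usual defaults from
 * kvm_util.h:
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, getpagesize(),
 *					KVM_UTIL_MIN_VADDR, 0, 0);
 *
 *	memset(addr_gva2hva(vm, gva), 0xaa, getpagesize());
 */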
/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages, uint32_t pgd_memslot)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
		vaddr += page_size;
		paddr += page_size;
	}
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((gpa >= region->region.guest_phys_addr)
		    && (gpa <= (region->region.guest_phys_addr
			+ region->region.memory_size - 1)))
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - region->region.guest_phys_addr));
	}

	TEST_FAIL("No vm physical memory at 0x%lx", gpa);
	return NULL;
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((hva >= region->host_mem)
		    && (hva <= (region->host_mem
			+ region->region.memory_size - 1)))
			return (vm_paddr_t) ((uintptr_t)
				region->region.guest_phys_addr
				+ (hva - (uintptr_t) region->host_mem));
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}
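/*
 * Example (illustrative sketch, not compiled into the library): the
 * address helpers compose, e.g. to round-trip a guest physical address:
 *
 *	void *hva = addr_gpa2hva(vm, gpa);
 *
 *	TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa,
 *		    "gpa -> hva -> gpa round trip failed");
 */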
/*
 * VM Create IRQ Chip
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates an interrupt controller chip for the VM specified by vm.
 */
void vm_create_irqchip(struct kvm_vm *vm)
{
	int ret;

	ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
	TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	vm->has_irqchip = true;
}

/*
 * VM VCPU State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to structure that describes the state of the VCPU.
 *
 * Locates and returns a pointer to a structure that describes the
 * state of the VCPU with the given vcpuid.
 */
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->state;
}

/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Switch to executing the code for the VCPU given by vcpuid, within the VM
 * given by vm.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int ret = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	do {
		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vm, vcpuid);

	return rc;
}

int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->fd;
}
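/*
 * Example (illustrative sketch, not compiled into the library): the
 * canonical run loop of a test, dispatching on the exit reason:
 *
 *	struct kvm_run *run = vcpu_state(vm, vcpuid);
 *
 *	vcpu_run(vm, vcpuid);
 *	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 *		    "Unexpected exit reason: %u (%s)",
 *		    run->exit_reason,
 *		    exit_reason_str(run->exit_reason));
 */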
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_guest_debug *debug)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);

	TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
}

/*
 * VM VCPU Set MP State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   mp_state - mp_state to be set
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the MP state of the VCPU given by vcpuid, to the state given
 * by mp_state.
 */
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
	TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}

/*
 * VM VCPU Get Reg List
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   None
 *
 * Return:
 *   A pointer to an allocated struct kvm_reg_list
 *
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}

/*
 * VM VCPU Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   regs - current state of VCPU regs
 *
 * Return: None
 *
 * Obtains the current register state for the VCPU specified by vcpuid
 * and stores it at the location given by regs.
 */
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
		    ret, errno);
}
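/*
 * Example (illustrative sketch, not compiled into the library): read,
 * tweak and write back the general purpose registers (x86 field names
 * shown; vcpu_regs_set() is declared in kvm_util.h):
 *
 *	struct kvm_regs regs;
 *
 *	vcpu_regs_get(vm, vcpuid, &regs);
 *	regs.rcx = 42;
 *	vcpu_regs_set(vm, vcpuid, &regs);
 */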
/*
 * VM VCPU Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   regs - Values to set VCPU regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the regs of the VCPU specified by vcpuid to the values
 * given by regs.
 */
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
		    ret, errno);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i",
		    ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i",
		    ret, errno);
}
#endif

#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
	TEST_ASSERT(ret == 0,
		    "KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
		    ret, errno);
}

int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
	if (!ignore_error) {
		TEST_ASSERT(ret == 0,
			    "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
			    ret, errno);
	}

	return ret;
}
#endif

/*
 * VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state for the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		    ret, errno);
}
/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);

	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}
/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
	TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, cmd, arg);

	return ret;
}

void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;
	uint32_t size = vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);

	if (!vcpu->dirty_gfns) {
		void *addr;

		/*
		 * KVM is expected to reject private and executable
		 * mappings of the dirty ring; verify both before
		 * establishing the real shared read/write mapping.
		 */
		addr = mmap(NULL, size, PROT_READ,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}
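/*
 * Example (illustrative sketch, not compiled into the library): enable a
 * dirty ring sized for 4096 entries before any vCPU is created, then map
 * each vCPU's ring before running it.  Note the ring size is given in
 * bytes, as in vm_enable_dirty_ring() above:
 *
 *	struct kvm_dirty_gfn *gfns;
 *
 *	vm_enable_dirty_ring(vm, 4096 * sizeof(struct kvm_dirty_gfn));
 *	...
 *	gfns = vcpu_map_dirty_ring(vm, vcpuid);
 */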
/*
 * VM Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VM fd.
 */
void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = _vm_ioctl(vm, cmd, arg);
	TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, ret, errno, strerror(errno));
}

int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	return ioctl(vm->fd, cmd, arg);
}

/*
 * KVM system ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a KVM fd.
 */
void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->kvm_fd, cmd, arg);
	TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, ret, errno, strerror(errno));
}

int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	return ioctl(vm->kvm_fd, cmd, arg);
}

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
	{KVM_EXIT_X86_RDMSR, "RDMSR"},
	{KVM_EXIT_X86_WRMSR, "WRMSR"},
	{KVM_EXIT_XEN, "XEN"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason. If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min. If found, the pages are marked as in use
 * and their base address is returned. A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		    "not divisible by page size.\n"
		    "  paddr_min: 0x%lx page_size: 0x%x",
		    paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
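/*
 * Example (illustrative sketch, not compiled into the library): grab four
 * physically contiguous pages from memslot 0 for a DMA-style buffer:
 *
 *	vm_paddr_t gpa;
 *
 *	gpa = vm_phy_pages_alloc(vm, 4,
 *				 KVM_UTIL_MIN_PFN * vm_get_page_size(vm), 0);
 */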
/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

/*
 * Is Unrestricted Guest
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
 *
 * Check if the unrestricted guest flag is enabled.
 */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	char val = 'N';
	size_t count;
	FILE *f;

	if (vm == NULL) {
		/* Ensure that the KVM vendor-specific module is loaded. */
		f = fopen(KVM_DEV_PATH, "r");
		TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
			    errno);
		fclose(f);
	}

	f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
	if (f) {
		count = fread(&val, sizeof(char), 1, f);
		TEST_ASSERT(count == 1, "Unable to read from param file.");
		fclose(f);
	}

	return val == 'Y';
}

unsigned int vm_get_page_size(struct kvm_vm *vm)
{
	return vm->page_size;
}

unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int vm_get_fd(struct kvm_vm *vm)
{
	return vm->fd;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Compute the divisor only once new_page_shift is known to be
	 * the larger of the two shifts, so the subtraction below can't
	 * produce an out-of-range shift count.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}
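/*
 * Example (illustrative sketch, not compiled into the library): size a
 * memslot so that a 1 MiB guest buffer fits regardless of the guest page
 * size implied by the mode ('gpa' and 'slot' chosen by the caller):
 *
 *	uint64_t npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, 1 << 20);
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    gpa, slot, npages, 0);
 */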