// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN	2

/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
	size_t mask = size - 1;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return (void *) (((size_t) x + mask) & ~mask);
}

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
		"  rc: %i errno: %i", ret, errno);

	close(kvm_fd);

	return ret;
}

/*
 * VM Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VM.
 */
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
	int ret;

	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
		"  rc: %i errno: %i", ret, errno);

	return ret;
}
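/*
 * Usage sketch (illustrative only, not part of this file's logic): a test
 * typically probes a capability and skips when it is absent. The capability
 * named here is just an example.
 *
 *	if (!kvm_check_cap(KVM_CAP_DIRTY_LOG_RING)) {
 *		print_skip("KVM_CAP_DIRTY_LOG_RING not available");
 *		exit(KSFT_SKIP);
 *	}
 */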
/*
 * VCPU Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - VCPU
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VCPU.
 */
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
		    struct kvm_enable_cap *cap)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
	int r;

	TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);

	r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
		"  rc: %i, errno: %i", r, errno);

	return r;
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	struct kvm_enable_cap cap = { 0 };

	cap.cap = KVM_CAP_DIRTY_LOG_RING;
	cap.args[0] = ring_size;
	vm_enable_cap(vm, &cap);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		"rc: %i errno: %i", vm->fd, errno);
}

const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};

static const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0. The file
 * descriptor to control the created VM is created with the permissions
 * given by perm (e.g. O_RDWR).
 */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	INIT_LIST_HEAD(&vm->userspace_mem_regions);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;
	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}
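/*
 * Usage sketch (illustrative only; the page count is arbitrary): create a
 * bare VM with 512 pages of guest physical memory and free it again.
 *
 *	struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT, 512, O_RDWR);
 *	...
 *	kvm_vm_free(vm);
 */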
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
				    void *guest_code, uint32_t vcpuids[])
{
	/*
	 * The maximum page table size for a memory region will be when the
	 * smallest pages are used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
	 * than N/x*2.
	 */
	uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
	uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
	uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	pages = vm_adjust_num_guest_pages(mode, pages);
	vm = vm_create(mode, pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif

	for (i = 0; i < nr_vcpus; ++i) {
		uint32_t vcpuid = vcpuids ? vcpuids[i] : i;

		vm_vcpu_add_default(vm, vcpuid, guest_code);

#ifdef __x86_64__
		vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
#endif
	}

	return vm;
}

struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
					    uint32_t num_percpu_pages, void *guest_code,
					    uint32_t vcpuids[])
{
	return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
				    num_percpu_pages, guest_code, vcpuids);
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
					    (uint32_t []){ vcpuid });
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated to the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
	int ret;

	ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
		    __func__, strerror(-ret));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
					    .first_page = first_page,
					    .num_pages = num_pages };
	int ret;

	ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
		    __func__, strerror(-ret));
}

uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
}
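/*
 * Usage sketch (illustrative only; slot 0, the page count, and the
 * simplified bitmap sizing are assumptions): fetch a memslot's dirty
 * bitmap, then clear the harvested pages so the next pass starts clean.
 *
 *	void *bmap = calloc(1, DIV_ROUND_UP(npages, 8));
 *
 *	kvm_vm_get_dirty_log(vm, 0, bmap);
 *	kvm_vm_clear_dirty_log(vm, 0, bmap, 0, npages);
 *	free(bmap);
 */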
/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;
	}

	return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find. Allows tests to look up
 * the memslot datastructure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}
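/*
 * Usage sketch (illustrative only; gpa and size are assumed to come from
 * the caller): probe whether any memslot already overlaps a guest
 * physical range before adding a region there.
 *
 *	if (kvm_userspace_memory_region_find(vm, gpa, gpa + size - 1))
 *		TEST_FAIL("range already backed by a memslot");
 */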
/*
 * VCPU Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to VCPU structure
 *
 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
 * for the specified vcpuid.
 */
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpuid)
			return vcpu;
	}

	return NULL;
}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
			    "rc: %i errno: %i", ret, errno);
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->state, sizeof(*vcpu->state));
	TEST_ASSERT(ret == 0, "munmap of VCPU state failed, rc: %i "
		"errno: %i", ret, errno);
	ret = close(vcpu->fd);
	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
		"errno: %i", ret, errno);

	list_del(&vcpu->list);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
		"  vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
		"  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	int ret;

	list_del(&region->list);

	region->region.memory_size = 0;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	struct userspace_mem_region *region, *tmp;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
		__vm_mem_region_delete(vmp, region);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region.
 *              NULL to use anonymous memory.
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr. The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "test_malloc failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	/* Align host address */
	region->host_mem = align(region->mmap_start, alignment);

	/* As needed perform madvise */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		struct stat statbuf;

		ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
		TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
			    "stat /sys/kernel/mm/transparent_hugepage");

		TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP,
			    "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel");

		if (ret == 0) {
			ret = madvise(region->host_mem, npages * vm->page_size,
				      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
			TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x",
				    region->host_mem, npages * vm->page_size, src_type);
		}
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to linked-list of memory regions. */
	list_add(&region->list, &vm->userspace_mem_regions);
}
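/*
 * Usage sketch (illustrative only; the gpa, slot and page count are
 * arbitrary): back 64 extra guest pages with anonymous memory in slot 1.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 64, 0);
 */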
/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describe memory region
 *   using kvm memory slot ID given by memslot. TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot == memslot)
			return region;
	}

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to modify
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i slot: %u flags: 0x%x",
		ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot));
}
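/*
 * Usage sketch (illustrative only; slot 1 and the new gpa are arbitrary):
 * the three memslot mutators above are typically exercised like so.
 *
 *	vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
 *	vm_mem_region_move(vm, 1, 0x20000000);
 *	vm_mem_region_delete(vm, 1);
 */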
/*
 * VCPU mmap Size
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return:
 *   Size of VCPU state
 *
 * Returns the size of the structure pointed to by the return value
 * of vcpu_state().
 */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (dev_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
		    __func__, ret, errno);

	close(dev_fd);

	return ret;
}

/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_FAIL("vcpu with the specified id "
			"already exists,\n"
			"  requested vcpuid: %u\n"
			"  existing vcpuid: %u state: %p",
			vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		    vcpu->fd, errno);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		"vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);
}
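/*
 * Usage sketch (illustrative only): add a bare vCPU with id 0. No guest
 * state is initialized; most tests use the arch helper
 * vm_vcpu_add_default() instead.
 *
 *	vm_vcpu_add(vm, 0);
 */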
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min. Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm, pgd_memslot);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm,
			KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
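/*
 * Usage sketch (illustrative only; the minimum vaddr and the use of
 * memslot 0 for both data and page tables are assumptions): carve out one
 * page of guest virtual address space.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, vm_get_page_size(vm),
 *					0x10000, 0, 0);
 */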
/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages, uint32_t pgd_memslot)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
		vaddr += page_size;
		paddr += page_size;
	}
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((gpa >= region->region.guest_phys_addr)
			&& (gpa <= (region->region.guest_phys_addr
				+ region->region.memory_size - 1)))
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - region->region.guest_phys_addr));
	}

	TEST_FAIL("No vm physical memory at 0x%lx", gpa);
	return NULL;
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((hva >= region->host_mem)
			&& (hva <= (region->host_mem
				+ region->region.memory_size - 1)))
			return (vm_paddr_t) ((uintptr_t)
				region->region.guest_phys_addr
				+ (hva - (uintptr_t) region->host_mem));
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}
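/*
 * Usage sketch (illustrative only; gpa is assumed to be backed by a
 * memslot): the two translations above are inverses of each other.
 *
 *	void *hva = addr_gpa2hva(vm, gpa);
 *
 *	TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa, "gpa/hva round trip");
 */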
/*
 * VM Create IRQ Chip
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates an interrupt controller chip for the VM specified by vm.
 */
void vm_create_irqchip(struct kvm_vm *vm)
{
	int ret;

	ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
	TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
		"rc: %i errno: %i", ret, errno);

	vm->has_irqchip = true;
}

/*
 * VM VCPU State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to structure that describes the state of the VCPU.
 *
 * Locates and returns a pointer to a structure that describes the
 * state of the VCPU with the given vcpuid.
 */
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->state;
}

/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Switch to executing the code for the VCPU given by vcpuid, within the VM
 * given by vm.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int ret = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	do {
		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vm, vcpuid);

	return rc;
}

int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->fd;
}

void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_guest_debug *debug)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);

	TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
}
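/*
 * Usage sketch (illustrative only; expecting an I/O exit is just an
 * example): run a vCPU once and inspect why it returned.
 *
 *	struct kvm_run *run = vcpu_state(vm, 0);
 *
 *	vcpu_run(vm, 0);
 *	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 *		    "unexpected exit: %s", exit_reason_str(run->exit_reason));
 */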
/*
 * VM VCPU Set MP State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   mp_state - mp_state to be set
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the MP state of the VCPU given by vcpuid, to the state given
 * by mp_state.
 */
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
	TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

/*
 * VM VCPU Get Reg List
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   None
 *
 * Return:
 *   A pointer to an allocated struct kvm_reg_list
 *
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}

/*
 * VM VCPU Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   regs - current state of VCPU regs
 *
 * Return: None
 *
 * Obtains the current register state for the VCPU specified by vcpuid
 * and stores it at the location given by regs.
 */
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
		ret, errno);
}
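/*
 * Usage sketch (illustrative only; the x86 rip tweak is just an example of
 * the read-modify-write pattern with vcpu_regs_get()/vcpu_regs_set()):
 *
 *	struct kvm_regs regs;
 *
 *	vcpu_regs_get(vm, 0, &regs);
 *	regs.rip += 2;
 *	vcpu_regs_set(vm, 0, &regs);
 */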
/*
 * VM VCPU Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   regs - Values to set VCPU regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the regs of the VCPU specified by vcpuid to the values
 * given by regs.
 */
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
		ret, errno);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i",
		ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i",
		ret, errno);
}
#endif

#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
	TEST_ASSERT(ret == 0,
		"KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
		ret, errno);
}

int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
	if (!ignore_error) {
		TEST_ASSERT(ret == 0,
			"KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
			ret, errno);
	}

	return ret;
}
#endif

/*
 * VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state for the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		ret, errno);
}

/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);

	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}
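/*
 * Usage sketch (illustrative only; the register id is arch-specific and
 * deliberately left as a placeholder):
 *
 *	uint64_t val;
 *	struct kvm_one_reg reg = {
 *		.id   = ...arch-specific KVM_REG_* id...,
 *		.addr = (uint64_t)&val,
 *	};
 *
 *	vcpu_get_reg(vm, 0, &reg);
 */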
/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
	TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, cmd, arg);

	return ret;
}

void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;
	uint32_t size = vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);

	if (!vcpu->dirty_gfns) {
		void *addr;

		/*
		 * Verify the kernel refuses private and exec mappings of
		 * the dirty ring before creating the real shared mapping.
		 */
		addr = mmap(NULL, size, PROT_READ,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
			    MAP_PRIVATE, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, vcpu->fd,
			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}

/*
 * VM Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VM fd.
 */
void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->fd, cmd, arg);
	TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

/*
 * KVM system ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a KVM fd.
 */
void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->kvm_fd, cmd, arg);
	TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	return ioctl(vm->kvm_fd, cmd, arg);
}
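/*
 * Usage sketch (illustrative only; the ring size is an assumption, and the
 * ring is typically enabled before any vCPU is created): enable the dirty
 * ring, then map vCPU 0's ring.
 *
 *	vm_enable_dirty_ring(vm, 4096);
 *	...
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vm, 0);
 */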
/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
	{KVM_EXIT_X86_RDMSR, "RDMSR"},
	{KVM_EXIT_X86_WRMSR, "WRMSR"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason. If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min. If found, the pages are marked as in use
 * and their base address is returned. A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
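/*
 * Usage sketch (illustrative only; memslot 0 is an assumption): grab four
 * contiguous guest physical pages above the utility minimum PFN.
 *
 *	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 4,
 *					    KVM_UTIL_MIN_PFN * vm->page_size, 0);
 */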
/*
 * Is Unrestricted Guest
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
 *
 * Check if the unrestricted guest flag is enabled.
 */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	char val = 'N';
	size_t count;
	FILE *f;

	if (vm == NULL) {
		/* Ensure that the KVM vendor-specific module is loaded. */
		f = fopen(KVM_DEV_PATH, "r");
		TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
			    errno);
		fclose(f);
	}

	f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
	if (f) {
		count = fread(&val, sizeof(char), 1, f);
		TEST_ASSERT(count == 1, "Unable to read from param file.");
		fclose(f);
	}

	return val == 'Y';
}

unsigned int vm_get_page_size(struct kvm_vm *vm)
{
	return vm->page_size;
}

unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int vm_get_fd(struct kvm_vm *vm)
{
	return vm->fd;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	/* Converting to equal or smaller pages: each source page expands. */
	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/* Converting to larger pages: n source pages fit in one new page. */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}
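/*
 * Usage sketch (illustrative only; 1 MiB is an arbitrary figure): size a
 * guest allocation in pages for the default mode.
 *
 *	unsigned int npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, 1 << 20);
 */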