// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

static int vcpu_mmap_sz(void);

int open_path_or_exit(const char *path, int flags)
{
	int fd;

	fd = open(path, flags);
	__TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);

	return fd;
}

/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
static int _open_kvm_dev_path_or_exit(int flags)
{
	return open_path_or_exit(KVM_DEV_PATH, flags);
}

int open_kvm_dev_path_or_exit(void)
{
	return _open_kvm_dev_path_or_exit(O_RDONLY);
}

static bool get_module_param_bool(const char *module_name, const char *param)
{
	const int path_size = 128;
	char path[path_size];
	char value;
	ssize_t r;
	int fd;

	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
		     module_name, param);
	TEST_ASSERT(r < path_size,
		    "Failed to construct sysfs path in %d bytes.", path_size);

	fd = open_path_or_exit(path, O_RDONLY);

	r = read(fd, &value, 1);
	TEST_ASSERT(r == 1, "read(%s) failed", path);

	r = close(fd);
	TEST_ASSERT(!r, "close(%s) failed", path);

	if (value == 'Y')
		return true;
	else if (value == 'N')
		return false;

	TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}

bool get_kvm_param_bool(const char *param)
{
	return get_module_param_bool("kvm", param);
}

bool get_kvm_intel_param_bool(const char *param)
{
	return get_module_param_bool("kvm_intel", param);
}

bool get_kvm_amd_param_bool(const char *param)
{
	return get_module_param_bool("kvm_amd", param);
}

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap.  On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
unsigned int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();
	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));

	close(kvm_fd);

	return (unsigned int)ret;
}
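
/*
 * Illustrative usage sketch (not part of the upstream file): tests normally
 * gate themselves on a capability before creating a VM.  The capability names
 * below are only examples.
 *
 *	static void require_dirty_ring(void)
 *	{
 *		TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
 *			     kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
 *	}
 */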

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
	else
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm)
{
	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));

	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}

const char *vm_guest_mode_string(uint32_t i)
{
	static const char * const strings[] = {
		[VM_MODE_P52V48_4K]	= "PA-bits:52, VA-bits:48, 4K pages",
		[VM_MODE_P52V48_64K]	= "PA-bits:52, VA-bits:48, 64K pages",
		[VM_MODE_P48V48_4K]	= "PA-bits:48, VA-bits:48, 4K pages",
		[VM_MODE_P48V48_16K]	= "PA-bits:48, VA-bits:48, 16K pages",
		[VM_MODE_P48V48_64K]	= "PA-bits:48, VA-bits:48, 64K pages",
		[VM_MODE_P40V48_4K]	= "PA-bits:40, VA-bits:48, 4K pages",
		[VM_MODE_P40V48_16K]	= "PA-bits:40, VA-bits:48, 16K pages",
		[VM_MODE_P40V48_64K]	= "PA-bits:40, VA-bits:48, 64K pages",
		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48, 4K pages",
		[VM_MODE_P47V64_4K]	= "PA-bits:47, VA-bits:64, 4K pages",
		[VM_MODE_P44V64_4K]	= "PA-bits:44, VA-bits:64, 4K pages",
		[VM_MODE_P36V48_4K]	= "PA-bits:36, VA-bits:48, 4K pages",
		[VM_MODE_P36V48_16K]	= "PA-bits:36, VA-bits:48, 16K pages",
		[VM_MODE_P36V48_64K]	= "PA-bits:36, VA-bits:48, 64K pages",
		[VM_MODE_P36V47_16K]	= "PA-bits:36, VA-bits:47, 16K pages",
	};
	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
		       "Missing new mode strings?");

	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

	return strings[i];
}

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	[VM_MODE_P52V48_4K]	= { 52, 48, 0x1000, 12 },
	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
	[VM_MODE_P48V48_4K]	= { 48, 48, 0x1000, 12 },
	[VM_MODE_P48V48_16K]	= { 48, 48, 0x4000, 14 },
	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
	[VM_MODE_P40V48_4K]	= { 40, 48, 0x1000, 12 },
	[VM_MODE_P40V48_16K]	= { 40, 48, 0x4000, 14 },
	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
	[VM_MODE_PXXV48_4K]	= { 0, 0, 0x1000, 12 },
	[VM_MODE_P47V64_4K]	= { 47, 64, 0x1000, 12 },
	[VM_MODE_P44V64_4K]	= { 44, 64, 0x1000, 12 },
	[VM_MODE_P36V48_4K]	= { 36, 48, 0x1000, 12 },
	[VM_MODE_P36V48_16K]	= { 36, 48, 0x4000, 14 },
	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
	[VM_MODE_P36V47_16K]	= { 36, 47, 0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * Initializes vm->vpages_valid to match the canonical VA space of the
 * architecture.
 *
 * The default implementation is valid for architectures which split the
 * range addressed by a single page table into a low and high region
 * based on the MSB of the VA.  On architectures with this behavior
 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
 */
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}

struct kvm_vm *____vm_create(enum vm_guest_mode mode)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	vm->regions.gpa_tree = RB_ROOT;
	vm->regions.hva_tree = RB_ROOT;
	hash_init(vm->regions.slot_hash);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P36V47_16K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless a CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	case VM_MODE_P47V64_4K:
		vm->pgtable_levels = 5;
		break;
	case VM_MODE_P44V64_4K:
		vm->pgtable_levels = 5;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	vm_vaddr_populate_bitmap(vm);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = vm_compute_max_gfn(vm);

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();

	return vm;
}

static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
				     uint32_t nr_runnable_vcpus,
				     uint64_t extra_mem_pages)
{
	uint64_t nr_pages;

	TEST_ASSERT(nr_runnable_vcpus,
		    "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");

	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	/*
	 * Arbitrarily allocate 512 pages (2mb when page size is 4kb) for the
	 * test code and other per-VM assets that will be loaded into memslot0.
	 */
	nr_pages = 512;

	/* Account for the per-vCPU stacks on behalf of the test. */
	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;

	/*
	 * Account for the number of pages needed for the page tables.  The
	 * maximum page table size for a memory region will be when the
	 * smallest page size is used.  Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
	 * than N/x*2.
	 */
	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

	return vm_adjust_num_guest_pages(mode, nr_pages);
}

struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages)
{
	uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
						 nr_extra_pages);
	struct userspace_mem_region *slot0;
	struct kvm_vm *vm;
	int i;

	pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
		 vm_guest_mode_string(mode), nr_pages);

	vm = ____vm_create(mode);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;

	kvm_vm_elf_load(vm, program_invocation_name);

	/*
	 * TODO: Add proper defines to protect the library's memslots, and then
	 * carve out memslot1 for the ucall MMIO address.  KVM treats writes to
	 * read-only memslots as MMIO, and creating a read-only memslot for the
	 * MMIO region would prevent silently clobbering the MMIO region.
	 */
	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	kvm_arch_vm_post_create(vm);

	return vm;
}

/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   nr_vcpus - VCPU count
 *   extra_mem_pages - Non-slot0 physical memory total size
 *   guest_code - Guest entry point
 *   vcpus - Array in which the created vCPUs are returned
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * extra_mem_pages is only used to calculate the maximum page table size,
 * no real memory allocation for non-slot0 memory in this function.
 */
struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[])
{
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");

	vm = __vm_create(mode, nr_vcpus, extra_mem_pages);

	for (i = 0; i < nr_vcpus; ++i)
		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

	return vm;
}

struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code)
{
	struct kvm_vcpu *vcpus[1];
	struct kvm_vm *vm;

	vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
				    guest_code, vcpus);

	*vcpu = vcpus[0];
	return vm;
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp)
{
	int ctr;
	struct userspace_mem_region *region;

	vm_open(vmp);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
					      uint32_t vcpu_id)
{
	return __vm_vcpu_add(vm, vcpu_id);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
	kvm_vm_restart(vm);

	return vm_vcpu_recreate(vm, 0);
}

void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
{
	cpu_set_t mask;
	int r;

	CPU_ZERO(&mask);
	CPU_SET(pcpu, &mask);
	r = sched_setaffinity(0, sizeof(mask), &mask);
	TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
}

static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
	uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);

	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
		    "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
	return pcpu;
}

void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus)
{
	cpu_set_t allowed_mask;
	char *cpu, *cpu_list;
	char delim[2] = ",";
	int i, r;

	cpu_list = strdup(pcpus_string);
	TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");

	r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
	TEST_ASSERT(!r, "sched_getaffinity() failed");

	cpu = strtok(cpu_list, delim);

	/* 1. Get all pcpus for vcpus. */
	for (i = 0; i < nr_vcpus; i++) {
		TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
		vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
		cpu = strtok(NULL, delim);
	}

	/* 2. Check if the main worker needs to be pinned. */
	if (cpu) {
		kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
		cpu = strtok(NULL, delim);
	}

	TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
	free(cpu_list);
}
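
/*
 * Illustrative example (not part of the upstream file): a test that takes a
 * "-c" pinning option typically forwards the argument string as-is.  For
 * "1,2,3,4" with three vCPUs, vCPUs 0-2 map to pCPUs 1-3 and the trailing
 * entry pins the main (worker) task to pCPU 4.  The pin call for a vCPU is
 * then issued from that vCPU's own thread.
 *
 *	uint32_t vcpu_to_pcpu[3];
 *
 *	kvm_parse_vcpu_pinning("1,2,3,4", vcpu_to_pcpu, 3);
 *	kvm_pin_this_task_to_pcpu(vcpu_to_pcpu[0]);
 */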

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive.  If multiple overlapping regions exist, a pointer to any
 * of the regions is returned.  Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct rb_node *node;

	for (node = vm->regions.gpa_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, gpa_node);
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;

		if (start < existing_start)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find.  Allows tests to look up
 * the memslot datastructure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}

__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{

}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->run, vcpu_mmap_sz());
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));

	ret = close(vcpu->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	list_del(&vcpu->list);

	vcpu_arch_free(vcpu);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct kvm_vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region,
				   bool unlink)
{
	int ret;

	if (unlink) {
		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
		rb_erase(&region->hva_node, &vm->regions.hva_tree);
		hash_del(&region->slot_node);
	}

	region->region.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
	if (region->fd >= 0) {
		/* There's an extra map when using shared memory. */
		ret = munmap(region->mmap_alias, region->mmap_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		close(region->fd);
	}

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ctr;
	struct hlist_node *node;
	struct userspace_mem_region *region;

	if (vmp == NULL)
		return;

	/* Free cached stats metadata and close FD */
	if (vmp->stats_fd) {
		free(vmp->stats_desc);
		close(vmp->stats_fd);
	}

	/* Free userspace_mem_regions. */
	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
		__vm_mem_region_delete(vmp, region, false);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

int kvm_memfd_alloc(size_t size, bool hugepages)
{
	int memfd_flags = MFD_CLOEXEC;
	int fd, r;

	if (hugepages)
		memfd_flags |= MFD_HUGETLB;

	fd = memfd_create("kvm_selftest", memfd_flags);
	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));

	r = ftruncate(fd, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));

	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));

	return fd;
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal the guest virtual bytes starting at gva.  Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison.  If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found.  Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), gpa_node);
		parent = *cur;
		if (region->region.guest_phys_addr <
		    cregion->region.guest_phys_addr)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->region.guest_phys_addr !=
				    cregion->region.guest_phys_addr,
				    "Duplicate GPA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
}

static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), hva_node);
		parent = *cur;
		if (region->host_mem < cregion->host_mem)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->host_mem !=
				    cregion->host_mem,
				    "Duplicate HVA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
}


int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}
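
/*
 * Illustrative usage (not part of the upstream file): the "__" variant
 * returns the raw ioctl() result so tests can assert on expected failures,
 * while the non-"__" wrapper below asserts success.  Slot, GPA, size and
 * hva below are example values only.
 *
 *	// Expect failure: memory_size is not page aligned.
 *	int r = __vm_set_user_memory_region(vm, 1, 0, 1ull << 30, 4097, hva);
 *	TEST_ASSERT(r == -1 && errno == EINVAL, "Unaligned size should fail");
 *
 *	// Add a 2MiB region at 1GiB GPA backed by hva, asserting success.
 *	vm_set_user_memory_region(vm, 1, 0, 1ull << 30, 0x200000, hva);
 */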

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva)
{
	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
		    errno, strerror(errno));
}

/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region.
 *              NULL to use anonymous memory.
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr.  The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       slot) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/*
	 * When using THP mmap is not guaranteed to return a hugepage aligned
	 * address so we have to pad the mmap.  Padding is not needed for HugeTLB
	 * because mmap will always return an address aligned to the HugeTLB
	 * page size.
	 */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(backing_src_pagesz, alignment);

	ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->fd = -1;
	if (backing_src_is_shared(src_type))
		region->fd = kvm_memfd_alloc(region->mmap_size,
					     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  vm_mem_backing_src_alias(src_type)->flag,
				  region->fd, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
		    region->mmap_start, backing_src_pagesz);

	/* Align host address */
	region->host_mem = align_ptr_up(region->mmap_start, alignment);

	/* As needed perform madvise */
	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
			    region->host_mem, npages * vm->page_size,
			    vm_mem_backing_src_alias(src_type)->name);
	}

	region->backing_src_type = src_type;
	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to quick lookup data structures */
	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = mmap(NULL, region->mmap_size,
					  PROT_READ | PROT_WRITE,
					  vm_mem_backing_src_alias(src_type)->flag,
					  region->fd, 0);
		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
	}
}

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describe memory region
 *   using kvm memory slot ID given by memslot.  TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       memslot)
		if (region->region.slot == memslot)
			return region;

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to modify
 *   flags - New KVM memory region flags
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i slot: %u flags: 0x%x",
		ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
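
/*
 * Illustrative sequence (not part of the upstream file) showing how tests
 * typically exercise the memslot helpers above; the slot number, GPA and
 * page count are arbitrary example values.
 *
 *	#define TEST_SLOT	10
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, TEST_SLOT, 64, 0);
 *	vm_mem_region_set_flags(vm, TEST_SLOT, KVM_MEM_LOG_DIRTY_PAGES);
 *	vm_mem_region_move(vm, TEST_SLOT, 0x20000000);
 *	vm_mem_region_delete(vm, TEST_SLOT);
 */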

/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));

	close(dev_fd);

	return ret;
}

static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpu_id)
			return true;
	}

	return false;
}

/*
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done.  Returns the vCPU.
 */
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");

	vcpu->vm = vm;
	vcpu->id = vcpu_id;
	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
	TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->run != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);

	return vcpu;
}

/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
			       vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
				  pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
						     pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
						       pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
					  pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
					 pgidx_start, pages),
		    "Unexpected, invalid virtual page index range,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages),
		    "Unexpected, pages already mapped,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
					      KVM_UTIL_MIN_PFN * vm->page_size,
					      vm->memslots[type]);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {

		virt_pg_map(vm, vaddr, paddr);

		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
	}

	return vaddr_start;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm.  The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.  The allocated physical space comes from the TEST_DATA memory region.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
}

/*
 * VM Virtual Address Allocate Pages
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least N system pages worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}

vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}

/*
 * VM Virtual Address Allocate Page
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least one system page worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
{
	return vm_vaddr_alloc_pages(vm, 1);
}

/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr);
		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);

		vaddr += page_size;
		paddr += page_size;
	}
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm.  When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region) {
		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
		return NULL;
	}

	return (void *)((uintptr_t)region->host_mem
		+ (gpa - region->region.guest_phys_addr));
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm.  When found, the equivalent
 * VM physical address is returned.  A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct rb_node *node;

	for (node = vm->regions.hva_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, hva_node);

		if (hva >= region->host_mem) {
			if (hva <= (region->host_mem
				+ region->region.memory_size - 1))
				return (vm_paddr_t)((uintptr_t)
					region->region.guest_phys_addr
					+ (hva - (uintptr_t)region->host_mem));

			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}

/*
 * Address VM physical to Host Virtual *alias*.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent address within the host virtual *alias* area, or NULL
 *   (without failing the test) if the guest memory is not shared (so
 *   no alias exists).
 *
 * Create a writable, shared virtual=>physical alias for the specific GPA.
 * The primary use case is to allow the host selftest to manipulate guest
 * memory without mapping said memory in the guest's address space.  And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;
	uintptr_t offset;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region)
		return NULL;

	if (!region->host_alias)
		return NULL;

	offset = gpa - region->region.guest_phys_addr;
	return (void *) ((uintptr_t) region->host_alias + offset);
}

/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);

	vm->has_irqchip = true;
}

int _vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	do {
		rc = __vcpu_run(vcpu);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vcpu);

	return rc;
}

/*
 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if the KVM returns an error (other than -EINTR).
 */
void vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret = _vcpu_run(vcpu);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->run->immediate_exit = 1;
	ret = __vcpu_run(vcpu);
	vcpu->run->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}
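
/*
 * Illustrative run loop (not part of the upstream file): most tests drive a
 * vCPU roughly like this, using exit_reason_str() (defined later in this
 * file) for readable failure messages.  KVM_EXIT_IO is the typical ucall
 * exit on x86; other architectures use different exit reasons, and the
 * actual ucall handling lives outside this file.
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
 *			    "Unexpected exit reason: %u (%s)",
 *			    vcpu->run->exit_reason,
 *			    exit_reason_str(vcpu->run->exit_reason));
 *		... handle the ucall / guest IO, break when the guest is done ...
 *	}
 */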

/*
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.  Returns a kvm_reg_list pointer,
 * it is the caller's responsibility to free the list.
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");

	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
	uint32_t page_size = getpagesize();
	uint32_t size = vcpu->vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	if (!vcpu->dirty_gfns) {
		void *addr;

		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}

/*
 * Device Ioctl
 */

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	struct kvm_device_attr attribute = {
		.group = group,
		.attr = attr,
		.flags = 0,
	};

	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}

int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.fd = -1,
		.flags = 0,
	};
	int err;

	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
	return err ? : create_dev.fd;
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}

/*
 * IRQ related functions.
 */

int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq = irq,
		.level = level,
	};

	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	int ret = _kvm_irq_line(vm, irq, level);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}

struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
	struct kvm_irq_routing *routing;
	size_t size;

	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes 196 KBs. */
	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
	routing = calloc(1, size);
	assert(routing);

	return routing;
}

void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin)
{
	int i;

	assert(routing);
	assert(routing->nr < KVM_MAX_IRQ_ROUTES);

	i = routing->nr;
	routing->entries[i].gsi = gsi;
	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
	routing->entries[i].flags = 0;
	routing->entries[i].u.irqchip.irqchip = 0;
	routing->entries[i].u.irqchip.pin = pin;
	routing->nr++;
}

int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	assert(routing);
	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
	free(routing);

	return ret;
}

void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	ret = _kvm_gsi_routing_write(vm, routing);
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int ctr;
	struct userspace_mem_region *region;
	struct kvm_vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");

	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vcpu, indent + 2);
}

#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	KVM_EXIT_STRING(UNKNOWN),
	KVM_EXIT_STRING(EXCEPTION),
	KVM_EXIT_STRING(IO),
	KVM_EXIT_STRING(HYPERCALL),
	KVM_EXIT_STRING(DEBUG),
	KVM_EXIT_STRING(HLT),
	KVM_EXIT_STRING(MMIO),
	KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
	KVM_EXIT_STRING(SHUTDOWN),
	KVM_EXIT_STRING(FAIL_ENTRY),
	KVM_EXIT_STRING(INTR),
	KVM_EXIT_STRING(SET_TPR),
	KVM_EXIT_STRING(TPR_ACCESS),
	KVM_EXIT_STRING(S390_SIEIC),
	KVM_EXIT_STRING(S390_RESET),
	KVM_EXIT_STRING(DCR),
	KVM_EXIT_STRING(NMI),
	KVM_EXIT_STRING(INTERNAL_ERROR),
	KVM_EXIT_STRING(OSI),
	KVM_EXIT_STRING(PAPR_HCALL),
	KVM_EXIT_STRING(S390_UCONTROL),
	KVM_EXIT_STRING(WATCHDOG),
	KVM_EXIT_STRING(S390_TSCH),
	KVM_EXIT_STRING(EPR),
	KVM_EXIT_STRING(SYSTEM_EVENT),
	KVM_EXIT_STRING(S390_STSI),
	KVM_EXIT_STRING(IOAPIC_EOI),
	KVM_EXIT_STRING(HYPERV),
	KVM_EXIT_STRING(ARM_NISV),
	KVM_EXIT_STRING(X86_RDMSR),
	KVM_EXIT_STRING(X86_WRMSR),
	KVM_EXIT_STRING(DIRTY_RING_FULL),
	KVM_EXIT_STRING(AP_RESET_HOLD),
	KVM_EXIT_STRING(X86_BUS_LOCK),
	KVM_EXIT_STRING(XEN),
	KVM_EXIT_STRING(RISCV_SBI),
	KVM_EXIT_STRING(RISCV_CSR),
	KVM_EXIT_STRING(NOTIFY),
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason.  If no such string is found, a constant
 * string of "Unknown" is returned.
1887 */ 1888 const char *exit_reason_str(unsigned int exit_reason) 1889 { 1890 unsigned int n1; 1891 1892 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { 1893 if (exit_reason == exit_reasons_known[n1].reason) 1894 return exit_reasons_known[n1].name; 1895 } 1896 1897 return "Unknown"; 1898 } 1899 1900 /* 1901 * Physical Contiguous Page Allocator 1902 * 1903 * Input Args: 1904 * vm - Virtual Machine 1905 * num - number of pages 1906 * paddr_min - Physical address minimum 1907 * memslot - Memory region to allocate page from 1908 * 1909 * Output Args: None 1910 * 1911 * Return: 1912 * Starting physical address 1913 * 1914 * Within the VM specified by vm, locates a range of available physical 1915 * pages at or above paddr_min. If found, the pages are marked as in use 1916 * and their base address is returned. A TEST_ASSERT failure occurs if 1917 * not enough pages are available at or above paddr_min. 1918 */ 1919 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 1920 vm_paddr_t paddr_min, uint32_t memslot) 1921 { 1922 struct userspace_mem_region *region; 1923 sparsebit_idx_t pg, base; 1924 1925 TEST_ASSERT(num > 0, "Must allocate at least one page"); 1926 1927 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 1928 "not divisible by page size.\n" 1929 " paddr_min: 0x%lx page_size: 0x%x", 1930 paddr_min, vm->page_size); 1931 1932 region = memslot2region(vm, memslot); 1933 base = pg = paddr_min >> vm->page_shift; 1934 1935 do { 1936 for (; pg < base + num; ++pg) { 1937 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { 1938 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); 1939 break; 1940 } 1941 } 1942 } while (pg && pg != base + num); 1943 1944 if (pg == 0) { 1945 fprintf(stderr, "No guest physical page available, " 1946 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", 1947 paddr_min, vm->page_size, memslot); 1948 fputs("---- vm dump ----\n", stderr); 1949 vm_dump(stderr, vm, 2); 1950 abort(); 1951 } 1952 1953 for (pg = base; pg < base + num; ++pg) 1954 sparsebit_clear(region->unused_phy_pages, pg); 1955 1956 return base * vm->page_size; 1957 } 1958 1959 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 1960 uint32_t memslot) 1961 { 1962 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 1963 } 1964 1965 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) 1966 { 1967 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 1968 vm->memslots[MEM_REGION_PT]); 1969 } 1970 1971 /* 1972 * Address Guest Virtual to Host Virtual 1973 * 1974 * Input Args: 1975 * vm - Virtual Machine 1976 * gva - VM virtual address 1977 * 1978 * Output Args: None 1979 * 1980 * Return: 1981 * Equivalent host virtual address 1982 */ 1983 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) 1984 { 1985 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 1986 } 1987 1988 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) 1989 { 1990 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 1991 } 1992 1993 static unsigned int vm_calc_num_pages(unsigned int num_pages, 1994 unsigned int page_shift, 1995 unsigned int new_page_shift, 1996 bool ceil) 1997 { 1998 unsigned int n = 1 << (new_page_shift - page_shift); 1999 2000 if (page_shift >= new_page_shift) 2001 return num_pages * (1 << (page_shift - new_page_shift)); 2002 2003 return num_pages / n + !!(ceil && num_pages % n); 2004 } 2005 2006 static inline int getpageshift(void) 2007 { 2008 return __builtin_ffs(getpagesize()) - 1; 2009 } 2010 2011 unsigned int 2012 vm_num_host_pages(enum 
unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}

/*
 * Read binary stats descriptors
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *
 * Output Args: None
 *
 * Return:
 *   A pointer to a newly allocated series of stat descriptors.
 *   Caller is responsible for freeing the returned kvm_stats_desc.
 *
 * Read the stats descriptors from the binary stats interface.
 */
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header)
{
	struct kvm_stats_desc *stats_desc;
	ssize_t desc_size, total_size, ret;

	desc_size = get_stats_descriptor_size(header);
	total_size = header->num_desc * desc_size;

	stats_desc = calloc(header->num_desc, desc_size);
	TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");

	ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
	TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");

	return stats_desc;
}
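
/*
 * Example (illustrative only): walking every VM-scope stat descriptor by
 * hand.  vm_get_stats_fd() and read_stats_header() are the companion
 * helpers used by __vm_get_stat() below.  Note the loop stride must be the
 * descriptor size reported by the header, not sizeof(*desc), because each
 * descriptor carries a variable-length name.
 *
 *	int stats_fd = vm_get_stats_fd(vm);
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *desc, *stats_desc;
 *	size_t size_desc;
 *	int i;
 *
 *	read_stats_header(stats_fd, &header);
 *	stats_desc = read_stats_descriptors(stats_fd, &header);
 *	size_desc = get_stats_descriptor_size(&header);
 *
 *	for (i = 0; i < header.num_desc; ++i) {
 *		desc = (void *)stats_desc + i * size_desc;
 *		pr_info("stat: %s\n", desc->name);
 *	}
 *
 *	free(stats_desc);
 *	close(stats_fd);
 */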
/*
 * Read stat data for a particular stat
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *   desc - the binary stat metadata for the particular stat to be read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements)
{
	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
	size_t size = nr_elements * sizeof(*data);
	ssize_t ret;

	TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
	TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);

	ret = pread(stats_fd, data, size,
		    header->data_offset + desc->offset);

	TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
		    desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
}

/*
 * Read the data of the named stat
 *
 * Input Args:
 *   vm - the VM for which the stat should be read
 *   stat_name - the name of the stat to read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Looks up the stat named stat_name in the VM's binary stats descriptors
 * and reads its data values.  The stats FD, header, and descriptors are
 * read and cached in the kvm_vm the first time any stat is requested.
 */
void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements)
{
	struct kvm_stats_desc *desc;
	size_t size_desc;
	int i;

	if (!vm->stats_fd) {
		vm->stats_fd = vm_get_stats_fd(vm);
		read_stats_header(vm->stats_fd, &vm->stats_header);
		vm->stats_desc = read_stats_descriptors(vm->stats_fd,
							&vm->stats_header);
	}

	size_desc = get_stats_descriptor_size(&vm->stats_header);

	for (i = 0; i < vm->stats_header.num_desc; ++i) {
		desc = (void *)vm->stats_desc + (i * size_desc);

		if (strcmp(desc->name, stat_name))
			continue;

		read_stat_data(vm->stats_fd, &vm->stats_header, desc,
			       data, max_elements);

		break;
	}
}

__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
}

__weak void kvm_selftest_arch_init(void)
{
}

void __attribute((constructor)) kvm_selftest_init(void)
{
	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);

	kvm_selftest_arch_init();
}
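
/*
 * Example (illustrative only): reading a single 8-byte stat value by name
 * via __vm_get_stat() above.  The stat name "pages_4k" is an x86-specific
 * assumption; tests should substitute whichever stat they actually care
 * about.
 *
 *	uint64_t pages_4k;
 *
 *	__vm_get_stat(vm, "pages_4k", &pages_4k, 1);
 *	pr_info("4K pages mapped: %lu\n", pages_4k);
 */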