Lines matching "gpa - 0"
// SPDX-License-Identifier: GPL-2.0
/*
 * A memslot-related performance benchmark.
 */
#define MEM_TEST_SIZE		(MEM_SIZE - MEM_EXTRA_SIZE)

#define MEM_TEST_MAP_SIZE	(MEM_SIZE_MAP - MEM_EXTRA_SIZE)
/*
 * ...
 * memory slot 0, on various architectures and configurations. The
 * ...
 * architecture   slots   memory-per-slot   memory-on-last-slot
 * --------------------------------------------------------------
 * x86-4KB        32763   16KB              160KB
 * arm64-4KB      32766   16KB              112KB
 * arm64-16KB     32766   16KB              112KB
 * arm64-64KB      8192   64KB              128KB
 */
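The memory-on-last-slot column is just the division remainder folded into the final slot. A worked sketch for the x86-4KB row, assuming the upstream defaults of a 512 MiB test area plus the 64 KiB extra area (131088 pages of 4 KiB; the MEM_SIZE value itself is not among the matched lines):

/*
 * Illustrative arithmetic (sizes assumed as noted above):
 *   npages         = (512 MiB + 64 KiB) / 4 KiB = 131088
 *   pages_per_slot = 131088 / 32763             = 4     (16 KiB per slot)
 *   last slot      = 131088 - 32762 * 4         = 40    (160 KiB)
 */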
#define MEM_TEST_VAL_1 0x1122334455667788
#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
/*
 * Technically, we also need the atomic bool to be address-free, which
 * ...
 * all KVM-supported platforms.
 */

	} while (0)
/* in check_mmio_access(): */
	TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
	TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
	TEST_ASSERT(run->mmio.len == 8,
		    "Unexpected exit mmio size = %u", run->mmio.len);
	TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
		    run->mmio.phys_addr <= data->mmio_gpa_max,
		    "Unexpected exit mmio address = 0x%llx",
		    run->mmio.phys_addr);
/* in vcpu_worker(): */
	struct kvm_vcpu *vcpu = data->vcpu;
	struct kvm_run *run = vcpu->run;
	...
			TEST_ASSERT(uc.args[1] == 0,
	...
			if (run->exit_reason == KVM_EXIT_MMIO)
/* in vm_gpa2hva(): */
static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
{
	...
	uint32_t guest_page_size = data->vm->page_size;

	TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
	TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
		    "Too high gpa to translate");
	gpa -= MEM_GPA;

	gpage = gpa / guest_page_size;
	pgoffs = gpa % guest_page_size;
	slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
	slotoffs = gpage - (slot * data->pages_per_slot);
	...
		if (slot == data->nslots - 1)
			slotpages = data->npages - slot * data->pages_per_slot;
		else
			slotpages = data->pages_per_slot;

		TEST_ASSERT(!pgoffs,
			    "Asking for remaining pages in slot but gpa not page aligned");
		*rempages = slotpages - slotoffs;
	...
	base = data->hva_slots[slot];
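A worked example of the translation above, with hypothetical geometry (4 KiB pages, pages_per_slot = 100, nslots = 4); the final return statement is not among the matched lines, but from the values computed here it presumably adds slotoffs pages plus pgoffs to the slot base:

	/*
	 * gpage = 250, pgoffs = 8, slot = min(250 / 100, 3) = 2,
	 * slotoffs = 250 - 2 * 100 = 50, so the result should be
	 * hva_slots[2] + 50 * 4096 + 8.
	 */
	void *hva = vm_gpa2hva(data, MEM_GPA + 250 * 4096 + 8, NULL);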
/* in vm_slot2gpa(): */
	uint32_t guest_page_size = data->vm->page_size;

	TEST_ASSERT(slot < data->nslots, "Too high slot number");

	return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
/* in alloc_vm(): */
	data->vm = NULL;
	data->vcpu = NULL;
	data->hva_slots = NULL;
/* in get_max_slots(): */
	uint32_t guest_page_size = data->vm->page_size;
	...
	mempages = data->npages;
	slots = data->nslots;
	while (--slots > 1) {
	...
			return slots + 1;	/* slot 0 is reserved */
	...
	return 0;
/* in prepare_vm(): */
	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
	TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");

	data->npages = mempages;
	TEST_ASSERT(data->npages > 1, "Can't test without any memory");
	data->nslots = nslots;
	data->pages_per_slot = data->npages / data->nslots;
	rempages = data->npages % data->nslots;
	...
			     data->pages_per_slot, rempages)) {
	...
	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
	TEST_ASSERT(data->hva_slots, "malloc() fail");
	...
		  data->nslots, data->pages_per_slot, rempages);
	...
	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
		uint64_t npages;

		npages = data->pages_per_slot;
		if (slot == data->nslots)
			npages += rempages;

		vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
					    guest_addr, slot, npages,
					    0);
	...
	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
		uint64_t npages;
		uint64_t gpa;

		npages = data->pages_per_slot;
		if (slot == data->nslots)
			npages += rempages;

		gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
		TEST_ASSERT(gpa == guest_addr,
	...
		data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
		memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
	...
	virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
	...
	sync->guest_page_size = data->vm->page_size;
	atomic_init(&sync->start_flag, false);
	atomic_init(&sync->exit_flag, false);
	atomic_init(&sync->sync_flag, false);

	data->mmio_ok = false;
/* in launch_vm(): */
	pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
/* in free_vm(): */
	kvm_vm_free(data->vm);
	free(data->hva_slots);
/* in wait_guest_exit(): */
	pthread_join(data->vcpu_thread, NULL);
/* in let_guest_run(): */
	atomic_store_explicit(&sync->start_flag, true, memory_order_release);
/* in guest_spin_until_start(): */
	while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
/* in make_guest_exit(): */
	atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
/* in _guest_should_exit(): */
	return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
/* in host_perform_sync(): */
	atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
		;
	alarm(0);
/* in guest_perform_sync(): */
	} while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
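Together these helpers form a host/guest rendezvous over shared memory: the host publishes sync_flag with release semantics and spins until the guest clears it, while the guest clears it with a weak compare-and-exchange, which may fail spuriously and therefore loops. A self-contained sketch of the same pattern in plain C11 (names are illustrative, not the test's own):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool sync_flag;

/* Host side: raise the flag, then wait for the guest to consume it. */
static void host_sync(void)
{
	atomic_store_explicit(&sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync_flag, memory_order_acquire))
		;	/* spin until the guest clears the flag */
}

/* Guest side: wait for the flag and clear it in one atomic step. */
static void guest_sync(void)
{
	bool expected;

	do {
		expected = true;	/* a failed CAS overwrites this */
	} while (!atomic_compare_exchange_weak_explicit(&sync_flag,
							&expected, false,
							memory_order_acq_rel,
							memory_order_relaxed));
}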
/* in guest_code_test_memslot_move(): */
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
	uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);

	GUEST_SYNC(0);
/* in guest_code_test_memslot_map(): */
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);

	GUEST_SYNC(0);
/* in guest_code_test_memslot_unmap(): */
	GUEST_SYNC(0);
/* in guest_code_test_memslot_rw(): */
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);

	GUEST_SYNC(0);
	...
			*(uint64_t *)ptr = 0;
/* in test_memslot_move_prepare(): */
	uint32_t guest_page_size = data->vm->page_size;
	...
	movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
	...
			*maxslots = 0;
	...
	movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
	sync->move_area_ptr = (void *)movetestgpa;

	if (isactive) {
		data->mmio_ok = true;
		data->mmio_gpa_min = movesrcgpa;
		data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
	}
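	/*
	 * Note: while KVM detaches the last slot mid-move, guest writes to
	 * the half of the test area backed by it have no memslot and exit
	 * to userspace as MMIO; check_mmio_access() above accepts such
	 * exits only inside this [mmio_gpa_min, mmio_gpa_max] window.
	 */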
/* in test_memslot_move_loop(): */
	movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
	vm_mem_region_move(data->vm, data->nslots - 1 + 1,
			   ...);
	vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
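	/*
	 * The "- 1 + 1" is deliberate: test slot indices are 0-based, but
	 * the KVM memslot IDs are offset by one because memslot 0 is
	 * reserved (prepare_vm() installs slots 1..nslots), so test slot
	 * nslots - 1 lives in memslot ID nslots.
	 */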
/* in test_memslot_do_unmap(): */
	uint64_t gpa, ctr;
	uint32_t guest_page_size = data->vm->page_size;

	for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
		uint64_t npages;
		void *hva;

		hva = vm_gpa2hva(data, gpa, &npages);
		TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
		npages = min(npages, count - ctr);
		TEST_ASSERT(!madvise(hva, npages * guest_page_size, MADV_DONTNEED),
			    "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
			    hva, gpa);
		ctr += npages;
		gpa += npages * guest_page_size;
	}
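madvise(MADV_DONTNEED) is what actually drops the guest's backing pages here, so the next guest access takes a fresh fault. A minimal standalone illustration of that behavior (hypothetical buffer, unrelated to the test's memory):

#include <string.h>
#include <sys/mman.h>

/* Map a page, dirty it, discard it: a private anonymous mapping reads
 * back as zeros after MADV_DONTNEED, with the next access re-faulting. */
static int discard_demo(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return -1;
	memset(p, 0xaa, len);
	if (madvise(p, len, MADV_DONTNEED))
		return -1;
	return p[0] == 0 ? 0 : 1;	/* now zero-filled */
}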
/* in test_memslot_map_unmap_check(): */
	uint64_t gpa;
	uint64_t *val;
	uint32_t guest_page_size = data->vm->page_size;
	...
	gpa = MEM_TEST_GPA + offsp * guest_page_size;
	val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
	TEST_ASSERT(*val == valexp,
		    ...
		    *val, valexp, gpa);
	*val = 0;
/* in test_memslot_map_loop(): */
	uint32_t guest_page_size = data->vm->page_size;
	...
	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
	test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
	test_memslot_do_unmap(data, 0, guest_pages / 2);
	...
	test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
/* in test_memslot_unmap_loop_common(): */
	uint32_t guest_page_size = data->vm->page_size;
	...
	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
	for (ctr = 0; ctr < guest_pages / 2; ctr += chunk)
/* in test_memslot_unmap_loop(): */
	uint32_t guest_page_size = data->vm->page_size;
/* in test_memslot_unmap_loop_chunked(): */
	uint32_t guest_page_size = data->vm->page_size;
/* in test_memslot_rw_loop(): */
	uint32_t guest_page_size = data->vm->page_size;
	...
			*vptr = 0;
/* in test_execute(): */
	uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
	...
	if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
	...
	if (tdata->prepare &&
	    !tdata->prepare(data, sync, maxslots)) {
	...
		if (guest_runtime->tv_sec >= maxtime)
	...
		tdata->loop(data, sync);
/* in help(): */
	pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r r…
	...
	pr_info(" -h: print this help screen.\n");
	pr_info(" -v: enable verbose mode (not for benchmarking).\n");
	pr_info(" -d: enable extra debug checks.\n");
	pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
		targs->nslots);
	pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
		targs->tfirst, NTESTS - 1);
	pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
		targs->tlast, NTESTS - 1);
	pr_info(" -l: specify the test length in seconds (currently: %i)\n",
		targs->seconds);
	pr_info(" -r: specify the number of runs per test (currently: %i)\n",
		targs->runs);
	...
	for (ctr = 0; ctr < NTESTS; ctr++)
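Putting the options together, a typical invocation might be "./memslot_perf_test -s 1000 -l 10 -r 3" (all tests, at most 1000 memslots, 10 seconds per test, three runs each); the binary name is assumed here, since the help text only prints argv[0].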
/* in check_memory_sizes(): */
		pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
/* in parse_args(): */
	while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
	...
			help(argv[0], targs);
	...
			targs->nslots = atoi_paranoid(optarg);
			if (targs->nslots <= 1 && targs->nslots != -1) {
				pr_info("Slot count cap must be larger than 1 or -1 for no cap\n");
	...
			targs->tfirst = atoi_non_negative("First test", optarg);
	...
			targs->tlast = atoi_non_negative("Last test", optarg);
			if (targs->tlast >= NTESTS) {
				pr_info("Last test to run has to be non-negative and less than %zu\n",
	...
			targs->seconds = atoi_non_negative("Test length", optarg);
	...
			targs->runs = atoi_positive("Runs per test", optarg);
	...
			help(argv[0], targs);
	...
	if (targs->tfirst > targs->tlast) {
	...
	/* Memory slot 0 is reserved */
	if (targs->nslots == -1)
		targs->nslots = max_mem_slots - 1;
	else
		targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1;
	...
		  targs->nslots + 1);
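	/*
	 * Illustrative numbers: if KVM reports a 32764-slot maximum, the
	 * default cap of -1 yields 32763 usable test slots (memslot 0
	 * stays reserved), matching the x86-4KB row in the table above.
	 */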
/* in test_loop(): */
	if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
	...
		pr_info("No full loops done - too short test time or system too loaded?\n");
	...
	if (!data->mem_size &&
	    (!rbestslottime->slottimens ||
	     result.slottimens < rbestslottime->slottimens))
	...
	if (!rbestruntime->runtimens ||
	    result.runtimens < rbestruntime->runtimens)
/* in main(): */
	struct test_args targs = {
		.tfirst = 0,
		.tlast = NTESTS - 1,
		.nslots = -1,
	...
		return -1;
	...
		return -1;
	...
		  data->name, targs.runs, targs.seconds);
	...
	for (runctr = 0; runctr < targs.runs; runctr++)
	...
	return 0;