// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA	0x8000000ULL
#define GICR_BASE_GPA	0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as
	 * it is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N	2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		/*
		 * Keep running the guest while dirty logging is being disabled
		 * (iteration is negative) so that vCPUs are accessing memory
		 * for the entire duration of zapping collapsible SPTEs.
		 */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
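
/* Test parameters gathered from the command line; see help() below. */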
struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
	uint32_t write_percent;
	uint32_t random_seed;
};

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}
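
/*
 * Body of the test, run once per guest mode: create the VM, let the vCPUs
 * populate memory, enable dirty logging, then time "iterations" rounds of
 * dirtying memory and fetching (and optionally clearing) the dirty log,
 * and finally time disabling dirty logging.
 */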
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	pr_info("Random seed: %u\n", p->random_seed);
	perf_test_set_random_seed(vm, p->random_seed);
	perf_test_set_write_percent(vm, p->write_percent);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	/*
	 * Use 100% writes during the population phase to ensure all
	 * memory is actually populated and not just mapped to the zero
	 * page. This prevents expensive copy-on-write faults from
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	perf_test_set_write_percent(vm, 100);
	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	perf_test_set_write_percent(vm, p->write_percent);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging, the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	perf_test_destroy_vm(vm);
}
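
/*
 * Example invocation (hypothetical numbers): time 10 dirty-memory iterations
 * with 4 vCPUs dirtying 1G each, with guest memory split across 4 memslots:
 *
 *	./dirty_log_perf_test -i 10 -v 4 -b 1G -x 4
 */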
static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed] [-s mem type] "
	       "[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
	puts("");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       "     can significantly increase runtime, especially if there\n"
	       "     isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -r: specify the starting random seed.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	printf(" -w: specify the percentage of pages which should be written to\n"
	       "     as an integer from 0-100 inclusive. This is probabilistic,\n"
	       "     so -w X means each page has an X%% chance of writing\n"
	       "     and a (100-X)%% chance of reading.\n"
	       "     (default: 100 i.e. all pages are written to.)\n");
	printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
	       "     values (target pCPU), one for each vCPU, plus an optional\n"
	       "     entry for the main application task (specified via entry\n"
	       "     <nr_vcpus + 1>). If used, entries must be provided for all\n"
	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
	       "         ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
	       "     To leave the application task unpinned, drop the final entry:\n\n"
	       "         ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
	       "     (default: no pinning)\n");
	puts("");
	exit(0);
}
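
/*
 * Note that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is probed up front; if the
 * kernel does not support it, the test falls back to plain KVM_GET_DIRTY_LOG
 * behavior and skips the clear-dirty-log passes, as if -g had been given.
 */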
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	const char *pcpu_list = NULL;
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
		.random_seed = 1,
		.write_percent = 100,
	};
	int opt;

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "b:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
		switch (opt) {
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'c':
			pcpu_list = optarg;
			break;
		case 'e':
			/* 'e' is for evil. */
			run_vcpus_while_disabling_dirty_logging = true;
			break;
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'h':
			help(argv[0]);
			break;
		case 'i':
			p.iterations = atoi_positive("Number of iterations", optarg);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			perf_test_args.nested = true;
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'r':
			p.random_seed = atoi_positive("Random seed", optarg);
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'v':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			TEST_ASSERT(nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'w':
			p.write_percent = atoi_non_negative("Write percentage", optarg);
			TEST_ASSERT(p.write_percent <= 100,
				    "Write percentage must be between 0 and 100");
			break;
		case 'x':
			p.slots = atoi_positive("Number of slots", optarg);
			break;
		default:
			help(argv[0]);
			break;
		}
	}

	if (pcpu_list) {
		kvm_parse_vcpu_pinning(pcpu_list, perf_test_args.vcpu_to_pcpu,
				       nr_vcpus);
		perf_test_args.pin_vcpus = true;
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}