// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA	0x8000000ULL
#define GICR_BASE_GPA	0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as
	 * it is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N	2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		/*
		 * Keep running the guest while dirty logging is being disabled
		 * (iteration is negative) so that vCPUs are accessing memory
		 * for the entire duration of zapping collapsible SPTEs.
		 */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
	uint32_t write_percent;
	uint32_t random_seed;
	bool random_access;
};

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	pr_info("Random seed: %u\n", p->random_seed);
	memstress_set_random_seed(vm, p->random_seed);
	memstress_set_write_percent(vm, p->write_percent);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;
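
	/*
	 * Iteration 0 is the population pass.  The elapsed time reported as
	 * "Populate memory time" below is measured from here and therefore
	 * also includes vCPU thread startup.
	 */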
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	/*
	 * Use 100% writes during the population phase to ensure all
	 * memory is actually populated and not just mapped to the zero
	 * page. This prevents expensive copy-on-write faults from
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	memstress_set_write_percent(vm, 100);
	memstress_set_random_access(vm, false);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	memstress_set_write_percent(vm, p->write_percent);
	memstress_set_random_access(vm, p->random_access);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging, the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	memstress_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	memstress_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed] [-s mem type] "
	       "[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
	puts("");
	printf(" -a: access memory randomly rather than in order.\n");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       "     can significantly increase runtime, especially if there\n"
	       "     isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -r: specify the starting random seed.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	printf(" -w: specify the percentage of pages which should be written to\n"
	       "     as an integer from 0-100 inclusive. This is probabilistic,\n"
	       "     so -w X means each page has an X%% chance of writing\n"
	       "     and a (100-X)%% chance of reading.\n"
	       "     (default: 100 i.e. all pages are written to.)\n");
	printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
	       "     values (target pCPU), one for each vCPU, plus an optional\n"
	       "     entry for the main application task (specified via entry\n"
	       "     <nr_vcpus + 1>). If used, entries must be provided for all\n"
	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
	       "         ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
	       "     To leave the application task unpinned, drop the final entry:\n\n"
	       "         ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
	       "     (default: no pinning)\n");
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	const char *pcpu_list = NULL;
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
		.random_seed = 1,
		.write_percent = 100,
	};
	int opt;

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
		switch (opt) {
		case 'a':
			p.random_access = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'c':
			pcpu_list = optarg;
			break;
		case 'e':
			/* 'e' is for evil. */
			run_vcpus_while_disabling_dirty_logging = true;
			break;
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'h':
			help(argv[0]);
			break;
		case 'i':
			p.iterations = atoi_positive("Number of iterations", optarg);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			memstress_args.nested = true;
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'r':
			p.random_seed = atoi_positive("Random seed", optarg);
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'v':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			TEST_ASSERT(nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'w':
			p.write_percent = atoi_non_negative("Write percentage", optarg);
			TEST_ASSERT(p.write_percent <= 100,
				    "Write percentage must be between 0 and 100");
			break;
		case 'x':
			p.slots = atoi_positive("Number of slots", optarg);
			break;
		default:
			help(argv[0]);
			break;
		}
	}

	if (pcpu_list) {
		kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
				       nr_vcpus);
		memstress_args.pin_vcpus = true;
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}