// SPDX-License-Identifier: GPL-2.0
/*
 * access_tracking_perf_test
 *
 * Copyright (C) 2021, Google, Inc.
 *
 * This test measures the performance effects of KVM's access tracking.
 * Access tracking is driven by the MMU notifiers test_young, clear_young, and
 * clear_flush_young. These notifiers do not have a direct userspace API,
 * however the clear_young notifier can be triggered by marking a page as idle
 * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
 * enable access tracking on guest memory.
 *
 * To measure performance this test runs a VM with a configurable number of
 * vCPUs that each touch every page in disjoint regions of memory. Performance
 * is measured in the time it takes all vCPUs to finish touching their
 * predefined region.
 *
 * Note that a deterministic correctness test of access tracking is not
 * possible by using page_idle as it exists today. This is for a few reasons:
 *
 * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
 *    means subsequent guest accesses are not guaranteed to see page table
 *    updates made by KVM until some time in the future.
 *
 * 2. page_idle only operates on LRU pages. Newly allocated pages are not
 *    immediately added to LRU lists. Instead they are held in a "pagevec",
 *    which is drained to LRU lists some time in the future. There is no
 *    userspace API to force this drain to occur.
 *
 * These limitations are worked around in this test by using a large enough
 * region of memory for each vCPU such that the number of translations cached
 * in the TLB and the number of pages held in pagevecs are a small fraction of
 * the overall workload. If either of those conditions does not hold (for
 * example when running nested, where the TLB size is effectively unlimited),
 * this test prints a warning rather than silently passing.
 */
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;

/* Defines what vCPU threads should do during a given iteration. */
static enum {
	/* Run the vCPU to access all its memory. */
	ITERATION_ACCESS_MEMORY,
	/* Mark the vCPU's memory idle in page_idle. */
	ITERATION_MARK_IDLE,
} iteration_work;

/* Set to true when vCPU threads should exit. */
static bool done;

/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;

struct test_params {
	/* The backing source for the region of memory. */
	enum vm_mem_backing_src_type backing_src;

	/* The amount of memory to allocate for each vCPU. */
	uint64_t vcpu_memory_bytes;

	/* The number of vCPUs to create in the VM. */
	int nr_vcpus;
};

static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
	uint64_t value;
	off_t offset = index * sizeof(value);

	TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
		    "pread from %s offset 0x%" PRIx64 " failed!",
		    filename, offset);

	return value;
}
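
/*
 * For reference, the layout of a /proc/<pid>/pagemap entry (one uint64_t per
 * virtual page), summarized from Documentation/admin-guide/mm/pagemap.rst:
 *
 *   Bits 0-54  page frame number (PFN) if present
 *   Bit  62    page is swapped
 *   Bit  63    page is present
 *
 * The two macros below extract the present bit and the PFN field. Note that
 * pagemap reports PFNs as zero to readers without CAP_SYS_ADMIN, which
 * lookup_pfn() turns into a test requirement.
 */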
#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)

static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
	uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
	uint64_t entry;
	uint64_t pfn;

	entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
	if (!(entry & PAGEMAP_PRESENT))
		return 0;

	pfn = entry & PAGEMAP_PFN_MASK;
	__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");

	return pfn;
}

static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);

	return !!((bits >> (pfn % 64)) & 1);
}

static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 1ULL << (pfn % 64);

	TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
}
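
/*
 * A note on the page_idle helpers above, summarized from
 * Documentation/admin-guide/mm/idle_page_tracking.rst: the bitmap file is
 * indexed by PFN and must be read and written in 8-byte chunks, each covering
 * 64 pages. Writing a set bit marks the page idle, which clears the accessed
 * bit in every mapping of the page; for guest memory this is what fires the
 * clear_young MMU notifier. A bit that is still set on a later read means no
 * access was observed since the page was marked idle.
 */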
static void mark_vcpu_memory_idle(struct kvm_vm *vm,
				  struct perf_test_vcpu_args *vcpu_args)
{
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t base_gva = vcpu_args->gva;
	uint64_t pages = vcpu_args->pages;
	uint64_t page;
	uint64_t still_idle = 0;
	uint64_t no_pfn = 0;
	int page_idle_fd;
	int pagemap_fd;

	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
	if (overlap_memory_access && vcpu_idx)
		return;

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
			no_pfn++;
			continue;
		}

		if (is_page_idle(page_idle_fd, pfn)) {
			still_idle++;
			continue;
		}

		mark_page_idle(page_idle_fd, pfn);
	}

	/*
	 * Assumption: Less than 1% of pages are going to be swapped out from
	 * under us during this test.
	 */
	TEST_ASSERT(no_pfn < pages / 100,
		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
		    vcpu_idx, no_pfn, pages);

	/*
	 * Check that at least 90% of memory has been marked idle (the rest
	 * might not be marked idle because the pages have not yet made it to an
	 * LRU list or the translations are still cached in the TLB). 90% is
	 * arbitrary; high enough that we ensure most memory access went through
	 * access tracking but low enough as to not make the test too brittle
	 * over time and across architectures.
	 *
	 * Note that when run in nested virtualization, this check will trigger
	 * much more frequently: the TLB size is effectively unlimited and,
	 * since no flush happens, many more translations stay cached, so the
	 * guest won't see the "idle" bit cleared.
	 */
	if (still_idle >= pages / 10)
		printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
		       " out of %" PRIu64 "), this will affect performance results.\n",
		       vcpu_idx, still_idle, pages);

	close(page_idle_fd);
	close(pagemap_fd);
}

static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
	struct ucall uc;
	uint64_t actual_ucall = get_ucall(vcpu, &uc);

	TEST_ASSERT(expected_ucall == actual_ucall,
		    "Guest exited unexpectedly (expected ucall %" PRIu64
		    ", got %" PRIu64 ")",
		    expected_ucall, actual_ucall);
}

static bool spin_wait_for_next_iteration(int *current_iteration)
{
	int last_iteration = *current_iteration;

	do {
		if (READ_ONCE(done))
			return false;

		*current_iteration = READ_ONCE(iteration);
	} while (last_iteration == *current_iteration);

	return true;
}

static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	struct kvm_vm *vm = perf_test_args.vm;
	int vcpu_idx = vcpu_args->vcpu_idx;
	int current_iteration = 0;

	while (spin_wait_for_next_iteration(&current_iteration)) {
		switch (READ_ONCE(iteration_work)) {
		case ITERATION_ACCESS_MEMORY:
			vcpu_run(vcpu);
			assert_ucall(vcpu, UCALL_SYNC);
			break;
		case ITERATION_MARK_IDLE:
			mark_vcpu_memory_idle(vm, vcpu_args);
			break;
		}

		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
	}
}

static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
{
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
	       target_iteration) {
		continue;
	}
}

/* The type of memory accesses to perform in the VM. */
enum access_type {
	ACCESS_READ,
	ACCESS_WRITE,
};
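
/*
 * How the main thread and the vCPU threads synchronize (summarizing the code
 * above and run_iteration() below): the main thread publishes the work type in
 * iteration_work and then increments iteration; each vCPU thread spins in
 * spin_wait_for_next_iteration() until it observes the new value, performs
 * the work, and publishes its progress in vcpu_last_completed_iteration[].
 * The main thread then spins in spin_wait_for_vcpu() until every vCPU has
 * caught up.
 */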
static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	int next_iteration, i;

	/* Kick off the vCPUs by incrementing iteration. */
	next_iteration = ++iteration;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	/* Wait for all vCPUs to finish the iteration. */
	for (i = 0; i < nr_vcpus; i++)
		spin_wait_for_vcpu(i, next_iteration);

	ts_elapsed = timespec_elapsed(ts_start);
	pr_info("%-30s: %ld.%09lds\n",
		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}

static void access_memory(struct kvm_vm *vm, int nr_vcpus,
			  enum access_type access, const char *description)
{
	/*
	 * A write fraction of 1 makes every guest access a write; INT_MAX
	 * makes the accesses effectively all reads.
	 */
	perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, nr_vcpus, description);
}

static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
{
	/*
	 * Even though this parallelizes the work across vCPUs, this is still a
	 * very slow operation because page_idle forces the test to mark one pfn
	 * at a time and the clear_young notifier serializes on the KVM MMU
	 * lock.
	 */
	pr_debug("Marking VM memory idle (slow)...\n");
	iteration_work = ITERATION_MARK_IDLE;
	run_iteration(vm, nr_vcpus, "Mark memory idle");
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *params = arg;
	struct kvm_vm *vm;
	int nr_vcpus = params->nr_vcpus;

	vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);

	pr_info("\n");
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");

	/* As a control, read and write to the populated memory first. */
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");

	/* Repeat on memory that has been marked as idle. */
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");

	/* Set done to signal the vCPU threads to exit. */
	done = true;

	perf_test_join_vcpu_threads(nr_vcpus);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
	       name);
	puts("");
	printf(" -h: Display this help message.\n");
	guest_modes_help();
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params params = {
		.backing_src = DEFAULT_VM_MEM_SRC,
		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
		.nr_vcpus = 1,
	};
	int page_idle_fd;
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			params.vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			params.nr_vcpus = atoi(optarg);
			break;
		case 'o':
			overlap_memory_access = true;
			break;
		case 's':
			params.backing_src = parse_backing_src_type(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	__TEST_REQUIRE(page_idle_fd >= 0,
		       "CONFIG_IDLE_PAGE_TRACKING is not enabled");
	close(page_idle_fd);

	for_each_guest_mode(run_test, &params);

	return 0;
}
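
/*
 * Example invocations (option names as parsed in main() above; backing source
 * names come from backing_src_help() and may vary by build):
 *
 *   ./access_tracking_perf_test                # 1 vCPU, 1G of memory
 *   ./access_tracking_perf_test -v 4 -b 256M   # 4 vCPUs, 256M each
 *   ./access_tracking_perf_test -o             # all vCPUs share one region
 *
 * The test must run as root: reading PFNs from /proc/self/pagemap requires
 * CAP_SYS_ADMIN, and /sys/kernel/mm/page_idle/bitmap requires
 * CONFIG_IDLE_PAGE_TRACKING.
 */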