// SPDX-License-Identifier: GPL-2.0
/*
 * access_tracking_perf_test
 *
 * Copyright (C) 2021, Google, Inc.
 *
 * This test measures the performance effects of KVM's access tracking.
 * Access tracking is driven by the MMU notifiers test_young, clear_young, and
 * clear_flush_young. These notifiers do not have a direct userspace API;
 * however, the clear_young notifier can be triggered by marking pages as idle
 * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
 * enable access tracking on guest memory.
 *
 * To measure performance, this test runs a VM with a configurable number of
 * vCPUs that each touch every page in disjoint regions of memory. Performance
 * is measured in the time it takes all vCPUs to finish touching their
 * predefined region.
 *
 * Note that a deterministic correctness test of access tracking is not
 * possible using page_idle as it exists today. This is for a few reasons:
 *
 * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
 *    means subsequent guest accesses are not guaranteed to see page table
 *    updates made by KVM until some time in the future.
 *
 * 2. page_idle only operates on LRU pages. Newly allocated pages are not
 *    immediately added to LRU lists. Instead they are held in a "pagevec",
 *    which is drained to LRU lists some time in the future. There is no
 *    userspace API to force this drain to occur.
 *
 * These limitations are worked around in this test by using a large enough
 * region of memory for each vCPU such that the number of translations cached
 * in the TLB and the number of pages held in pagevecs are a small fraction of
 * the overall workload. If either of those conditions is not true (for example
 * in nested virtualization, where the TLB size is unlimited), this test will
 * print a warning rather than silently passing.
 */
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"
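
/*
 * Kernel ABI notes (see Documentation/admin-guide/mm/pagemap.rst and
 * idle_page_tracking.rst): each 64-bit /proc/<pid>/pagemap entry stores the
 * "page present" flag in bit 63 and the PFN in bits 0-54 (the PFN reads back
 * as 0 without CAP_SYS_ADMIN). /sys/kernel/mm/page_idle/bitmap is a bitmap
 * indexed by PFN, packing 64 pages into each 8-byte word.
 */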

/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;

/* Defines what vCPU threads should do during a given iteration. */
static enum {
	/* Run the vCPU to access all its memory. */
	ITERATION_ACCESS_MEMORY,
	/* Mark the vCPU's memory idle in page_idle. */
	ITERATION_MARK_IDLE,
} iteration_work;

/* Set to true when vCPU threads should exit. */
static bool done;

/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;

struct test_params {
	/* The backing source for the region of memory. */
	enum vm_mem_backing_src_type backing_src;

	/* The amount of memory to allocate for each vCPU. */
	uint64_t vcpu_memory_bytes;

	/* The number of vCPUs to create in the VM. */
	int nr_vcpus;
};

static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
	uint64_t value;
	off_t offset = index * sizeof(value);

	TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
		    "pread from %s offset 0x%" PRIx64 " failed!",
		    filename, offset);

	return value;
}

#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)

static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
	uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
	uint64_t entry;
	uint64_t pfn;

	entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
	if (!(entry & PAGEMAP_PRESENT))
		return 0;

	pfn = entry & PAGEMAP_PFN_MASK;
	__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");

	return pfn;
}

static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);

	return !!((bits >> (pfn % 64)) & 1);
}
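
/*
 * Writing a set bit to the page_idle bitmap clears the page's accessed state;
 * for guest-mapped pages that is what triggers KVM's clear_young notifier and
 * puts the page under access tracking.
 */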
static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 1ULL << (pfn % 64);

	TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
}

static void mark_vcpu_memory_idle(struct kvm_vm *vm,
				  struct perf_test_vcpu_args *vcpu_args)
{
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t base_gva = vcpu_args->gva;
	uint64_t pages = vcpu_args->pages;
	uint64_t page;
	uint64_t still_idle = 0;
	uint64_t no_pfn = 0;
	int page_idle_fd;
	int pagemap_fd;

	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
	if (overlap_memory_access && vcpu_idx)
		return;

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	TEST_ASSERT(page_idle_fd >= 0, "Failed to open page_idle.");

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_ASSERT(pagemap_fd >= 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
			no_pfn++;
			continue;
		}

		if (is_page_idle(page_idle_fd, pfn)) {
			still_idle++;
			continue;
		}

		mark_page_idle(page_idle_fd, pfn);
	}

	/*
	 * Assumption: Less than 1% of pages are going to be swapped out from
	 * under us during this test.
	 */
	TEST_ASSERT(no_pfn < pages / 100,
		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
		    vcpu_idx, no_pfn, pages);

	/*
	 * Check that at least 90% of memory has been marked idle (the rest
	 * might not be marked idle because the pages have not yet made it to an
	 * LRU list or the translations are still cached in the TLB). 90% is
	 * arbitrary; high enough to ensure most memory accesses went through
	 * access tracking but low enough so as not to make the test too brittle
	 * over time and across architectures.
	 *
	 * Note that when run in nested virtualization, this check will trigger
	 * much more frequently because the TLB size is unlimited and, since no
	 * flush happens, many more pages are cached there and the guest won't
	 * see the "idle" bit cleared.
	 */
	if (still_idle >= pages / 10)
		printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
		       " out of %" PRIu64 "), this will affect performance results.\n",
		       vcpu_idx, still_idle, pages);

	close(page_idle_fd);
	close(pagemap_fd);
}

static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
	struct ucall uc;
	uint64_t actual_ucall = get_ucall(vcpu, &uc);

	TEST_ASSERT(expected_ucall == actual_ucall,
		    "Guest exited unexpectedly (expected ucall %" PRIu64
		    ", got %" PRIu64 ")",
		    expected_ucall, actual_ucall);
}

static bool spin_wait_for_next_iteration(int *current_iteration)
{
	int last_iteration = *current_iteration;

	do {
		if (READ_ONCE(done))
			return false;

		*current_iteration = READ_ONCE(iteration);
	} while (last_iteration == *current_iteration);

	return true;
}

static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	struct kvm_vm *vm = perf_test_args.vm;
	int vcpu_idx = vcpu_args->vcpu_idx;
	int current_iteration = 0;

	while (spin_wait_for_next_iteration(&current_iteration)) {
		switch (READ_ONCE(iteration_work)) {
		case ITERATION_ACCESS_MEMORY:
			vcpu_run(vcpu);
			assert_ucall(vcpu, UCALL_SYNC);
			break;
		case ITERATION_MARK_IDLE:
			mark_vcpu_memory_idle(vm, vcpu_args);
			break;
		}

		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
	}
}
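
/* Busy-wait until the given vCPU publishes completion of target_iteration. */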
static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
{
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
	       target_iteration) {
		continue;
	}
}

/* The type of memory accesses to perform in the VM. */
enum access_type {
	ACCESS_READ,
	ACCESS_WRITE,
};

static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	int next_iteration, i;

	/* Kick off the vCPUs by incrementing iteration. */
	next_iteration = ++iteration;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	/* Wait for all vCPUs to finish the iteration. */
	for (i = 0; i < nr_vcpus; i++)
		spin_wait_for_vcpu(i, next_iteration);

	ts_elapsed = timespec_elapsed(ts_start);
	pr_info("%-30s: %ld.%09lds\n",
		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}
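
/*
 * A write fraction of 1 makes the guest write every page, while INT_MAX makes
 * the accesses (almost) entirely reads; that is how this test selects between
 * read and write iterations.
 */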
static void access_memory(struct kvm_vm *vm, int nr_vcpus,
			  enum access_type access, const char *description)
{
	perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, nr_vcpus, description);
}

static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
{
	/*
	 * Even though this parallelizes the work across vCPUs, this is still a
	 * very slow operation because page_idle forces the test to mark one pfn
	 * at a time and the clear_young notifier serializes on the KVM MMU
	 * lock.
	 */
	pr_debug("Marking VM memory idle (slow)...\n");
	iteration_work = ITERATION_MARK_IDLE;
	run_iteration(vm, nr_vcpus, "Mark memory idle");
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *params = arg;
	struct kvm_vm *vm;
	int nr_vcpus = params->nr_vcpus;

	vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);

	pr_info("\n");
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");

	/* As a control, read and write to the populated memory first. */
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");

	/* Repeat on memory that has been marked as idle. */
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
	mark_memory_idle(vm, nr_vcpus);
	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");

	/* Set done to signal the vCPU threads to exit. */
	done = true;

	perf_test_join_vcpu_threads(nr_vcpus);
	perf_test_destroy_vm(vm);
}
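
/*
 * Example invocation (flags are described by help() below; backing source
 * names come from backing_src_help()):
 *
 *	./access_tracking_perf_test -v 4 -b 1G
 */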
static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
	       name);
	puts("");
	printf(" -h: Display this help message.\n");
	guest_modes_help();
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params params = {
		.backing_src = DEFAULT_VM_MEM_SRC,
		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
		.nr_vcpus = 1,
	};
	int page_idle_fd;
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			params.vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			params.nr_vcpus = atoi(optarg);
			break;
		case 'o':
			overlap_memory_access = true;
			break;
		case 's':
			params.backing_src = parse_backing_src_type(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	__TEST_REQUIRE(page_idle_fd >= 0,
		       "CONFIG_IDLE_PAGE_TRACKING is not enabled");
	close(page_idle_fd);

	for_each_guest_mode(run_test, &params);

	return 0;
}