// SPDX-License-Identifier: GPL-2.0
/*
 * access_tracking_perf_test
 *
 * Copyright (C) 2021, Google, Inc.
 *
 * This test measures the performance effects of KVM's access tracking.
 * Access tracking is driven by the MMU notifiers test_young, clear_young, and
 * clear_flush_young. These notifiers do not have a direct userspace API,
 * however the clear_young notifier can be triggered by marking pages as idle
 * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
 * enable access tracking on guest memory.
 *
 * To measure performance this test runs a VM with a configurable number of
 * vCPUs that each touch every page in disjoint regions of memory. Performance
 * is measured in the time it takes all vCPUs to finish touching their
 * predefined region.
 *
 * Note that a deterministic correctness test of access tracking is not possible
 * by using page_idle as it exists today. This is for a few reasons:
 *
 * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
 *    means subsequent guest accesses are not guaranteed to see page table
 *    updates made by KVM until some time in the future.
 *
 * 2. page_idle only operates on LRU pages. Newly allocated pages are not
 *    immediately added to LRU lists. Instead they are held in a "pagevec",
 *    which is drained to LRU lists some time in the future. There is no
 *    userspace API to force this drain to occur.
 *
 * These limitations are worked around in this test by using a large enough
 * region of memory for each vCPU such that the number of translations cached in
 * the TLB and the number of pages held in pagevecs are a small fraction of the
 * overall workload. If either of those conditions does not hold, this test
 * will fail rather than silently passing.
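 *
 * Example invocation (a hypothetical run: 4 vCPUs, each touching 1G of
 * anonymous memory):
 *
 *	./access_tracking_perf_test -v 4 -b 1G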
 */
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

/* Global variable used to synchronize all of the vCPU threads. */
static int iteration = -1;

/* Defines what vCPU threads should do during a given iteration. */
static enum {
	/* Run the vCPU to access all its memory. */
	ITERATION_ACCESS_MEMORY,
	/* Mark the vCPU's memory idle in page_idle. */
	ITERATION_MARK_IDLE,
} iteration_work;

/* Set to true when vCPU threads should exit. */
static bool done;

/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;

struct test_params {
	/* The backing source for the region of memory. */
	enum vm_mem_backing_src_type backing_src;

	/* The amount of memory to allocate for each vCPU. */
	uint64_t vcpu_memory_bytes;

	/* The number of vCPUs to create in the VM. */
	int vcpus;
};

static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
	uint64_t value;
	off_t offset = index * sizeof(value);

	TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
		    "pread from %s offset 0x%" PRIx64 " failed!",
		    filename, offset);

	return value;
}

#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
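
/*
 * A 64-bit /proc/<pid>/pagemap entry describes one virtual page (see
 * Documentation/admin-guide/mm/pagemap.rst): bit 63 is set if the page is
 * present and bits 0-54 hold its PFN. The kernel reports a PFN of 0 to
 * readers that lack CAP_SYS_ADMIN.
 */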

static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
	uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
	uint64_t entry;
	uint64_t pfn;

	entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
	if (!(entry & PAGEMAP_PRESENT))
		return 0;

	pfn = entry & PAGEMAP_PFN_MASK;
	if (!pfn) {
		print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
		exit(KSFT_SKIP);
	}

	return pfn;
}

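/*
 * Each bit in /sys/kernel/mm/page_idle/bitmap covers one PFN, packed into
 * 64-bit words; the file must be read and written 8 bytes at a time at
 * 8-byte-aligned offsets (see
 * Documentation/admin-guide/mm/idle_page_tracking.rst), which is why the
 * helpers below operate on whole uint64_t words.
 */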
static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);

	return !!((bits >> (pfn % 64)) & 1);
}

static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 1ULL << (pfn % 64);

	TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
}

static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
{
	uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
	uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
	uint64_t page;
	uint64_t still_idle = 0;
	uint64_t no_pfn = 0;
	int page_idle_fd;
	int pagemap_fd;

	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
	if (overlap_memory_access && vcpu_id)
		return;

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	TEST_ASSERT(page_idle_fd >= 0, "Failed to open page_idle.");

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_ASSERT(pagemap_fd >= 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
			no_pfn++;
			continue;
		}

		if (is_page_idle(page_idle_fd, pfn)) {
			still_idle++;
			continue;
		}

		mark_page_idle(page_idle_fd, pfn);
	}

	/*
	 * Assumption: Less than 1% of pages are going to be swapped out from
	 * under us during this test.
	 */
	TEST_ASSERT(no_pfn < pages / 100,
		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
		    vcpu_id, no_pfn, pages);

	/*
	 * Test that at least 90% of memory has been marked idle (the rest might
	 * not be marked idle because the pages have not yet made it to an LRU
	 * list or the translations are still cached in the TLB). 90% is
	 * arbitrary; high enough that we ensure most memory access went through
	 * access tracking but low enough as to not make the test too brittle
	 * over time and across architectures.
	 */
	TEST_ASSERT(still_idle < pages / 10,
		    "vCPU %d: Too many pages still idle (%" PRIu64 " out of %"
		    PRIu64 ").",
		    vcpu_id, still_idle, pages);

	close(page_idle_fd);
	close(pagemap_fd);
}

static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
			 uint64_t expected_ucall)
{
	struct ucall uc;
	uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);

	TEST_ASSERT(expected_ucall == actual_ucall,
		    "Guest exited unexpectedly (expected ucall %" PRIu64
		    ", got %" PRIu64 ")",
		    expected_ucall, actual_ucall);
}

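/*
 * Wait for the main thread to publish a new iteration (or to set 'done').
 * READ_ONCE() forces 'done' and 'iteration' to be re-read from memory on
 * every trip around the loop rather than letting the compiler cache them in
 * registers.
 */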
static bool spin_wait_for_next_iteration(int *current_iteration)
{
	int last_iteration = *current_iteration;

	do {
		if (READ_ONCE(done))
			return false;

		*current_iteration = READ_ONCE(iteration);
	} while (last_iteration == *current_iteration);

	return true;
}

static void *vcpu_thread_main(void *arg)
{
	struct perf_test_vcpu_args *vcpu_args = arg;
	struct kvm_vm *vm = perf_test_args.vm;
	int vcpu_id = vcpu_args->vcpu_id;
	int current_iteration = -1;

	while (spin_wait_for_next_iteration(&current_iteration)) {
		switch (READ_ONCE(iteration_work)) {
		case ITERATION_ACCESS_MEMORY:
			vcpu_run(vm, vcpu_id);
			assert_ucall(vm, vcpu_id, UCALL_SYNC);
			break;
		case ITERATION_MARK_IDLE:
			mark_vcpu_memory_idle(vm, vcpu_id);
			break;
		}

		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
	}

	return NULL;
}

static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
{
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
	       target_iteration) {
		continue;
	}
}

/* The type of memory accesses to perform in the VM. */
enum access_type {
	ACCESS_READ,
	ACCESS_WRITE,
};

static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	int next_iteration;
	int vcpu_id;

	/* Kick off the vCPUs by incrementing iteration. */
	next_iteration = ++iteration;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	/* Wait for all vCPUs to finish the iteration. */
	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
		spin_wait_for_vcpu(vcpu_id, next_iteration);

	ts_elapsed = timespec_elapsed(ts_start);
	pr_info("%-30s: %ld.%09lds\n",
		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}

static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
			  const char *description)
{
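	/*
	 * perf_test_util's guest code treats wr_fract as "write one out of
	 * every wr_fract accesses": 1 makes every access a write, while
	 * INT_MAX makes the iteration effectively read-only.
	 */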
	perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
	sync_global_to_guest(vm, perf_test_args);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, vcpus, description);
}

static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
{
	/*
	 * Even though this parallelizes the work across vCPUs, this is still a
	 * very slow operation because page_idle forces the test to mark one pfn
	 * at a time and the clear_young notifier serializes on the KVM MMU
	 * lock.
	 */
	pr_debug("Marking VM memory idle (slow)...\n");
	iteration_work = ITERATION_MARK_IDLE;
	run_iteration(vm, vcpus, "Mark memory idle");
}

static pthread_t *create_vcpu_threads(int vcpus)
{
	pthread_t *vcpu_threads;
	int i;

	vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
	TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");

	for (i = 0; i < vcpus; i++) {
		vcpu_last_completed_iteration[i] = iteration;
		pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
			       &perf_test_args.vcpu_args[i]);
	}

	return vcpu_threads;
}

static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
{
	int i;

	/* Set done to signal the vCPU threads to exit */
	done = true;

	for (i = 0; i < vcpus; i++)
		pthread_join(vcpu_threads[i], NULL);
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *params = arg;
	struct kvm_vm *vm;
	pthread_t *vcpu_threads;
	int vcpus = params->vcpus;

	vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src);

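	/*
	 * Unless the user requested overlapping accesses with -o, give each
	 * vCPU its own disjoint region of guest memory to touch.
	 */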
	perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
			      !overlap_memory_access);

	vcpu_threads = create_vcpu_threads(vcpus);

	pr_info("\n");
	access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");

	/* As a control, read and write to the populated memory first. */
	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
	access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");

	/* Repeat on memory that has been marked as idle. */
	mark_memory_idle(vm, vcpus);
	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
	mark_memory_idle(vm, vcpus);
	access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");

	terminate_vcpu_threads(vcpu_threads, vcpus);
	free(vcpu_threads);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
	       name);
	puts("");
	printf(" -h: Display this help message.\n");
	guest_modes_help();
	printf(" -b: specify the size of the memory region that should be\n"
	       "     accessed by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -s: specify the type of memory that should be used to\n"
	       "     back the guest data region.\n\n");
	backing_src_help();
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params params = {
		.backing_src = VM_MEM_SRC_ANONYMOUS,
		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
		.vcpus = 1,
	};
	int page_idle_fd;
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			params.vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			params.vcpus = atoi(optarg);
			break;
		case 'o':
			overlap_memory_access = true;
			break;
		case 's':
			params.backing_src = parse_backing_src_type(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	if (page_idle_fd < 0) {
		print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
		exit(KSFT_SKIP);
	}
	close(page_idle_fd);

	for_each_guest_mode(run_test, &params);

	return 0;
}