// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA			0x8000000ULL
#define GICR_BASE_GPA			0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as it
	 * is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	/* Only close the fd if vgic_v3_setup() actually created a device. */
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

/* Host variables */
static u64 dirty_log_manual_caps;	/* KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 bits in use */
static bool host_quit;			/* Set by the host to tell vCPU workers to exit */
static int iteration;			/* Current iteration, bumped only by the host */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

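/*
 * Worker run on each vCPU thread: enter the guest, wait for the UCALL_SYNC
 * handshake, record the iteration just completed, then spin until the host
 * bumps the global iteration counter (or sets host_quit).
 */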
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				vcpu_idx, current_iteration, ts_diff.tv_sec,
				ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				vcpu_idx, current_iteration, ts_diff.tv_sec,
				ts_diff.tv_nsec);
		}

		/* Wait for the host to kick off the next iteration. */
		while (current_iteration == READ_ONCE(iteration) &&
		       !READ_ONCE(host_quit)) {}
	}

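	/*
	 * Iteration 0 only populates memory and is excluded from the totals
	 * above, so the divisor here is the number of dirty-memory passes.
	 */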
	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

struct test_params {
	unsigned long iterations;			/* -i */
	uint64_t phys_offset;				/* -p */
	int wr_fract;					/* -f */
	bool partition_vcpu_memory_access;		/* cleared by -o */
	enum vm_mem_backing_src_type backing_src;	/* -s */
	int slots;					/* -x */
};

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}
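
/*
 * For reference: kvm_vm_get_dirty_log() is a thin wrapper around the
 * KVM_GET_DIRTY_LOG vm ioctl, roughly (error handling omitted, assuming the
 * standard KVM UAPI):
 *
 *	struct kvm_dirty_log args = {
 *		.slot = slot,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &args);
 *
 * KVM fills in one bit per page in the slot, which is why each bitmap below
 * is allocated with pages_per_slot bits.
 */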

static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}
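
/*
 * Likewise, kvm_vm_clear_dirty_log() wraps the KVM_CLEAR_DIRTY_LOG vm ioctl,
 * which re-protects only the pages whose bits are set in the passed-in
 * bitmap; roughly (error handling omitted, assuming the standard KVM UAPI):
 *
 *	struct kvm_clear_dirty_log args = {
 *		.slot = slot,
 *		.first_page = 0,
 *		.num_pages = pages_per_slot,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &args);
 */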

static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	perf_test_set_wr_fract(vm, p->wr_fract);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;
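	/*
	 * The division above assumes the host pages divide evenly across the
	 * memslots; perf_test_create_vm() is expected to reject slot counts
	 * that don't divide the region evenly.
	 */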

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Enable dirty logging on all slots. With KVM_DIRTY_LOG_INITIALLY_SET
	 * the bitmaps start out all set, so enabling does not eagerly
	 * write-protect guest memory.
	 */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Tell the vCPU threads to quit */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-f wr_fract] [-v vcpus] [-o] "
	       "[-s mem type] [-x memslots]\n", name);
	puts("");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -f: specify the fraction of pages which should be written to\n"
	       "     as opposed to simply read, in the form\n"
	       "     1/<fraction of pages to write>.\n"
	       "     (default: 1 i.e. all pages are written to.)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	puts("");
	exit(0);
}
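
/*
 * Example invocations (illustrative; all flags are documented in help()):
 *
 *	# Defaults: 2 iterations, 1 vCPU, 1G dirtied per vCPU.
 *	./dirty_log_perf_test
 *
 *	# 8 vCPUs, 256M each, 4 memslots, legacy (non-manual-protect)
 *	# dirty logging.
 *	./dirty_log_perf_test -v 8 -b 256M -x 4 -g
 */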

int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.wr_fract = 1,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
	};
	int opt;

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);
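	/*
	 * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE makes KVM_GET_DIRTY_LOG
	 * non-destructive (dirty state persists until KVM_CLEAR_DIRTY_LOG
	 * re-protects the pages), and KVM_DIRTY_LOG_INITIALLY_SET defers
	 * write-protection until the log is first cleared. -g clears both
	 * bits to exercise the legacy path.
	 */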

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ghi:p:m:nb:f:v:os:x:")) != -1) {
		switch (opt) {
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'i':
			p.iterations = atoi(optarg);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			perf_test_args.nested = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'f':
			p.wr_fract = atoi(optarg);
			TEST_ASSERT(p.wr_fract >= 1,
				    "Write fraction cannot be less than one");
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'x':
			p.slots = atoi(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}