// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

#define VCPU_ID				1

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP		1024

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL		10UL

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
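/*
 * e.g. with BITS_PER_LONG == 64 the swizzle is 56, so LE bit 0 maps to
 * bit 56 of the native long, i.e. the lowest bit of the first byte in
 * memory on a big-endian machine, which is where a little-endian reader
 * expects bit 0 to live.
 */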
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le		test_bit
# define set_bit_le		set_bit
# define clear_bit_le		clear_bit
# define test_and_set_bit_le	test_and_set_bit
# define test_and_clear_bit_le	test_and_clear_bit
#endif

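/*
 * Default number of entries in the per-vcpu dirty ring.  Note that the
 * KVM dirty ring ABI requires the ring size to be a power of two.
 */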
#define TEST_DIRTY_RING_COUNT		65536

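/* Signal used to kick the vcpu thread out of KVM_RUN (consumed via sigwait()) */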
#define SIG_IPI SIGUSR1

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

/*
 * Continuously write to the first 8 bytes of random pages within
 * the testing memory region.
 */
static void guest_code(void)
{
	uint64_t addr;
	int i;

	/*
	 * On s390x, all pages of a 1M segment are initially marked as dirty
	 * when a page of the segment is written to for the very first time.
	 * To compensate for this specialty in this test, we need to touch
	 * all pages during the first iteration.
	 */
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		*(uint64_t *)addr = READ_ONCE(iteration);
	}

	while (true) {
		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
			addr = guest_test_virt_mem;
			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
				* guest_page_size;
			addr = align_down(addr, host_page_size);
			*(uint64_t *)addr = READ_ONCE(iteration);
		}

		/* Tell the host that we need more random numbers */
		GUEST_SYNC(1);
	}
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
 * This is only set by the main thread, and only cleared by the vcpu
 * thread.  It is used to request the vcpu thread to stop at the next
 * GUEST_SYNC, since GUEST_SYNC is the only place where we guarantee that
 * both the "dirty bit" and the "dirty data" match.  E.g., SIG_IPI won't
 * guarantee that if the vcpu is interrupted after setting the dirty bit
 * but before the data is written.
 */
static atomic_t vcpu_sync_stop_requested;
/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event.  It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */
static bool dirty_ring_vcpu_ring_full;
/*
 * This is only used for verifying the dirty pages.  The dirty ring has a
 * very tricky case when the ring just got full: kvm will do a userspace
 * exit due to ring full.  When that happens, the very last PFN is set but
 * the data is not actually changed (the guest WRITE is not really applied
 * yet), because we found that the dirty ring is full, refused to continue
 * the vcpu, and recorded the dirty gfn with the old contents.
 *
 * For this specific case, it's safe to skip checking this pfn for this
 * bit, because it's a redundant bit, and when the write happens later the
 * bit will be set again.  We use this variable to always keep track of the
 * latest dirty gfn we've collected, so that if a data mismatch is found
 * later in the verifying process, we let it pass.
 */
static uint64_t dirty_ring_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Run all supported modes */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode of logging to test.  Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

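/*
 * Force the vcpu thread out of KVM_RUN: SIG_IPI makes the ioctl return
 * -EINTR, and the pending signal is then consumed by sigwait() in
 * vcpu_worker().
 */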
static void vcpu_kick(void)
{
	pthread_kill(vcpu_thread, SIG_IPI);
}

/*
 * Our test relies on signal tricks, so use a version of sem_wait() that
 * retries when it is interrupted by a signal.
 */
static void sem_wait_until(sem_t *sem)
{
	int ret;

	do
		ret = sem_wait(sem);
	while (ret == -1 && errno == EINTR);
}

static bool clear_log_supported(void)
{
	return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void clear_log_create_vm_done(struct kvm_vm *vm)
{
	u64 manual_caps;

	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			KVM_DIRTY_LOG_INITIALLY_SET);
	vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}

static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vm, slot, bitmap);
}

static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vm, slot, bitmap);
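	/* With manual protect enabled, re-protect only the harvested pages */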
	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}

/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
	if (atomic_read(&vcpu_sync_stop_requested)) {
		/* The main thread is sleeping, waiting for us to stop */
		atomic_set(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait_until(&sem_vcpu_cont);
	}
}

static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
		    "vcpu run failed: errno=%d", err);

	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
		    "Invalid guest sync status: exit_reason=%s\n",
		    exit_reason_str(run->exit_reason));

	vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
	return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
	/*
	 * Switch to dirty ring mode after VM creation but before any
	 * vcpu is created.
	 */
	vm_enable_dirty_ring(vm, test_dirty_ring_count *
			     sizeof(struct kvm_dirty_gfn));
}

static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}

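/*
 * Flag a harvested entry so that a later kvm_vm_reset_dirty_ring() can
 * reclaim its ring slot.
 */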
static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_RESET;
}

static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;

	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);
		//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
		set_bit_le(cur->offset, bitmap);
		dirty_ring_last_page = cur->offset;
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}

static void dirty_ring_wait_vcpu(void)
{
	/* This makes sure that the hardware PML cache is flushed */
	vcpu_kick();
	sem_wait_until(&sem_vcpu_stop);
}

static void dirty_ring_continue_vcpu(void)
{
	pr_info("Notifying vcpu to continue\n");
	sem_post(&sem_vcpu_cont);
}

static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
					   void *bitmap, uint32_t num_pages)
{
	/* We only have one vcpu */
	static uint32_t fetch_index = 0;
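	/*
	 * fetch_index runs free across collections; the ring slot to read
	 * is always fetch_index % test_dirty_ring_count.
	 */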
	uint32_t count = 0, cleared;
	bool continued_vcpu = false;

	dirty_ring_wait_vcpu();

	if (!dirty_ring_vcpu_ring_full) {
		/*
		 * This is not a ring-full event, it's safe to allow
		 * vcpu to continue
		 */
		dirty_ring_continue_vcpu();
		continued_vcpu = true;
	}

	/* Only have one vcpu */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
				       slot, bitmap, num_pages, &fetch_index);

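	/*
	 * KVM_RESET_DIRTY_RINGS reclaims all ring entries we flagged as
	 * collected and returns the number of entries it reset.
	 */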
	cleared = kvm_vm_reset_dirty_ring(vm);

	/* Cleared pages should be the same as collected */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);

	if (!continued_vcpu) {
		TEST_ASSERT(dirty_ring_vcpu_ring_full,
			    "Didn't continue vcpu even without ring full");
		dirty_ring_continue_vcpu();
	}

	pr_info("Iteration %ld collected %u pages\n", iteration, count);
}

static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	/* A ucall-sync or ring-full event is allowed */
	if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
		/* We should allow this to continue */
		;
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
		   (ret == -1 && err == EINTR)) {
		/* Update the flag first before pause */
		WRITE_ONCE(dirty_ring_vcpu_ring_full,
			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
		sem_post(&sem_vcpu_stop);
		pr_info("vcpu stops because %s...\n",
			dirty_ring_vcpu_ring_full ?
			"dirty ring is full" : "vcpu is kicked out");
		sem_wait_until(&sem_vcpu_cont);
		pr_info("vcpu continues now.\n");
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}
}

static void dirty_ring_before_vcpu_join(void)
{
	/* Post once more so the vcpu thread can't block forever and will quit */
	sem_post(&sem_vcpu_cont);
}

struct log_mode {
	const char *name;
	/* Return true if this mode is supported, otherwise false */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages)(struct kvm_vm *vm, int slot,
				    void *bitmap, uint32_t num_pages);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
	void (*before_vcpu_join)(void);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.before_vcpu_join = dirty_ring_before_vcpu_join,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};

/*
 * We use this bitmap to track pages that should have their dirty bit set
 * in the _next_ iteration.  For example, if we detect that a page's value
 * changed to the current iteration number but at the same time the page's
 * bit is cleared in the latest bitmap, then the system must report that
 * write in the next get-dirty-log call.
 */
static unsigned long *host_bmap_track;

static void log_modes_dump(void)
{
	int i;

	printf("all");
	for (i = 0; i < LOG_MODE_NUM; i++)
		printf(", %s", log_modes[i].name);
	printf("\n");
}

static bool log_mode_supported(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->supported)
		return mode->supported();

	return true;
}

static void log_mode_create_vm_done(struct kvm_vm *vm)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->create_vm_done)
		mode->create_vm_done(vm);
}

static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
					 void *bitmap, uint32_t num_pages)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	TEST_ASSERT(mode->collect_dirty_pages != NULL,
		    "collect_dirty_pages() is required for any log mode!");
	mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
}

static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->after_vcpu_run)
		mode->after_vcpu_run(vm, ret, err);
}

static void log_mode_before_vcpu_join(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->before_vcpu_join)
		mode->before_vcpu_join();
}

static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
	uint64_t i;

	for (i = 0; i < size; i++)
		guest_array[i] = random();
}

static void *vcpu_worker(void *data)
{
	int ret;
	struct kvm_vm *vm = data;
	uint64_t *guest_array;
	uint64_t pages_count = 0;
	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
						 + sizeof(sigset_t));
	sigset_t *sigset = (sigset_t *) &sigmask->sigset;

	/*
	 * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
	 * ioctl to return with -EINTR, but it is still pending and we need
	 * to consume it with sigwait().
	 */
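	/*
	 * The kernel expects sigmask->len to be the sigset size in bytes as
	 * the kernel sees it; 8 bytes covers the 64-signal kernel sigset_t.
	 */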
	sigmask->len = 8;
	pthread_sigmask(0, NULL, sigset);
	sigdelset(sigset, SIG_IPI);
	vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);

	sigemptyset(sigset);
	sigaddset(sigset, SIG_IPI);

	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

	while (!READ_ONCE(host_quit)) {
		/* Refill the random page indices for the guest */
		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
		pages_count += TEST_PAGES_PER_LOOP;
		/* Let the guest dirty the random pages */
		ret = __vcpu_run(vm, VCPU_ID);
		if (ret == -1 && errno == EINTR) {
			int sig = -1;
			sigwait(sigset, &sig);
			assert(sig == SIG_IPI);
		}
		log_mode_after_vcpu_run(vm, ret, errno);
	}

	pr_info("Dirtied %"PRIu64" pages\n", pages_count);

	return NULL;
}

static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
	uint64_t step = vm_num_host_pages(mode, 1);
	uint64_t page;
	uint64_t *value_ptr;
	uint64_t min_iter = 0;

	for (page = 0; page < host_num_pages; page += step) {
		value_ptr = host_test_mem + page * host_page_size;

		/* If this is a special page that we were tracking... */
		if (test_and_clear_bit_le(page, host_bmap_track)) {
			host_track_next_count++;
			TEST_ASSERT(test_bit_le(page, bmap),
				    "Page %"PRIu64" should have its dirty bit "
				    "set in this iteration but it is missing",
				    page);
		}

		if (test_and_clear_bit_le(page, bmap)) {
			bool matched;

			host_dirty_count++;

			/*
			 * If the bit is set, the value written onto
			 * the corresponding page should be either the
			 * previous iteration number or the current one.
			 */
			matched = (*value_ptr == iteration ||
				   *value_ptr == iteration - 1);

			if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
				if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
					/*
					 * Short answer: this case is special
					 * only for the dirty ring test, where
					 * the page is the last page before a
					 * kvm dirty ring full in iteration
					 * N-2.
					 *
					 * Long answer: Assuming ring size R,
					 * one possible condition is:
					 *
					 *      main thr       vcpu thr
					 *      --------       --------
					 *    iter=1
					 *                   write 1 to page 0~(R-1)
					 *                   full, vmexit
					 *    collect 0~(R-1)
					 *    kick vcpu
					 *                   write 1 to (R-1)~(2R-2)
					 *                   full, vmexit
					 *    iter=2
					 *    collect (R-1)~(2R-2)
					 *    kick vcpu
					 *                   write 1 to (2R-2)
					 *                   (NOTE!!! "1" cached in cpu reg)
					 *                   write 2 to (2R-1)~(3R-3)
					 *                   full, vmexit
					 *    iter=3
					 *    collect (2R-2)~(3R-3)
					 *    (here the value read on page
					 *     "2R-2" is 1, while iter=3!!!)
					 *
					 * This however can only happen once per iteration.
					 */
					min_iter = iteration - 1;
					continue;
				} else if (page == dirty_ring_last_page) {
					/*
					 * Please refer to comments in
					 * dirty_ring_last_page.
					 */
					continue;
				}
			}

			TEST_ASSERT(matched,
				    "Set page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
		} else {
			host_clear_count++;
			/*
			 * If cleared, the value written can be any
			 * value smaller than or equal to the iteration
			 * number.  Note that the value can be exactly
			 * (iteration-1) if that write can happen
			 * like this:
			 *
			 * (1) increase loop count to "iteration-1"
			 * (2) write to page P happens (with value
			 *     "iteration-1")
			 * (3) get dirty log for "iteration-1"; we'll
			 *     see that page P bit is set (dirtied),
			 *     and not set the bit in host_bmap_track
			 * (4) increase loop count to "iteration"
			 *     (which is current iteration)
			 * (5) get dirty log for current iteration,
			 *     we'll see that page P is cleared, with
			 *     value "iteration-1".
			 */
			TEST_ASSERT(*value_ptr <= iteration,
				    "Clear page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
			if (*value_ptr == iteration) {
				/*
				 * This page was _just_ modified; it
				 * should report its dirtiness in the
				 * next run
				 */
				set_bit_le(page, host_bmap_track);
			}
		}
	}
}

static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
				uint64_t extra_mem_pages, void *guest_code)
{
	struct kvm_vm *vm;
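	/*
	 * Rough heuristic: one 4K page-table page holds 512 PTEs, so one
	 * page of tables maps 512 pages; double that for the upper levels.
	 */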
	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
	kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif
	log_mode_create_vm_done(vm);
	vm_vcpu_add_default(vm, vcpuid, guest_code);
	return vm;
}

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K  12

struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long *bmap;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}

	/*
	 * We reserve page-table memory for twice the extra dirty memory,
	 * which definitely covers the original (1G+) test range.  We do the
	 * calculation with the 4K page size, which is the smallest, so the
	 * page count is enough for all archs (e.g., a 64K page size guest
	 * will need even less memory for page tables).
	 */
	vm = create_vm(mode, VCPU_ID,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
		       guest_code);

	guest_page_size = vm_get_page_size(vm);
	/*
	 * A little more than 1G of guest page sized pages.  Cover the
	 * case where the size is not aligned to 64 pages.
	 */
	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
				   vm_get_page_shift(vm))) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		guest_test_phys_mem = (vm_get_max_gfn(vm) -
				       guest_num_pages) * guest_page_size;
		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap = bitmap_zalloc(host_num_pages);
	host_bmap_track = bitmap_zalloc(host_num_pages);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Do mapping for the dirty track memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	/* Start the iterations */
	iteration = 1;
	sync_global_to_guest(vm, iteration);
	host_quit = false;
	host_dirty_count = 0;
	host_clear_count = 0;
	host_track_next_count = 0;

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);

	while (iteration < p->iterations) {
		/* Give the vcpu thread some time to dirty some pages */
		usleep(p->interval * 1000);
		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages);

		/*
		 * See the vcpu_sync_stop_requested definition for details on
		 * why we need to stop the vcpu while verifying the data.
		 */
		atomic_set(&vcpu_sync_stop_requested, true);
		sem_wait_until(&sem_vcpu_stop);
		/*
		 * NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead we stopped because ring is full;
		 * that's okay too because ring full means we're only missing
		 * the flush of the last page, and since we handle the last
		 * page specially verification will succeed anyway.
		 */
		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
		       atomic_read(&vcpu_sync_stop_requested) == false);
		vm_dirty_log_verify(mode, bmap);
		sem_post(&sem_vcpu_cont);

		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Tell the vcpu thread to quit */
	host_quit = true;
	log_mode_before_vcpu_join();
	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
		host_track_next_count);

	free(bmap);
	free(host_bmap_track);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-I interval] "
	       "[-p offset] [-m mode]\n", name);
	puts("");
	printf(" -c: specify dirty ring size, in number of entries\n");
	printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
	       TEST_DIRTY_RING_COUNT);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
	       TEST_HOST_LOOP_INTERVAL);
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -M: specify the host logging mode "
	       "(default: run all log modes).  Supported modes:\n\t");
	log_modes_dump();
	guest_modes_help();
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;
	sigset_t sigset;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	srandom(time(0));

	/* Ensure that vCPU threads start with SIG_IPI blocked.  */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIG_IPI);
	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}