// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP		1024

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL		10UL

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le		test_bit
# define set_bit_le		set_bit
# define clear_bit_le		clear_bit
# define test_and_set_bit_le	test_and_set_bit
# define test_and_clear_bit_le	test_and_clear_bit
#endif
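
/*
 * Worked example for the swizzle: with BITS_PER_LONG == 64,
 * BITOP_LE_SWIZZLE is 63 & ~0x7 = 56 (0b111000), so "nr ^ 56" flips
 * the byte-index bits of the bit number and thus reverses the byte
 * order within each long.  E.g. little-endian bit 0 lives in the LSB
 * of memory byte 0, which within a big-endian 64-bit long is bit 56,
 * so test_bit_le(0, p) becomes test_bit(56, p).
 */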

#define TEST_DIRTY_RING_COUNT		65536
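/*
 * Note: per KVM's dirty ring documentation (at the time of writing),
 * the ring size in bytes, i.e. count * sizeof(struct kvm_dirty_gfn)
 * with 16-byte entries, must be a power of two; non-power-of-two -c
 * values may therefore be rejected when the ring is enabled.
 */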

#define SIG_IPI SIGUSR1

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

/*
 * Continuously write to the first 8 bytes of random pages within
 * the testing memory region.
 */
static void guest_code(void)
{
	uint64_t addr;
	int i;

	/*
	 * On s390x, all pages of a 1M segment are initially marked as
	 * dirty when a page of the segment is written to for the very
	 * first time.  To compensate for this special behavior in this
	 * test, we need to touch all pages during the first iteration.
	 */
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		*(uint64_t *)addr = READ_ONCE(iteration);
	}

	while (true) {
		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
			addr = guest_test_virt_mem;
			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
				* guest_page_size;
			addr = align_down(addr, host_page_size);
			*(uint64_t *)addr = READ_ONCE(iteration);
		}

		/* Tell the host that we need more random numbers */
		GUEST_SYNC(1);
	}
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Semaphores used to stop the vcpu thread and to let it continue */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
 * This is only set by the main thread, and only cleared by the vcpu
 * thread.  It is used to request that the vcpu thread stop at the next
 * GUEST_SYNC, since GUEST_SYNC is the only place where we guarantee
 * that both the "dirty bit" and the "dirty data" match.  E.g., SIG_IPI
 * won't guarantee that if the vcpu is interrupted after setting the
 * dirty bit but before the data is written.
 */
static atomic_t vcpu_sync_stop_requested;
/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event.  It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */
static bool dirty_ring_vcpu_ring_full;
/*
 * This is only used for verifying the dirty pages.  Dirty ring has a
 * tricky corner case when the ring has just become full: kvm will do a
 * userspace exit due to ring full.  When that happens, the very last
 * PFN is set but the data is not actually changed (the guest WRITE is
 * not really applied yet), because we found that the dirty ring was
 * full, refused to continue the vcpu, and recorded the dirty gfn with
 * the old contents.
 *
 * For this specific case, it's safe to skip checking this pfn for this
 * bit, because it's a redundant bit, and when the write happens later
 * the bit will be set again.  We use this variable to always keep track
 * of the latest dirty gfn we've collected, so that if a data mismatch
 * is found later in the verifying process, we let it pass.
 */
static uint64_t dirty_ring_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Run all supported modes */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode of logging to test.  Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

static void vcpu_kick(void)
{
	pthread_kill(vcpu_thread, SIG_IPI);
}

/*
 * Our test plays signal tricks, so use a wrapper around sem_wait()
 * that retries when it is interrupted by a signal.
 */
static void sem_wait_until(sem_t *sem)
{
	int ret;

	do
		ret = sem_wait(sem);
	while (ret == -1 && errno == EINTR);
}

static bool clear_log_supported(void)
{
	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void clear_log_create_vm_done(struct kvm_vm *vm)
{
	u64 manual_caps;

	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			KVM_DIRTY_LOG_INITIALLY_SET);
	vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}

static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}

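/*
 * With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled (see
 * clear_log_create_vm_done()), KVM_GET_DIRTY_LOG only fetches the
 * bitmap and no longer clears it or write-protects the pages, so the
 * clear-log mode must follow up with KVM_CLEAR_DIRTY_LOG over the
 * whole slot to re-arm dirty tracking for the next iteration.
 */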
static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
}

/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
	if (atomic_read(&vcpu_sync_stop_requested)) {
		/* The main thread is sleeping, waiting for us to stop */
		atomic_set(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait_until(&sem_vcpu_cont);
	}
}

static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct kvm_run *run = vcpu->run;

	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
		    "vcpu run failed: errno=%d", err);

	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
		    "Invalid guest sync status: exit_reason=%s\n",
		    exit_reason_str(run->exit_reason));

	vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
	return kvm_has_cap(KVM_CAP_DIRTY_LOG_RING);
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
	/*
	 * Switch to dirty ring mode after VM creation but before any
	 * vcpus are created.
	 */
	vm_enable_dirty_ring(vm, test_dirty_ring_count *
			     sizeof(struct kvm_dirty_gfn));
}

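/*
 * Each ring entry is a struct kvm_dirty_gfn, which (per the KVM UAPI
 * at the time of writing) is { __u32 flags; __u32 slot; __u64 offset; }:
 * "slot" holds the memslot id (with the address space id in the high
 * 16 bits), "offset" is the gfn offset within that slot, and "flags"
 * carries the entry state: KVM publishes an entry with
 * KVM_DIRTY_GFN_F_DIRTY set, and userspace marks it harvested by
 * setting KVM_DIRTY_GFN_F_RESET, as the two helpers below do.
 */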
static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_RESET;
}

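/*
 * Harvest all entries published so far: walk the ring from the last
 * fetch position until the first entry that is not (yet) dirty,
 * record each dirty gfn in the bitmap and mark the entry as collected.
 * fetch_index only ever grows; it is taken modulo the ring size on
 * each access because the ring is a circular buffer.
 */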
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;

	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);
		//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
		set_bit_le(cur->offset, bitmap);
		dirty_ring_last_page = cur->offset;
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}

static void dirty_ring_wait_vcpu(void)
{
	/*
	 * The kick forces a vmexit, which makes sure that any
	 * hardware-buffered dirty state (e.g., the Intel PML buffer)
	 * is flushed to the dirty ring before we start waiting.
	 */
	vcpu_kick();
	sem_wait_until(&sem_vcpu_stop);
}

static void dirty_ring_continue_vcpu(void)
{
	pr_info("Notifying vcpu to continue\n");
	sem_post(&sem_vcpu_cont);
}

static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					   void *bitmap, uint32_t num_pages)
{
	/* We only have one vcpu */
	static uint32_t fetch_index = 0;
	uint32_t count = 0, cleared;
	bool continued_vcpu = false;

	dirty_ring_wait_vcpu();

	if (!dirty_ring_vcpu_ring_full) {
		/*
		 * This is not a ring-full event, it's safe to allow
		 * vcpu to continue
		 */
		dirty_ring_continue_vcpu();
		continued_vcpu = true;
	}

	/* Only have one vcpu */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
				       slot, bitmap, num_pages, &fetch_index);

	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

	/* Cleared pages should be the same as collected */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);

	if (!continued_vcpu) {
		TEST_ASSERT(dirty_ring_vcpu_ring_full,
			    "Didn't continue vcpu even without ring full");
		dirty_ring_continue_vcpu();
	}

	pr_info("Iteration %"PRIu64" collected %u pages\n", iteration, count);
}

static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct kvm_run *run = vcpu->run;

	/* A ucall-sync or ring-full event is allowed */
	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
		/* We should allow this to continue */
		;
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
		   (ret == -1 && err == EINTR)) {
		/* Update the flag before pausing */
		WRITE_ONCE(dirty_ring_vcpu_ring_full,
			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
		sem_post(&sem_vcpu_stop);
		pr_info("vcpu stops because %s...\n",
			dirty_ring_vcpu_ring_full ?
			"dirty ring is full" : "vcpu is kicked out");
		sem_wait_until(&sem_vcpu_cont);
		pr_info("vcpu continues now.\n");
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}
}

static void dirty_ring_before_vcpu_join(void)
{
	/* Post once more to make sure the vcpu thread can quit */
	sem_post(&sem_vcpu_cont);
}

struct log_mode {
	const char *name;
	/* Return true if this mode is supported, otherwise false */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages)(struct kvm_vcpu *vcpu, int slot,
				    void *bitmap, uint32_t num_pages);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
	void (*before_vcpu_join)(void);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.before_vcpu_join = dirty_ring_before_vcpu_join,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};

/*
 * We use this bitmap to track pages that should have their dirty bits
 * set in the _next_ iteration.  For example, if we detect that a page's
 * value changed to the current iteration number but at the same time
 * the page's bit is cleared in the latest bitmap, then the system must
 * report that write in the next get-dirty-log call.
 */
static unsigned long *host_bmap_track;

static void log_modes_dump(void)
{
	int i;

	printf("all");
	for (i = 0; i < LOG_MODE_NUM; i++)
		printf(", %s", log_modes[i].name);
	printf("\n");
}

static bool log_mode_supported(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->supported)
		return mode->supported();

	return true;
}

static void log_mode_create_vm_done(struct kvm_vm *vm)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->create_vm_done)
		mode->create_vm_done(vm);
}

static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					 void *bitmap, uint32_t num_pages)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	TEST_ASSERT(mode->collect_dirty_pages != NULL,
		    "collect_dirty_pages() is required for any log mode!");
	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
}

static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->after_vcpu_run)
		mode->after_vcpu_run(vcpu, ret, err);
}

static void log_mode_before_vcpu_join(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->before_vcpu_join)
		mode->before_vcpu_join();
}

static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
	uint64_t i;

	for (i = 0; i < size; i++)
		guest_array[i] = random();
}

static void *vcpu_worker(void *data)
{
	int ret;
	struct kvm_vcpu *vcpu = data;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t *guest_array;
	uint64_t pages_count = 0;
	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
						 + sizeof(sigset_t));
	sigset_t *sigset = (sigset_t *) &sigmask->sigset;

	/*
	 * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
	 * ioctl to return with -EINTR, but it is still pending and we need
	 * to accept it with the sigwait.
	 */
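	/*
	 * Note: the kernel checks that len equals its own sizeof(sigset_t)
	 * (8 bytes on 64-bit, i.e. _NSIG bits), which is much smaller than
	 * glibc's sigset_t; hence the hardcoded 8 below.
	 */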
	sigmask->len = 8;
	pthread_sigmask(0, NULL, sigset);
	sigdelset(sigset, SIG_IPI);
	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);

	sigemptyset(sigset);
	sigaddset(sigset, SIG_IPI);

	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

	while (!READ_ONCE(host_quit)) {
		/* Refill the array of random page indices for the guest */
		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
		pages_count += TEST_PAGES_PER_LOOP;
		/* Let the guest dirty the random pages */
		ret = __vcpu_run(vcpu);
		if (ret == -1 && errno == EINTR) {
			int sig = -1;
			sigwait(sigset, &sig);
			assert(sig == SIG_IPI);
		}
		log_mode_after_vcpu_run(vcpu, ret, errno);
	}

	pr_info("Dirtied %"PRIu64" pages\n", pages_count);

	return NULL;
}

static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
	uint64_t step = vm_num_host_pages(mode, 1);
	uint64_t page;
	uint64_t *value_ptr;
	uint64_t min_iter = 0;

	for (page = 0; page < host_num_pages; page += step) {
		value_ptr = host_test_mem + page * host_page_size;

		/* If this is a special page that we were tracking... */
		if (test_and_clear_bit_le(page, host_bmap_track)) {
			host_track_next_count++;
			TEST_ASSERT(test_bit_le(page, bmap),
				    "Page %"PRIu64" should have its dirty bit "
				    "set in this iteration but it is missing",
				    page);
		}

		if (test_and_clear_bit_le(page, bmap)) {
			bool matched;

			host_dirty_count++;

			/*
			 * If the bit is set, the value written onto
			 * the corresponding page should be either the
			 * previous iteration number or the current one.
			 */
			matched = (*value_ptr == iteration ||
				   *value_ptr == iteration - 1);

			if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
				if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
					/*
					 * Short answer: this case is special
					 * only for dirty ring test where the
					 * page is the last page before a kvm
					 * dirty ring full in iteration N-2.
					 *
					 * Long answer: Assuming ring size R,
					 * one possible condition is:
					 *
					 *      main thr       vcpu thr
					 *      --------       --------
					 *    iter=1
					 *                   write 1 to page 0~(R-1)
					 *                   full, vmexit
					 *    collect 0~(R-1)
					 *    kick vcpu
					 *                   write 1 to (R-1)~(2R-2)
					 *                   full, vmexit
					 *    iter=2
					 *    collect (R-1)~(2R-2)
					 *    kick vcpu
					 *                   write 1 to (2R-2)
					 *                   (NOTE!!! "1" cached in cpu reg)
					 *                   write 2 to (2R-1)~(3R-3)
					 *                   full, vmexit
					 *    iter=3
					 *    collect (2R-2)~(3R-3)
					 *    (here if we read value on page
					 *     "2R-2" is 1, while iter=3!!!)
					 *
					 * This however can only happen once per iteration.
					 */
					min_iter = iteration - 1;
					continue;
				} else if (page == dirty_ring_last_page) {
					/*
					 * Please refer to comments in
					 * dirty_ring_last_page.
					 */
					continue;
				}
			}

			TEST_ASSERT(matched,
				    "Set page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
		} else {
			host_clear_count++;
			/*
			 * If cleared, the value written can be any
			 * value smaller than or equal to the iteration
			 * number.  Note that the value can be exactly
			 * (iteration-1) if the write happens like
			 * this:
			 *
			 * (1) increase loop count to "iteration-1"
			 * (2) write to page P happens (with value
			 *     "iteration-1")
			 * (3) get dirty log for "iteration-1"; we'll
			 *     see that page P bit is set (dirtied),
			 *     and not set the bit in host_bmap_track
			 * (4) increase loop count to "iteration"
			 *     (which is current iteration)
			 * (5) get dirty log for current iteration,
			 *     we'll see that page P is cleared, with
			 *     value "iteration-1".
			 */
			TEST_ASSERT(*value_ptr <= iteration,
				    "Clear page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
			if (*value_ptr == iteration) {
				/*
				 * This page was _just_ modified; it
				 * should report its dirtiness in the
				 * next run
				 */
				set_bit_le(page, host_bmap_track);
			}
		}
	}
}

static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
				uint64_t extra_mem_pages, void *guest_code)
{
	struct kvm_vm *vm;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = __vm_create(mode, 1, extra_mem_pages);

	log_mode_create_vm_done(vm);
	*vcpu = vm_vcpu_add(vm, 0, guest_code);
	return vm;
}

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K  12

struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	unsigned long *bmap;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}

	/*
	 * Reserve page-table memory for twice the dirty test memory,
	 * which will definitely cover the original (1G+) test range.
	 * The calculation uses the 4K page size, the smallest
	 * supported, so the page count is sufficient for all archs:
	 * 2ul << (30 - 12) is 2^19 pages, i.e. twice the 2^18 4K pages
	 * of a 1G region (a 64K-page guest needs even less memory for
	 * page tables).
	 */
	vm = create_vm(mode, &vcpu,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);

	guest_page_size = vm->page_size;
	/*
	 * A little more than 1G of guest page sized pages.  Cover the
	 * case where the size is not aligned to 64 pages.
	 */
	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
				      guest_page_size;
		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap = bitmap_zalloc(host_num_pages);
	host_bmap_track = bitmap_zalloc(host_num_pages);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Do mapping for the dirty track memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	/* Start the iterations */
	iteration = 1;
	sync_global_to_guest(vm, iteration);
	host_quit = false;
	host_dirty_count = 0;
	host_clear_count = 0;
	host_track_next_count = 0;

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

	while (iteration < p->iterations) {
		/* Give the vcpu thread some time to dirty some pages */
		usleep(p->interval * 1000);
		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages);

		/*
		 * See the vcpu_sync_stop_requested definition for details
		 * on why we need to stop the vcpu while verifying the data.
		 */
		atomic_set(&vcpu_sync_stop_requested, true);
		sem_wait_until(&sem_vcpu_stop);
		/*
		 * NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead we stopped because ring is full;
		 * that's okay too because ring full means we're only missing
		 * the flush of the last page, and since we handle the last
		 * page specially verification will succeed anyway.
		 */
		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
		       atomic_read(&vcpu_sync_stop_requested) == false);
		vm_dirty_log_verify(mode, bmap);
		sem_post(&sem_vcpu_cont);

		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Tell the vcpu thread to quit */
	host_quit = true;
	log_mode_before_vcpu_join();
	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
		host_track_next_count);

	free(bmap);
	free(host_bmap_track);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-I interval] "
	       "[-p offset] [-c count] [-m mode] [-M log_mode]\n", name);
	puts("");
	printf(" -c: specify dirty ring size, in number of entries\n");
	printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
	       TEST_DIRTY_RING_COUNT);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
	       TEST_HOST_LOOP_INTERVAL);
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -M: specify the host logging mode "
	       "(default: run all log modes).  Supported modes:\n\t");
	log_modes_dump();
	guest_modes_help();
	puts("");
	exit(0);
}
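
/*
 * Illustrative invocations (assuming the binary built from this file
 * is named dirty_log_test):
 *
 *   ./dirty_log_test                        # all log modes, defaults
 *   ./dirty_log_test -M dirty-ring -c 4096  # dirty ring, 4096 entries
 *   ./dirty_log_test -i 64 -I 5             # 64 iterations, 5 ms apart
 */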

int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;
	sigset_t sigset;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	srandom(time(0));

	/* Ensure that vCPU threads start with SIG_IPI blocked.  */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIG_IPI);
	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}