// SPDX-License-Identifier: GPL-2.0
/*
 * page_fault_test.c - Test stage 2 faults.
 *
 * This test tries different combinations of guest accesses (e.g., write,
 * S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
 * hugetlbfs with a hole). It checks that the expected handling method is
 * called (e.g., uffd faults with the right address and write/read flag).
 */
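/*
 * A minimal usage sketch (paths assume the usual kselftest build layout, and
 * the backing source name must be one of the aliases listed by -h):
 *
 *   $ make -C tools/testing/selftests TARGETS=kvm
 *   $ ./tools/testing/selftests/kvm/aarch64/page_fault_test -s anonymous_hugetlb
 */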
#define _GNU_SOURCE
#include <linux/bitmap.h>
#include <fcntl.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <asm/sysreg.h>
#include <linux/bitfield.h>
#include "guest_modes.h"
#include "userfaultfd_util.h"

/* Guest virtual addresses that point to the test page and its PTE. */
#define TEST_GVA				0xc0000000
#define TEST_EXEC_GVA				(TEST_GVA + 0x8)
#define TEST_PTE_GVA				0xb0000000
#define TEST_DATA				0x0123456789ABCDEF

static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;

#define CMD_NONE				(0)
#define CMD_SKIP_TEST				(1ULL << 1)
#define CMD_HOLE_PT				(1ULL << 2)
#define CMD_HOLE_DATA				(1ULL << 3)
#define CMD_CHECK_WRITE_IN_DIRTY_LOG		(1ULL << 4)
#define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG		(1ULL << 5)
#define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG		(1ULL << 6)
#define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG	(1ULL << 7)
#define CMD_SET_PTE_AF				(1ULL << 8)
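/*
 * Commands are ORed into a single mask and passed to the host via
 * GUEST_SYNC(), e.g., GUEST_SYNC(CMD_HOLE_DATA | CMD_HOLE_PT).
 */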

#define PREPARE_FN_NR				10
#define CHECK_FN_NR				10

static struct event_cnt {
	int mmio_exits;
	int fail_vcpu_runs;
	int uffd_faults;
	/* uffd_faults is incremented from multiple threads. */
	pthread_mutex_t uffd_faults_mutex;
} events;

struct test_desc {
	const char *name;
	uint64_t mem_mark_cmd;
	/* Skip the test if any prepare function returns false. */
	bool (*guest_prepare[PREPARE_FN_NR])(void);
	void (*guest_test)(void);
	void (*guest_test_check[CHECK_FN_NR])(void);
	uffd_handler_t uffd_pt_handler;
	uffd_handler_t uffd_data_handler;
	void (*dabt_handler)(struct ex_regs *regs);
	void (*iabt_handler)(struct ex_regs *regs);
	void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
	void (*fail_vcpu_run_handler)(int ret);
	uint32_t pt_memslot_flags;
	uint32_t data_memslot_flags;
	bool skip;
	struct event_cnt expected_events;
};

struct test_params {
	enum vm_mem_backing_src_type src_type;
	struct test_desc *test_desc;
};

static inline void flush_tlb_page(uint64_t vaddr)
{
	uint64_t page = vaddr >> 12;

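	/*
	 * Barrier sequence: DSB ISHST makes the PTE update visible before the
	 * TLBI; "tlbi vaae1is" invalidates this VA for all ASIDs across the
	 * Inner Shareable domain; DSB ISH waits for the invalidation to
	 * complete; the ISB then resynchronizes the instruction stream. Note
	 * that "vaddr >> 12" assumes a 4K translation granule.
	 */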
	dsb(ishst);
	asm volatile("tlbi vaae1is, %0" :: "r" (page));
	dsb(ish);
	isb();
}

static void guest_write64(void)
{
	uint64_t val;

	WRITE_ONCE(*guest_test_memory, TEST_DATA);
	val = READ_ONCE(*guest_test_memory);
	GUEST_ASSERT_EQ(val, TEST_DATA);
}

/* Check the system for atomic instructions. */
static bool guest_check_lse(void)
{
	uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
	uint64_t atomic;

	atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0);
	return atomic >= 2;
}

static bool guest_check_dc_zva(void)
{
	uint64_t dczid = read_sysreg(dczid_el0);
	uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid);

	return dzp == 0;
}

/* Compare and swap instruction. */
static void guest_cas(void)
{
	uint64_t val;

	GUEST_ASSERT(guest_check_lse());
	asm volatile(".arch_extension lse\n"
		     "casal %0, %1, [%2]\n"
		     :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
	val = READ_ONCE(*guest_test_memory);
	GUEST_ASSERT_EQ(val, TEST_DATA);
}

static void guest_read64(void)
{
	uint64_t val;

	val = READ_ONCE(*guest_test_memory);
	GUEST_ASSERT_EQ(val, 0);
}

/* Address translation instruction. */
static void guest_at(void)
{
	uint64_t par;

	asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
	par = read_sysreg(par_el1);
	isb();

	/* PAR_EL1.F (bit 0) is 0 when the AT was successful. */
	GUEST_ASSERT_EQ(par & 1, 0);
}

/*
 * The size of the block written by "dc zva" (4 << DCZID_EL0.BS bytes) is
 * guaranteed to be between 4 bytes and 2KB, which is safe in our case as we
 * need the write to happen for at least a word, and not more than a page.
 */
static void guest_dc_zva(void)
{
	uint16_t val;

	asm volatile("dc zva, %0" :: "r" (guest_test_memory));
	dsb(ish);
	val = READ_ONCE(*guest_test_memory);
	GUEST_ASSERT_EQ(val, 0);
}

/*
 * Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0),
 * so KVM must take special care with them: they should still count as
 * accesses for dirty logging and userfaultfd, but they cannot be emulated as
 * MMIO since there is no syndrome to decode.
 */
static void guest_ld_preidx(void)
{
	uint64_t val;
	uint64_t addr = TEST_GVA - 8;

	/*
	 * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is
	 * in a gap between memslots not backed by anything.
	 */
	asm volatile("ldr %0, [%1, #8]!"
		     : "=r" (val), "+r" (addr));
	GUEST_ASSERT_EQ(val, 0);
	GUEST_ASSERT_EQ(addr, TEST_GVA);
}

static void guest_st_preidx(void)
{
	uint64_t val = TEST_DATA;
	uint64_t addr = TEST_GVA - 8;

	asm volatile("str %0, [%1, #8]!"
		     : "+r" (val), "+r" (addr));

	GUEST_ASSERT_EQ(addr, TEST_GVA);
	val = READ_ONCE(*guest_test_memory);
}

static bool guest_set_ha(void)
{
	uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
	uint64_t hadbs, tcr;

	/* Skip if HA is not supported. */
	hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1);
	if (hadbs == 0)
		return false;

	tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
	write_sysreg(tcr, tcr_el1);
	isb();

	return true;
}

static bool guest_clear_pte_af(void)
{
	*((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
	flush_tlb_page(TEST_GVA);

	return true;
}

static void guest_check_pte_af(void)
{
	dsb(ish);
	GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
}

static void guest_check_write_in_dirty_log(void)
{
	GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
}

static void guest_check_no_write_in_dirty_log(void)
{
	GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
}

static void guest_check_s1ptw_wr_in_dirty_log(void)
{
	GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
}

static void guest_check_no_s1ptw_wr_in_dirty_log(void)
{
	GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
}

static void guest_exec(void)
{
	int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
	int ret;

	ret = code();
	GUEST_ASSERT_EQ(ret, 0x77);
}

static bool guest_prepare(struct test_desc *test)
{
	bool (*prepare_fn)(void);
	int i;

	for (i = 0; i < PREPARE_FN_NR; i++) {
		prepare_fn = test->guest_prepare[i];
		if (prepare_fn && !prepare_fn())
			return false;
	}

	return true;
}

static void guest_test_check(struct test_desc *test)
{
	void (*check_fn)(void);
	int i;

	for (i = 0; i < CHECK_FN_NR; i++) {
		check_fn = test->guest_test_check[i];
		if (check_fn)
			check_fn();
	}
}

static void guest_code(struct test_desc *test)
{
	if (!guest_prepare(test))
		GUEST_SYNC(CMD_SKIP_TEST);

	GUEST_SYNC(test->mem_mark_cmd);

	if (test->guest_test)
		test->guest_test();

	guest_test_check(test);
	GUEST_DONE();
}

static void no_dabt_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
}

static void no_iabt_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
}

static struct uffd_args {
	char *copy;
	void *hva;
	uint64_t paging_size;
} pt_args, data_args;

/* Returns 0 on success, or a negative value if the UFFDIO_COPY failed. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
				struct uffd_args *args)
{
	uint64_t addr = msg->arg.pagefault.address;
	uint64_t flags = msg->arg.pagefault.flags;
	struct uffdio_copy copy;
	int ret;

	TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
		    "The only expected UFFD mode is MISSING");
	TEST_ASSERT_EQ(addr, (uint64_t)args->hva);

	pr_debug("uffd fault: addr=%p write=%d\n",
		 (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));

	copy.src = (uint64_t)args->copy;
	copy.dst = addr;
	copy.len = args->paging_size;
	copy.mode = 0;

	ret = ioctl(uffd, UFFDIO_COPY, &copy);
	if (ret == -1) {
		pr_info("Failed UFFDIO_COPY in 0x%lx with errno: %d\n",
			addr, errno);
		return ret;
	}

	pthread_mutex_lock(&events.uffd_faults_mutex);
	events.uffd_faults += 1;
	pthread_mutex_unlock(&events.uffd_faults_mutex);
	return 0;
}

static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
{
	return uffd_generic_handler(mode, uffd, msg, &pt_args);
}

static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
	return uffd_generic_handler(mode, uffd, msg, &data_args);
}

static void setup_uffd_args(struct userspace_mem_region *region,
			    struct uffd_args *args)
{
	args->hva = (void *)region->region.userspace_addr;
	args->paging_size = region->region.memory_size;

	args->copy = malloc(args->paging_size);
	TEST_ASSERT(args->copy, "Failed to allocate data copy.");
	memcpy(args->copy, args->hva, args->paging_size);
}

static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
		       struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
{
	struct test_desc *test = p->test_desc;
	int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;

	setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
	setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);

	*pt_uffd = NULL;
	if (test->uffd_pt_handler)
		*pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
						    pt_args.hva,
						    pt_args.paging_size,
						    test->uffd_pt_handler);

	*data_uffd = NULL;
	if (test->uffd_data_handler)
		*data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
						      data_args.hva,
						      data_args.paging_size,
						      test->uffd_data_handler);
}

static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
		      struct uffd_desc *data_uffd)
{
	if (test->uffd_pt_handler)
		uffd_stop_demand_paging(pt_uffd);
	if (test->uffd_data_handler)
		uffd_stop_demand_paging(data_uffd);

	free(pt_args.copy);
	free(data_args.copy);
}

static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
{
	TEST_FAIL("There was no UFFD fault expected.");
	return -1;
}

/* Returns false if the test should be skipped. */
static bool punch_hole_in_backing_store(struct kvm_vm *vm,
					struct userspace_mem_region *region)
{
	void *hva = (void *)region->region.userspace_addr;
	uint64_t paging_size = region->region.memory_size;
	int ret, fd = region->fd;

	if (fd != -1) {
		ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				0, paging_size);
		TEST_ASSERT(ret == 0, "fallocate failed");
	} else {
		ret = madvise(hva, paging_size, MADV_DONTNEED);
		TEST_ASSERT(ret == 0, "madvise failed");
	}

	return true;
}

static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
{
	struct userspace_mem_region *region;
	void *hva;

	region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
	hva = (void *)region->region.userspace_addr;

	TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);

	memcpy(hva, run->mmio.data, run->mmio.len);
	events.mmio_exits += 1;
}

static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
{
	uint64_t data;

	memcpy(&data, run->mmio.data, sizeof(data));
	pr_debug("addr=%lld len=%d w=%d data=%lx\n",
		 run->mmio.phys_addr, run->mmio.len,
		 run->mmio.is_write, data);
	TEST_FAIL("There was no MMIO exit expected.");
}

static bool check_write_in_dirty_log(struct kvm_vm *vm,
				     struct userspace_mem_region *region,
				     uint64_t host_pg_nr)
{
	unsigned long *bmap;
	bool first_page_dirty;
	uint64_t size = region->region.memory_size;

	/* getpagesize() is not always equal to vm->page_size */
	bmap = bitmap_zalloc(size / getpagesize());
	kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
	first_page_dirty = test_bit(host_pg_nr, bmap);
	free(bmap);
	return first_page_dirty;
}

/* Returns true to continue the test, and false if it should be skipped. */
static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
	struct userspace_mem_region *data_region, *pt_region;
	bool continue_test = true;
	uint64_t pte_gpa, pte_pg;

	data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
	pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
	pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
	pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();

	if (cmd == CMD_SKIP_TEST)
		continue_test = false;

	if (cmd & CMD_HOLE_PT)
		continue_test = punch_hole_in_backing_store(vm, pt_region);
	if (cmd & CMD_HOLE_DATA)
		continue_test = punch_hole_in_backing_store(vm, data_region);
	if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
		TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
			    "Missing write in dirty log");
	if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
		TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
			    "Missing s1ptw write in dirty log");
	if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
		TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
			    "Unexpected write in dirty log");
	if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
		TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
			    "Unexpected s1ptw write in dirty log");

	return continue_test;
}

void fail_vcpu_run_no_handler(int ret)
{
	TEST_FAIL("Unexpected vcpu run failure");
}

void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
{
	TEST_ASSERT(errno == ENOSYS,
		    "vcpu run should have failed with ENOSYS (MMIO with no syndrome)");
	events.fail_vcpu_runs += 1;
}

typedef uint32_t aarch64_insn_t;
extern aarch64_insn_t __exec_test[2];

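/*
 * Host-built code for the guest to execute from TEST_EXEC_GVA: the
 * __exec_test label exposes the two instructions below (mov x0, #0x77; ret)
 * so that load_exec_code_for_test() can memcpy() them into the test-data
 * memslot.
 */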
noinline void __return_0x77(void)
{
	asm volatile("__exec_test: mov x0, #0x77\n"
		     "ret\n");
}

/*
 * Note that this function runs on the host before the test VM starts: there's
 * no need to sync the D$ and I$ caches.
 */
static void load_exec_code_for_test(struct kvm_vm *vm)
{
	uint64_t *code;
	struct userspace_mem_region *region;
	void *hva;

	region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
	hva = (void *)region->region.userspace_addr;

	assert(TEST_EXEC_GVA > TEST_GVA);
	code = hva + TEST_EXEC_GVA - TEST_GVA;
	memcpy(code, __exec_test, sizeof(__exec_test));
}

static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
				 struct test_desc *test)
{
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_DABT, no_dabt_handler);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_IABT, no_iabt_handler);
}

static void setup_gva_maps(struct kvm_vm *vm)
{
	struct userspace_mem_region *region;
	uint64_t pte_gpa;

	region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
	/* Map TEST_GVA first. This will install a new PTE. */
	virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
	/* Then map TEST_PTE_GVA to the above PTE. */
	pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
	virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
}

enum pf_test_memslots {
	CODE_AND_DATA_MEMSLOT,
	PAGE_TABLE_MEMSLOT,
	TEST_DATA_MEMSLOT,
};

/*
 * Create a memslot for code and data at pfn=0, and test-data and PT ones
 * at max_gfn.
 */
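/*
 * Resulting GPA layout (a sketch; the exact addresses depend on the guest
 * mode and the backing source page size):
 *
 *   [0, code_npages * pgsz)           code and data, anonymous backing
 *   ... unbacked gap ...
 *   [data_gpa - pt_size, data_gpa)    stage 1 page tables, p->src_type
 *   [data_gpa, data_gpa + data_size)  test data, p->src_type (near max_gfn)
 */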
static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
{
	uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
	uint64_t guest_page_size = vm->page_size;
	uint64_t max_gfn = vm_compute_max_gfn(vm);
	/* Enough for 2M of code when using 4K guest pages. */
	uint64_t code_npages = 512;
	uint64_t pt_size, data_size, data_gpa;

	/*
	 * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
	 * VM_MODE_P48V48_4K; note that the .text alone takes ~1.6MB. That's
	 * 13 pages. VM_MODE_P48V48_4K is the mode with the most PT pages, so
	 * use twice that just in case.
	 */
	pt_size = 26 * guest_page_size;

	/* Memslot sizes and GPAs must be aligned to the backing page size. */
	pt_size = align_up(pt_size, backing_src_pagesz);
	data_size = align_up(guest_page_size, backing_src_pagesz);
	data_gpa = (max_gfn * guest_page_size) - data_size;
	data_gpa = align_down(data_gpa, backing_src_pagesz);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
				    CODE_AND_DATA_MEMSLOT, code_npages, 0);
	vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
	vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;

	vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
				    PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
				    p->test_desc->pt_memslot_flags);
	vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;

	vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
				    data_size / guest_page_size,
				    p->test_desc->data_memslot_flags);
	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}

static void setup_ucall(struct kvm_vm *vm)
{
	struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);

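	/*
	 * The GPA right past the test-data memslot is unbacked, so writes
	 * there exit to userspace as MMIO: a convenient spot for the ucall
	 * channel.
	 */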
	ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
}

static void setup_default_handlers(struct test_desc *test)
{
	if (!test->mmio_handler)
		test->mmio_handler = mmio_no_handler;

	if (!test->fail_vcpu_run_handler)
		test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
}

static void check_event_counts(struct test_desc *test)
{
	TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
	TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
	TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
}

static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
{
	struct test_desc *test = p->test_desc;

	pr_debug("Test: %s\n", test->name);
	pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
	pr_debug("Testing memory backing src type: %s\n",
		 vm_mem_backing_src_alias(p->src_type)->name);
}

static void reset_event_counts(void)
{
	memset(&events, 0, sizeof(events));
}

/*
 * This function either succeeds, skips the test (after setting test->skip), or
 * fails with a TEST_FAIL that aborts all tests.
 */
static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
			  struct test_desc *test)
{
	struct kvm_run *run;
	struct ucall uc;
	int ret;

	run = vcpu->run;

	for (;;) {
		ret = _vcpu_run(vcpu);
		if (ret) {
			test->fail_vcpu_run_handler(ret);
			goto done;
		}

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			if (!handle_cmd(vm, uc.args[1])) {
				test->skip = true;
				goto done;
			}
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		case UCALL_NONE:
			if (run->exit_reason == KVM_EXIT_MMIO)
				test->mmio_handler(vm, run);
			break;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = (struct test_params *)arg;
	struct test_desc *test = p->test_desc;
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct uffd_desc *pt_uffd, *data_uffd;

	print_test_banner(mode, p);

	vm = ____vm_create(mode);
	setup_memslots(vm, p);
	kvm_vm_elf_load(vm, program_invocation_name);
	setup_ucall(vm);
	vcpu = vm_vcpu_add(vm, 0, guest_code);

	setup_gva_maps(vm);

	reset_event_counts();

	/*
	 * Set some code in the data memslot for the guest to execute (only
	 * applicable to the EXEC tests). This has to be done before
	 * setup_uffd() as that function copies the memslot data for the uffd
	 * handler.
	 */
	load_exec_code_for_test(vm);
	setup_uffd(vm, p, &pt_uffd, &data_uffd);
	setup_abort_handlers(vm, vcpu, test);
	setup_default_handlers(test);
	vcpu_args_set(vcpu, 1, test);

	vcpu_run_loop(vm, vcpu, test);

	kvm_vm_free(vm);
	free_uffd(test, pt_uffd, data_uffd);

	/*
	 * Make sure we check the events after the uffd threads have exited,
	 * which means they updated their respective event counters.
	 */
	if (!test->skip)
		check_event_counts(test);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-s mem-type]\n", name);
	puts("");
	guest_modes_help();
	backing_src_help("-s");
	puts("");
}

#define SNAME(s)			#s
#define SCAT2(a, b)			SNAME(a ## _ ## b)
#define SCAT3(a, b, c)			SCAT2(a, SCAT2(b, c))
#define SCAT4(a, b, c, d)		SCAT2(a, SCAT3(b, c, d))

#define _CHECK(_test)			_CHECK_##_test
#define _PREPARE(_test)			_PREPARE_##_test
#define _PREPARE_guest_read64		NULL
#define _PREPARE_guest_ld_preidx	NULL
#define _PREPARE_guest_write64		NULL
#define _PREPARE_guest_st_preidx	NULL
#define _PREPARE_guest_exec		NULL
#define _PREPARE_guest_at		NULL
#define _PREPARE_guest_dc_zva		guest_check_dc_zva
#define _PREPARE_guest_cas		guest_check_lse

/* With or without access flag checks */
#define _PREPARE_with_af		guest_set_ha, guest_clear_pte_af
#define _PREPARE_no_af			NULL
#define _CHECK_with_af			guest_check_pte_af
#define _CHECK_no_af			NULL

/* Performs an access and checks that no faults were triggered. */
#define TEST_ACCESS(_access, _with_af, _mark_cmd)				\
{										\
	.name			= SCAT3(_access, _with_af, #_mark_cmd),		\
	.guest_prepare		= { _PREPARE(_with_af),				\
				    _PREPARE(_access) },			\
	.mem_mark_cmd		= _mark_cmd,					\
	.guest_test		= _access,					\
	.guest_test_check	= { _CHECK(_with_af) },				\
	.expected_events	= { 0 },					\
}
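
/*
 * For illustration (an expansion sketch, not what the preprocessor literally
 * emits), TEST_ACCESS(guest_write64, with_af, CMD_NONE) produces roughly:
 *
 *   {
 *     .name             = "guest_write64_with_af_CMD_NONE",
 *     .guest_prepare    = { guest_set_ha, guest_clear_pte_af, NULL },
 *     .mem_mark_cmd     = CMD_NONE,
 *     .guest_test       = guest_write64,
 *     .guest_test_check = { guest_check_pte_af },
 *     .expected_events  = { 0 },
 *   }
 */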

#define TEST_UFFD(_access, _with_af, _mark_cmd,					\
		  _uffd_data_handler, _uffd_pt_handler, _uffd_faults)		\
{										\
	.name			= SCAT4(uffd, _access, _with_af, #_mark_cmd),	\
	.guest_prepare		= { _PREPARE(_with_af),				\
				    _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.mem_mark_cmd		= _mark_cmd,					\
	.guest_test_check	= { _CHECK(_with_af) },				\
	.uffd_data_handler	= _uffd_data_handler,				\
	.uffd_pt_handler	= _uffd_pt_handler,				\
	.expected_events	= { .uffd_faults = _uffd_faults, },		\
}

#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check)		\
{										\
	.name			= SCAT3(dirty_log, _access, _with_af),		\
	.data_memslot_flags	= KVM_MEM_LOG_DIRTY_PAGES,			\
	.pt_memslot_flags	= KVM_MEM_LOG_DIRTY_PAGES,			\
	.guest_prepare		= { _PREPARE(_with_af),				\
				    _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.guest_test_check	= { _CHECK(_with_af), _test_check, _pt_check },	\
	.expected_events	= { 0 },					\
}

#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler,		\
				_uffd_faults, _test_check, _pt_check)		\
{										\
	.name			= SCAT3(uffd_and_dirty_log, _access, _with_af),	\
	.data_memslot_flags	= KVM_MEM_LOG_DIRTY_PAGES,			\
	.pt_memslot_flags	= KVM_MEM_LOG_DIRTY_PAGES,			\
	.guest_prepare		= { _PREPARE(_with_af),				\
				    _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.mem_mark_cmd		= CMD_HOLE_DATA | CMD_HOLE_PT,			\
	.guest_test_check	= { _CHECK(_with_af), _test_check, _pt_check },	\
	.uffd_data_handler	= _uffd_data_handler,				\
	.uffd_pt_handler	= uffd_pt_handler,				\
	.expected_events	= { .uffd_faults = _uffd_faults, },		\
}

#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits)			\
{										\
	.name			= SCAT2(ro_memslot, _access),			\
	.data_memslot_flags	= KVM_MEM_READONLY,				\
	.pt_memslot_flags	= KVM_MEM_READONLY,				\
	.guest_prepare		= { _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.mmio_handler		= _mmio_handler,				\
	.expected_events	= { .mmio_exits = _mmio_exits },		\
}

#define TEST_RO_MEMSLOT_NO_SYNDROME(_access)					\
{										\
	.name			= SCAT2(ro_memslot_no_syndrome, _access),	\
	.data_memslot_flags	= KVM_MEM_READONLY,				\
	.pt_memslot_flags	= KVM_MEM_READONLY,				\
	.guest_test		= _access,					\
	.fail_vcpu_run_handler	= fail_vcpu_run_mmio_no_syndrome_handler,	\
	.expected_events	= { .fail_vcpu_runs = 1 },			\
}

#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits,	\
				      _test_check)				\
{										\
	.name			= SCAT2(ro_memslot_and_dlog, _access),		\
	.data_memslot_flags	= KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,	\
	.pt_memslot_flags	= KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,	\
	.guest_prepare		= { _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.guest_test_check	= { _test_check },				\
	.mmio_handler		= _mmio_handler,				\
	.expected_events	= { .mmio_exits = _mmio_exits },		\
}

#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check)		\
{										\
	.name			= SCAT2(ro_memslot_no_syn_and_dlog, _access),	\
	.data_memslot_flags	= KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,	\
	.pt_memslot_flags	= KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,	\
	.guest_test		= _access,					\
	.guest_test_check	= { _test_check },				\
	.fail_vcpu_run_handler	= fail_vcpu_run_mmio_no_syndrome_handler,	\
	.expected_events	= { .fail_vcpu_runs = 1 },			\
}

#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits,		\
				 _uffd_data_handler, _uffd_faults)		\
{										\
	.name			= SCAT2(ro_memslot_uffd, _access),		\
	.data_memslot_flags	= KVM_MEM_READONLY,				\
	.pt_memslot_flags	= KVM_MEM_READONLY,				\
	.mem_mark_cmd		= CMD_HOLE_DATA | CMD_HOLE_PT,			\
	.guest_prepare		= { _PREPARE(_access) },			\
	.guest_test		= _access,					\
	.uffd_data_handler	= _uffd_data_handler,				\
	.uffd_pt_handler	= uffd_pt_handler,				\
	.mmio_handler		= _mmio_handler,				\
	.expected_events	= { .mmio_exits = _mmio_exits,			\
				    .uffd_faults = _uffd_faults },		\
}

#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler,	\
					     _uffd_faults)			\
{										\
	.name			= SCAT2(ro_memslot_no_syndrome_uffd, _access),	\
	.data_memslot_flags	= KVM_MEM_READONLY,				\
	.pt_memslot_flags	= KVM_MEM_READONLY,				\
	.mem_mark_cmd		= CMD_HOLE_DATA | CMD_HOLE_PT,			\
	.guest_test		= _access,					\
	.uffd_data_handler	= _uffd_data_handler,				\
	.uffd_pt_handler	= uffd_pt_handler,				\
	.fail_vcpu_run_handler	= fail_vcpu_run_mmio_no_syndrome_handler,	\
	.expected_events	= { .fail_vcpu_runs = 1,			\
				    .uffd_faults = _uffd_faults },		\
}

static struct test_desc tests[] = {

	/* Check that HW is setting the Access Flag (AF) (sanity checks). */
	TEST_ACCESS(guest_read64, with_af, CMD_NONE),
	TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
	TEST_ACCESS(guest_cas, with_af, CMD_NONE),
	TEST_ACCESS(guest_write64, with_af, CMD_NONE),
	TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
	TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
	TEST_ACCESS(guest_exec, with_af, CMD_NONE),

	/*
	 * Punch a hole in the data backing store, and then try multiple
	 * accesses: reads should return zeroes, and writes should
	 * re-populate the page. Moreover, the test also checks that no
	 * exception was generated in the guest.  Note that this
	 * reading/writing behavior is the same as reading/writing a
	 * punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
	 * userspace.
	 */
92535c58101SRicardo Koller 	TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
92635c58101SRicardo Koller 	TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
92735c58101SRicardo Koller 	TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
92835c58101SRicardo Koller 	TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
92935c58101SRicardo Koller 	TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
93035c58101SRicardo Koller 	TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
93135c58101SRicardo Koller 	TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),
93235c58101SRicardo Koller 
9333b1d9156SRicardo Koller 	/*
9343b1d9156SRicardo Koller 	 * Punch holes in the data and PT backing stores and mark them for
9353b1d9156SRicardo Koller 	 * userfaultfd handling. This should result in 2 faults: the access
9363b1d9156SRicardo Koller 	 * on the data backing store, and its respective S1 page table walk
9373b1d9156SRicardo Koller 	 * (S1PTW).
9383b1d9156SRicardo Koller 	 */
9393b1d9156SRicardo Koller 	TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9400dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9413b1d9156SRicardo Koller 	TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9420dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9433b1d9156SRicardo Koller 	TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9440dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9453b1d9156SRicardo Koller 	/*
9463b1d9156SRicardo Koller 	 * Can't test guest_at with_af as it's IMPDEF whether the AF is set.
9473b1d9156SRicardo Koller 	 * The S1PTW fault should still be marked as a write.
9483b1d9156SRicardo Koller 	 */
9493b1d9156SRicardo Koller 	TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9500dd8d22aSRicardo Koller 		  uffd_no_handler, uffd_pt_handler, 1),
9513b1d9156SRicardo Koller 	TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9520dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9533b1d9156SRicardo Koller 	TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9540dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9553b1d9156SRicardo Koller 	TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9560dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9573b1d9156SRicardo Koller 	TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9580dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9593b1d9156SRicardo Koller 	TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
9600dd8d22aSRicardo Koller 		  uffd_data_handler, uffd_pt_handler, 2),
9613b1d9156SRicardo Koller 
962a4edf25bSRicardo Koller 	/*
963a4edf25bSRicardo Koller 	 * Try accesses when the data and PT memory regions are both
964a4edf25bSRicardo Koller 	 * tracked for dirty logging.
965a4edf25bSRicardo Koller 	 */
96642561751SRicardo Koller 	TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
96742561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
96842561751SRicardo Koller 	TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
96942561751SRicardo Koller 		       guest_check_no_s1ptw_wr_in_dirty_log),
97042561751SRicardo Koller 	TEST_DIRTY_LOG(guest_ld_preidx, with_af,
97142561751SRicardo Koller 		       guest_check_no_write_in_dirty_log,
97242561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
97342561751SRicardo Koller 	TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
97442561751SRicardo Koller 		       guest_check_no_s1ptw_wr_in_dirty_log),
97542561751SRicardo Koller 	TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
97642561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
97742561751SRicardo Koller 	TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
97842561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
97942561751SRicardo Koller 	TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
98042561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
98142561751SRicardo Koller 	TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
98242561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
98342561751SRicardo Koller 	TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
98442561751SRicardo Koller 		       guest_check_s1ptw_wr_in_dirty_log),
985a4edf25bSRicardo Koller 
98645acde40SRicardo Koller 	/*
987ff2b5509SRicardo Koller 	 * Try accesses when the data and PT memory regions are tracked with
988ff2b5509SRicardo Koller 	 * both dirty logging and userfaultfd at the same time. Writes should
989ff2b5509SRicardo Koller 	 * mark the dirty log and trigger a userfaultfd write fault.  Reads
990ff2b5509SRicardo Koller 	 * and execs should result in a read userfaultfd fault, and nothing
991ff2b5509SRicardo Koller 	 * in the dirty log.  Any S1PTW should result in a write in the dirty
992ff2b5509SRicardo Koller 	 * log and a userfaultfd write. (A combined sketch follows the table.)
993ff2b5509SRicardo Koller 	 */
99442561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
99542561751SRicardo Koller 				uffd_data_handler, 2,
99642561751SRicardo Koller 				guest_check_no_write_in_dirty_log,
99742561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
99842561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
99942561751SRicardo Koller 				uffd_data_handler, 2,
100042561751SRicardo Koller 				guest_check_no_write_in_dirty_log,
100142561751SRicardo Koller 				guest_check_no_s1ptw_wr_in_dirty_log),
100242561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
100342561751SRicardo Koller 				uffd_data_handler, 2,
100442561751SRicardo Koller 				guest_check_no_write_in_dirty_log,
100542561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
10060dd8d22aSRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
100742561751SRicardo Koller 				guest_check_no_write_in_dirty_log,
100842561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
100942561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
101042561751SRicardo Koller 				uffd_data_handler, 2,
101142561751SRicardo Koller 				guest_check_no_write_in_dirty_log,
101242561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
101342561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
101442561751SRicardo Koller 				uffd_data_handler, 2,
101542561751SRicardo Koller 				guest_check_write_in_dirty_log,
101642561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
101742561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
101842561751SRicardo Koller 				uffd_data_handler, 2,
101942561751SRicardo Koller 				guest_check_write_in_dirty_log,
102042561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
102142561751SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
102242561751SRicardo Koller 				uffd_data_handler, 2,
102342561751SRicardo Koller 				guest_check_write_in_dirty_log,
102442561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
1025ff2b5509SRicardo Koller 	TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
10260dd8d22aSRicardo Koller 				uffd_data_handler, 2,
102742561751SRicardo Koller 				guest_check_write_in_dirty_log,
102842561751SRicardo Koller 				guest_check_s1ptw_wr_in_dirty_log),
1029ff2b5509SRicardo Koller 	/*
103008ddbbdfSRicardo Koller 	 * Try accesses when both the PT and data regions are marked
103145acde40SRicardo Koller 	 * read-only (with KVM_MEM_READONLY). Writes with a syndrome result
103245acde40SRicardo Koller 	 * in an MMIO exit, writes with no syndrome (e.g., CAS) result in a
103345acde40SRicardo Koller 	 * failed vcpu run, and reads/execs with and without syndromes do not
103445acde40SRicardo Koller 	 * fault. (A read-only memslot sketch follows the tests table.)
103545acde40SRicardo Koller 	 */
103645acde40SRicardo Koller 	TEST_RO_MEMSLOT(guest_read64, 0, 0),
103745acde40SRicardo Koller 	TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
103845acde40SRicardo Koller 	TEST_RO_MEMSLOT(guest_at, 0, 0),
103945acde40SRicardo Koller 	TEST_RO_MEMSLOT(guest_exec, 0, 0),
104045acde40SRicardo Koller 	TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
104145acde40SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
104245acde40SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
104345acde40SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
104445acde40SRicardo Koller 
1045ff2b5509SRicardo Koller 	/*
104608ddbbdfSRicardo Koller 	 * Try accesses when the PT and data regions are both read-only and
1047ff2b5509SRicardo Koller 	 * marked for dirty logging at the same time. The expected result is
1048ff2b5509SRicardo Koller 	 * that no write ever shows up in the dirty log. The read-only
1049ff2b5509SRicardo Koller 	 * handling is the same as if the memslot were not marked for dirty
1050ff2b5509SRicardo Koller 	 * logging: writes with a syndrome result in an MMIO exit, and
1051ff2b5509SRicardo Koller 	 * writes with no syndrome result in a failed vcpu run.
1052ff2b5509SRicardo Koller 	 */
1053ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
1054ff2b5509SRicardo Koller 				      guest_check_no_write_in_dirty_log),
1055ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
1056ff2b5509SRicardo Koller 				      guest_check_no_write_in_dirty_log),
1057ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
1058ff2b5509SRicardo Koller 				      guest_check_no_write_in_dirty_log),
1059ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
1060ff2b5509SRicardo Koller 				      guest_check_no_write_in_dirty_log),
1061ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
1062ff2b5509SRicardo Koller 				      1, guest_check_no_write_in_dirty_log),
1063ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
1064ff2b5509SRicardo Koller 						  guest_check_no_write_in_dirty_log),
1065ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
1066ff2b5509SRicardo Koller 						  guest_check_no_write_in_dirty_log),
1067ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
1068ff2b5509SRicardo Koller 						  guest_check_no_write_in_dirty_log),
1069ff2b5509SRicardo Koller 
1070ff2b5509SRicardo Koller 	/*
107108ddbbdfSRicardo Koller 	 * The PT and data regions are both read-only and punched with
1072ff2b5509SRicardo Koller 	 * holes tracked with userfaultfd.  The expected result is the
1073ff2b5509SRicardo Koller 	 * union of both userfaultfd and read-only behaviors. For example,
1074ff2b5509SRicardo Koller 	 * write accesses result in a userfaultfd write fault and an MMIO
1075ff2b5509SRicardo Koller 	 * exit.  Writes with no syndrome result in a failed vcpu run and
1076ff2b5509SRicardo Koller 	 * no userfaultfd write fault. Reads trigger a userfaultfd read
1077ff2b5509SRicardo Koller 	 * fault.
1078ff2b5509SRicardo Koller 	 */
10790dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
10800dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
10810dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
10820dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
1083ff2b5509SRicardo Koller 	TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
10840dd8d22aSRicardo Koller 				 uffd_data_handler, 2),
10850dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
10860dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
10870dd8d22aSRicardo Koller 	TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
1088ff2b5509SRicardo Koller 
108935c58101SRicardo Koller 	{ 0 }
109035c58101SRicardo Koller };
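
/*
 * Editorial note: a minimal, hedged sketch of a userfaultfd data handler in
 * the spirit of the uffd_data_handler used in the table above; the test's
 * real handler lives earlier in this file. The 4K page size, the source
 * buffer, the handler name, and the exact uffd_handler_t signature are
 * illustrative assumptions.
 */
static char uffd_sketch_src[4096];

static int uffd_sketch_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
	struct uffdio_copy copy;
	bool is_write;

	if (msg->event != UFFD_EVENT_PAGEFAULT)
		return -1;
	is_write = msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE;

	/* Fill the punched hole so the faulting guest access can complete. */
	copy.src = (uintptr_t)uffd_sketch_src;
	copy.dst = msg->arg.pagefault.address & ~(uint64_t)(4096 - 1);
	copy.len = 4096;
	copy.mode = 0;
	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -errno;

	/* TEST_UFFD's expected fault count is checked against this total. */
	pthread_mutex_lock(&events.uffd_faults_mutex);
	events.uffd_faults += 1;
	pthread_mutex_unlock(&events.uffd_faults_mutex);

	/* The real handler also validates the expected read/write flag. */
	(void)mode;
	(void)is_write;
	return 0;
}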
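
/*
 * Editorial note: a hedged sketch of the host side of a dirty-log check such
 * as guest_check_write_in_dirty_log. The helper name and the slot/page
 * arguments are illustrative; kvm_vm_get_dirty_log(), bitmap_zalloc(), and
 * test_bit() are the selftest and tools APIs such a check would rely on.
 */
static bool sketch_page_is_dirty(struct kvm_vm *vm, int slot,
				 uint64_t page_idx, uint64_t nr_pages)
{
	unsigned long *bmap = bitmap_zalloc(nr_pages);
	bool dirty;

	/* Fetch the slot's dirty bitmap and test the page of interest. */
	kvm_vm_get_dirty_log(vm, slot, bmap);
	dirty = test_bit(page_idx, bmap);
	free(bmap);

	return dirty;
}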
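
/*
 * Editorial note: a hedged sketch tying the two sketches above together for
 * the TEST_UFFD_AND_DIRTY_LOG write cases: the access is expected to both
 * resolve through userfaultfd (data access + S1PTW) and leave a dirty bit.
 * The slot and page values are illustrative.
 */
static void sketch_check_write_uffd_and_dirty_log(struct kvm_vm *vm)
{
	TEST_ASSERT(events.uffd_faults == 2,
		    "expected a data and an S1PTW userfaultfd fault");
	TEST_ASSERT(sketch_page_is_dirty(vm, 1 /* slot */, 0 /* page */, 1),
		    "expected the write to show up in the dirty log");
}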
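
/*
 * Editorial note: a hedged sketch of the read-only memslot behavior the
 * TEST_RO_MEMSLOT cases rely on. The gpa, slot, and page count are
 * illustrative; vm_userspace_mem_region_add() and vcpu_run() are the
 * selftest APIs. A syndrome-carrying write to a KVM_MEM_READONLY slot is
 * expected to reach userspace as a KVM_EXIT_MMIO with is_write set.
 */
static void sketch_ro_write_gets_mmio_exit(struct kvm_vm *vm,
					   struct kvm_vcpu *vcpu)
{
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0xc0000000 /* gpa, illustrative */,
				    1 /* slot */, 1 /* npages */,
				    KVM_MEM_READONLY);

	vcpu_run(vcpu);	/* the guest performs a syndrome-carrying write */

	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_MMIO &&
		    vcpu->run->mmio.is_write,
		    "expected a write MMIO exit on the read-only slot");
}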
109135c58101SRicardo Koller 
109235c58101SRicardo Koller static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
109335c58101SRicardo Koller {
109435c58101SRicardo Koller 	struct test_desc *t;
109535c58101SRicardo Koller 
109635c58101SRicardo Koller 	for (t = &tests[0]; t->name; t++) {
109735c58101SRicardo Koller 		if (t->skip)
109835c58101SRicardo Koller 			continue;
109935c58101SRicardo Koller 
110035c58101SRicardo Koller 		struct test_params p = {
110135c58101SRicardo Koller 			.src_type = src_type,
110235c58101SRicardo Koller 			.test_desc = t,
110335c58101SRicardo Koller 		};
110435c58101SRicardo Koller 
110535c58101SRicardo Koller 		for_each_guest_mode(run_test, &p);
110635c58101SRicardo Koller 	}
110735c58101SRicardo Koller }
110835c58101SRicardo Koller 
110935c58101SRicardo Koller int main(int argc, char *argv[])
111035c58101SRicardo Koller {
111135c58101SRicardo Koller 	enum vm_mem_backing_src_type src_type;
111235c58101SRicardo Koller 	int opt;
111335c58101SRicardo Koller 
111435c58101SRicardo Koller 	src_type = DEFAULT_VM_MEM_SRC;
111535c58101SRicardo Koller 
111635c58101SRicardo Koller 	while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
111735c58101SRicardo Koller 		switch (opt) {
111835c58101SRicardo Koller 		case 'm':
111935c58101SRicardo Koller 			guest_modes_cmdline(optarg);
112035c58101SRicardo Koller 			break;
112135c58101SRicardo Koller 		case 's':
112235c58101SRicardo Koller 			src_type = parse_backing_src_type(optarg);
112335c58101SRicardo Koller 			break;
112435c58101SRicardo Koller 		case 'h':
112535c58101SRicardo Koller 		default:
112635c58101SRicardo Koller 			help(argv[0]);
112735c58101SRicardo Koller 			exit(0);
112835c58101SRicardo Koller 		}
112935c58101SRicardo Koller 	}
113035c58101SRicardo Koller 
113135c58101SRicardo Koller 	for_each_test_and_guest_mode(src_type);
113235c58101SRicardo Koller 	return 0;
113335c58101SRicardo Koller }
1134
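
/*
 * Editorial note: hedged usage sketch, not part of the test source. Given
 * the getopt loop above, invocations look like the following; the mode id
 * and backing source name are illustrative and are parsed by
 * guest_modes_cmdline() and parse_backing_src_type() respectively:
 *
 *   ./page_fault_test                   # all guest modes, DEFAULT_VM_MEM_SRC
 *   ./page_fault_test -m 1              # restrict to one guest mode id
 *   ./page_fault_test -s anonymous_thp  # back guest memory with anon THP
 */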