// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1
#define TEST_MEM_PAGES			3

/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM			0xc0000000

/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1		0xc0001000
#define NESTED_TEST_MEM2		0xc0002000
l2_guest_code(void)3109444420SPaolo Bonzini static void l2_guest_code(void)
3209444420SPaolo Bonzini {
3309444420SPaolo Bonzini 	*(volatile uint64_t *)NESTED_TEST_MEM1;
3409444420SPaolo Bonzini 	*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
3509444420SPaolo Bonzini 	GUEST_SYNC(true);
3609444420SPaolo Bonzini 	GUEST_SYNC(false);
3709444420SPaolo Bonzini 
3809444420SPaolo Bonzini 	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
3909444420SPaolo Bonzini 	GUEST_SYNC(true);
4009444420SPaolo Bonzini 	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
4109444420SPaolo Bonzini 	GUEST_SYNC(true);
4209444420SPaolo Bonzini 	GUEST_SYNC(false);
4309444420SPaolo Bonzini 
4409444420SPaolo Bonzini 	/* Exit to L1 and never come back.  */
4509444420SPaolo Bonzini 	vmcall();
4609444420SPaolo Bonzini }
4709444420SPaolo Bonzini 
l1_guest_code(struct vmx_pages * vmx)4809444420SPaolo Bonzini void l1_guest_code(struct vmx_pages *vmx)
4909444420SPaolo Bonzini {
5009444420SPaolo Bonzini #define L2_GUEST_STACK_SIZE 64
5109444420SPaolo Bonzini 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
5209444420SPaolo Bonzini 
5309444420SPaolo Bonzini 	GUEST_ASSERT(vmx->vmcs_gpa);
5409444420SPaolo Bonzini 	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
5509444420SPaolo Bonzini 	GUEST_ASSERT(load_vmcs(vmx));
5609444420SPaolo Bonzini 
5709444420SPaolo Bonzini 	prepare_vmcs(vmx, l2_guest_code,
5809444420SPaolo Bonzini 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
5909444420SPaolo Bonzini 
6009444420SPaolo Bonzini 	GUEST_SYNC(false);
6109444420SPaolo Bonzini 	GUEST_ASSERT(!vmlaunch());
6209444420SPaolo Bonzini 	GUEST_SYNC(false);
6309444420SPaolo Bonzini 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
6409444420SPaolo Bonzini 	GUEST_DONE();
6509444420SPaolo Bonzini }
6609444420SPaolo Bonzini 
main(int argc,char * argv[])6709444420SPaolo Bonzini int main(int argc, char *argv[])
6809444420SPaolo Bonzini {
6909444420SPaolo Bonzini 	vm_vaddr_t vmx_pages_gva = 0;
7009444420SPaolo Bonzini 	struct vmx_pages *vmx;
7109444420SPaolo Bonzini 	unsigned long *bmap;
7209444420SPaolo Bonzini 	uint64_t *host_test_mem;
7309444420SPaolo Bonzini 
74ec7b769aSSean Christopherson 	struct kvm_vcpu *vcpu;
7509444420SPaolo Bonzini 	struct kvm_vm *vm;
7609444420SPaolo Bonzini 	struct ucall uc;
7709444420SPaolo Bonzini 	bool done = false;
7809444420SPaolo Bonzini 
791ecbb337SSean Christopherson 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
805c107f70SDavid Matlack 	TEST_REQUIRE(kvm_cpu_has_ept());
8111eada47SVitaly Kuznetsov 
8209444420SPaolo Bonzini 	/* Create VM */
83ec7b769aSSean Christopherson 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
8409444420SPaolo Bonzini 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
85768e9a61SSean Christopherson 	vcpu_args_set(vcpu, 1, vmx_pages_gva);
8609444420SPaolo Bonzini 
8709444420SPaolo Bonzini 	/* Add an extra memory slot for testing dirty logging */
8809444420SPaolo Bonzini 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
8909444420SPaolo Bonzini 				    GUEST_TEST_MEM,
9009444420SPaolo Bonzini 				    TEST_MEM_SLOT_INDEX,
91beca5470SAndrew Jones 				    TEST_MEM_PAGES,
9209444420SPaolo Bonzini 				    KVM_MEM_LOG_DIRTY_PAGES);
9309444420SPaolo Bonzini 
9409444420SPaolo Bonzini 	/*
9509444420SPaolo Bonzini 	 * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
9609444420SPaolo Bonzini 	 * affects both L1 and L2.  However...
9709444420SPaolo Bonzini 	 */
984307af73SSean Christopherson 	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
9909444420SPaolo Bonzini 
10009444420SPaolo Bonzini 	/*
10109444420SPaolo Bonzini 	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
10209444420SPaolo Bonzini 	 * 0xc0000000.
10309444420SPaolo Bonzini 	 *
10409444420SPaolo Bonzini 	 * Note that prepare_eptp should be called only L1's GPA map is done,
10509444420SPaolo Bonzini 	 * meaning after the last call to virt_map.
10609444420SPaolo Bonzini 	 */
10709444420SPaolo Bonzini 	prepare_eptp(vmx, vm, 0);
108444d084bSSean Christopherson 	nested_map_memslot(vmx, vm, 0);
109444d084bSSean Christopherson 	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
110444d084bSSean Christopherson 	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
11109444420SPaolo Bonzini 
1127fc5b571SAndy Shevchenko 	bmap = bitmap_zalloc(TEST_MEM_PAGES);
11309444420SPaolo Bonzini 	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
11409444420SPaolo Bonzini 
11509444420SPaolo Bonzini 	while (!done) {
116beca5470SAndrew Jones 		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
117768e9a61SSean Christopherson 		vcpu_run(vcpu);
118*c96f57b0SVipin Sharma 		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
11909444420SPaolo Bonzini 
120768e9a61SSean Christopherson 		switch (get_ucall(vcpu, &uc)) {
12109444420SPaolo Bonzini 		case UCALL_ABORT:
122594a1c27SColton Lewis 			REPORT_GUEST_ASSERT(uc);
12309444420SPaolo Bonzini 			/* NOT REACHED */
12409444420SPaolo Bonzini 		case UCALL_SYNC:
12509444420SPaolo Bonzini 			/*
12609444420SPaolo Bonzini 			 * The nested guest wrote at offset 0x1000 in the memslot, but the
12709444420SPaolo Bonzini 			 * dirty bitmap must be filled in according to L1 GPA, not L2.
12809444420SPaolo Bonzini 			 */
12909444420SPaolo Bonzini 			kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
13009444420SPaolo Bonzini 			if (uc.args[1]) {
13109444420SPaolo Bonzini 				TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
13209444420SPaolo Bonzini 				TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
13309444420SPaolo Bonzini 			} else {
13409444420SPaolo Bonzini 				TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
13509444420SPaolo Bonzini 				TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
13609444420SPaolo Bonzini 			}
13709444420SPaolo Bonzini 
13809444420SPaolo Bonzini 			TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
13909444420SPaolo Bonzini 			TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
14009444420SPaolo Bonzini 			TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
14109444420SPaolo Bonzini 			TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
14209444420SPaolo Bonzini 			break;
14309444420SPaolo Bonzini 		case UCALL_DONE:
14409444420SPaolo Bonzini 			done = true;
14509444420SPaolo Bonzini 			break;
14609444420SPaolo Bonzini 		default:
147352be2c5SWainer dos Santos Moschetta 			TEST_FAIL("Unknown ucall %lu", uc.cmd);
14809444420SPaolo Bonzini 		}
14909444420SPaolo Bonzini 	}
15009444420SPaolo Bonzini }
151