// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_apic_access_test
 *
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest checks that an L2 guest can be launched with a
 * valid APIC-access address that is backed by a page of L1 physical
 * memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement. It is just a shortcoming of KVM. The internal error
 * is unfortunate, but it's better than what used to happen!
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
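
	/*
	 * Enable "virtualize APIC accesses" via the secondary
	 * processor-based VM-execution controls, and point the
	 * APIC-access address at the memory-backed page set up by the
	 * host side of the test.
	 */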
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/* Try to launch L2 with the memory-backed APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

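	/*
	 * Switch to an APIC-access address that is valid but not backed
	 * by L1 memory.  As noted at the top of the file, KVM is expected
	 * to exit to userspace with an emulation error on the subsequent
	 * vmresume, so execution should never reach GUEST_DONE().
	 */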
	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/* Try to resume L2 with the unbacked APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	unsigned long apic_access_addr = ~0ul;
	vm_vaddr_t vmx_pages_gva;
	unsigned long high_gpa;
	struct vmx_pages *vmx;
	bool done = false;

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	nested_vmx_check_supported();

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

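	/*
	 * Pick a GPA near the top of the guest-physical address space;
	 * nothing in the test maps a memslot there, so it is a valid but
	 * unbacked L1 physical address.
	 */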
	high_gpa = (vm->max_gfn - 1) << vm->page_shift;

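	/*
	 * Allocate the nested VMX state and a memory-backed page for the
	 * APIC-access address; its GPA is recorded in
	 * vmx->apic_access_gpa for l1_guest_code() to use.
	 */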
	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	prepare_virtualize_apic_accesses(vmx, vm);
	vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);

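	/*
	 * Run L1 to completion.  Each GUEST_SYNC reports the current
	 * APIC-access address; once the unbacked address has been
	 * reported, the next KVM_RUN is expected to fail with an
	 * emulation error rather than a normal ucall exit.
	 */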
	while (!done) {
		volatile struct kvm_run *run = vcpu->run;
		struct ucall uc;

		vcpu_run(vcpu);
		if (apic_access_addr == high_gpa) {
			TEST_ASSERT(run->exit_reason ==
				    KVM_EXIT_INTERNAL_ERROR,
				    "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
				    run->exit_reason,
				    exit_reason_str(run->exit_reason));
			TEST_ASSERT(run->internal.suberror ==
				    KVM_INTERNAL_ERROR_EMULATION,
				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
				    run->internal.suberror);
			break;
		}
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			apic_access_addr = uc.args[1];
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
		}
	}
	kvm_vm_free(vm);
	return 0;
}