// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_apic_access_test
 *
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest simply checks to see that an L2 guest can be
 * launched with a valid APIC-access address that is backed by a
 * page of L1 physical memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory.  KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation).  This is not an architectural
 * requirement.  It is just a shortcoming of KVM.  The internal error
 * is unfortunate, but it's better than what used to happen!
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#define VCPU_ID 0

/* The virtual machine object.
*/ 3497a71a54SJim Mattson static struct kvm_vm *vm; 3597a71a54SJim Mattson 3697a71a54SJim Mattson static void l2_guest_code(void) 3797a71a54SJim Mattson { 3897a71a54SJim Mattson /* Exit to L1 */ 3997a71a54SJim Mattson __asm__ __volatile__("vmcall"); 4097a71a54SJim Mattson } 4197a71a54SJim Mattson 4297a71a54SJim Mattson static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa) 4397a71a54SJim Mattson { 4497a71a54SJim Mattson #define L2_GUEST_STACK_SIZE 64 4597a71a54SJim Mattson unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 4697a71a54SJim Mattson uint32_t control; 4797a71a54SJim Mattson 4897a71a54SJim Mattson GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 4997a71a54SJim Mattson GUEST_ASSERT(load_vmcs(vmx_pages)); 5097a71a54SJim Mattson 5197a71a54SJim Mattson /* Prepare the VMCS for L2 execution. */ 5297a71a54SJim Mattson prepare_vmcs(vmx_pages, l2_guest_code, 5397a71a54SJim Mattson &l2_guest_stack[L2_GUEST_STACK_SIZE]); 5497a71a54SJim Mattson control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); 5597a71a54SJim Mattson control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 5697a71a54SJim Mattson vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); 5797a71a54SJim Mattson control = vmreadz(SECONDARY_VM_EXEC_CONTROL); 5897a71a54SJim Mattson control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 5997a71a54SJim Mattson vmwrite(SECONDARY_VM_EXEC_CONTROL, control); 6097a71a54SJim Mattson vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa); 6197a71a54SJim Mattson 6297a71a54SJim Mattson /* Try to launch L2 with the memory-backed APIC-access address. */ 6397a71a54SJim Mattson GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR)); 6497a71a54SJim Mattson GUEST_ASSERT(!vmlaunch()); 6597a71a54SJim Mattson GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 6697a71a54SJim Mattson 6797a71a54SJim Mattson vmwrite(APIC_ACCESS_ADDR, high_gpa); 6897a71a54SJim Mattson 6997a71a54SJim Mattson /* Try to resume L2 with the unbacked APIC-access address. 
*/ 7097a71a54SJim Mattson GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR)); 7197a71a54SJim Mattson GUEST_ASSERT(!vmresume()); 7297a71a54SJim Mattson GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 7397a71a54SJim Mattson 7497a71a54SJim Mattson GUEST_DONE(); 7597a71a54SJim Mattson } 7697a71a54SJim Mattson 7797a71a54SJim Mattson int main(int argc, char *argv[]) 7897a71a54SJim Mattson { 7997a71a54SJim Mattson unsigned long apic_access_addr = ~0ul; 8097a71a54SJim Mattson unsigned int paddr_width; 8197a71a54SJim Mattson unsigned int vaddr_width; 8297a71a54SJim Mattson vm_vaddr_t vmx_pages_gva; 8397a71a54SJim Mattson unsigned long high_gpa; 8497a71a54SJim Mattson struct vmx_pages *vmx; 8597a71a54SJim Mattson bool done = false; 8697a71a54SJim Mattson 8797a71a54SJim Mattson nested_vmx_check_supported(); 8897a71a54SJim Mattson 8997a71a54SJim Mattson vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); 9097a71a54SJim Mattson 9197a71a54SJim Mattson kvm_get_cpu_address_width(&paddr_width, &vaddr_width); 9297a71a54SJim Mattson high_gpa = (1ul << paddr_width) - getpagesize(); 9397a71a54SJim Mattson if ((unsigned long)DEFAULT_GUEST_PHY_PAGES * getpagesize() > high_gpa) { 9497a71a54SJim Mattson print_skip("No unbacked physical page available"); 9597a71a54SJim Mattson exit(KSFT_SKIP); 9697a71a54SJim Mattson } 9797a71a54SJim Mattson 9897a71a54SJim Mattson vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); 99*444d084bSSean Christopherson prepare_virtualize_apic_accesses(vmx, vm); 10097a71a54SJim Mattson vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa); 10197a71a54SJim Mattson 10297a71a54SJim Mattson while (!done) { 10397a71a54SJim Mattson volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); 10497a71a54SJim Mattson struct ucall uc; 10597a71a54SJim Mattson 10697a71a54SJim Mattson vcpu_run(vm, VCPU_ID); 10797a71a54SJim Mattson if (apic_access_addr == high_gpa) { 10897a71a54SJim Mattson TEST_ASSERT(run->exit_reason == 10997a71a54SJim Mattson KVM_EXIT_INTERNAL_ERROR, 
11097a71a54SJim Mattson "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n", 11197a71a54SJim Mattson run->exit_reason, 11297a71a54SJim Mattson exit_reason_str(run->exit_reason)); 11397a71a54SJim Mattson TEST_ASSERT(run->internal.suberror == 11497a71a54SJim Mattson KVM_INTERNAL_ERROR_EMULATION, 11597a71a54SJim Mattson "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n", 11697a71a54SJim Mattson run->internal.suberror); 11797a71a54SJim Mattson break; 11897a71a54SJim Mattson } 11997a71a54SJim Mattson TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 12097a71a54SJim Mattson "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 12197a71a54SJim Mattson run->exit_reason, 12297a71a54SJim Mattson exit_reason_str(run->exit_reason)); 12397a71a54SJim Mattson 12497a71a54SJim Mattson switch (get_ucall(vm, VCPU_ID, &uc)) { 12597a71a54SJim Mattson case UCALL_ABORT: 12697a71a54SJim Mattson TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 12797a71a54SJim Mattson __FILE__, uc.args[1]); 12897a71a54SJim Mattson /* NOT REACHED */ 12997a71a54SJim Mattson case UCALL_SYNC: 13097a71a54SJim Mattson apic_access_addr = uc.args[1]; 13197a71a54SJim Mattson break; 13297a71a54SJim Mattson case UCALL_DONE: 13397a71a54SJim Mattson done = true; 13497a71a54SJim Mattson break; 13597a71a54SJim Mattson default: 13697a71a54SJim Mattson TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd); 13797a71a54SJim Mattson } 13897a71a54SJim Mattson } 13997a71a54SJim Mattson kvm_vm_free(vm); 14097a71a54SJim Mattson return 0; 14197a71a54SJim Mattson } 142