130267b43SChenyi Qiang // SPDX-License-Identifier: GPL-2.0-only
230267b43SChenyi Qiang #include "test_util.h"
330267b43SChenyi Qiang #include "kvm_util.h"
430267b43SChenyi Qiang #include "processor.h"
530267b43SChenyi Qiang #include "vmx.h"
68357b9e1SMaxim Levitsky #include "svm_util.h"
730267b43SChenyi Qiang
830267b43SChenyi Qiang #include <string.h>
930267b43SChenyi Qiang #include <sys/ioctl.h>
1030267b43SChenyi Qiang
1130267b43SChenyi Qiang #include "kselftest.h"
1230267b43SChenyi Qiang
1330267b43SChenyi Qiang #define ARBITRARY_IO_PORT 0x2000
1430267b43SChenyi Qiang
1530267b43SChenyi Qiang /* The virtual machine object. */
1630267b43SChenyi Qiang static struct kvm_vm *vm;
1730267b43SChenyi Qiang
/*
 * L2 guest body: execute a single IN from ARBITRARY_IO_PORT.  The port is
 * not handled in-kernel, so this forces an exit to userspace (KVM_EXIT_IO),
 * giving the host test a window to inject the triple fault event.
 */
static void l2_guest_code(void)
{
	/*
	 * Port goes in DX via the "d" constraint; AL receives the (ignored)
	 * input byte, hence the "rax" clobber.  The [port] operand name is
	 * unused in the template since %%dx is referenced directly.
	 */
	asm volatile("inb %%dx, %%al"
		     : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}
2330267b43SChenyi Qiang
#define L2_GUEST_STACK_SIZE 64
/* Stack for L2; shared by the VMX and SVM L1 flavors (only one runs). */
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
2630267b43SChenyi Qiang
/*
 * L1 guest body (Intel/VMX): enter VMX operation, launch L2, and verify
 * that the triple fault event injected from userspace is reflected to L1
 * as a nested VM-exit with EXIT_REASON_TRIPLE_FAULT.
 *
 * @vmx: VMX state pages allocated by the host via vcpu_alloc_vmx().
 */
void l1_guest_code_vmx(struct vmx_pages *vmx)
{

	/* VMXON/VMCS setup must happen in exactly this order. */
	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	prepare_vmcs(vmx, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* vmlaunch() returns 0 on a successful VM-exit back to L1. */
	GUEST_ASSERT(!vmlaunch());
	/* L2 should triple fault after a triple fault event injected. */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
	GUEST_DONE();
}
4230267b43SChenyi Qiang
/*
 * L1 guest body (AMD/SVM): launch L2 with the SHUTDOWN intercept cleared,
 * so the injected triple fault is NOT reflected to L1 but instead takes
 * down L1 itself (the host then expects KVM_EXIT_SHUTDOWN).
 *
 * @svm: SVM state allocated by the host via vcpu_alloc_svm().
 */
void l1_guest_code_svm(struct svm_test_data *svm)
{
	struct vmcb *vmcb = svm->vmcb;

	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* don't intercept shutdown to test the case of SVM allowing to do so */
	vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));

	run_guest(vmcb, svm->vmcb_gpa);

	/* should not reach here, L1 should crash */
	GUEST_ASSERT(0);
}
588357b9e1SMaxim Levitsky
/*
 * Test KVM_CAP_X86_TRIPLE_FAULT_EVENT: while L2 is stopped at a userspace
 * I/O exit, inject a pending triple fault with KVM_SET_VCPU_EVENTS, read it
 * back to confirm it stuck, then resume and check the architecture-specific
 * outcome (VMX: L1 observes a TRIPLE_FAULT VM-exit; SVM with the SHUTDOWN
 * intercept cleared: the whole vCPU shuts down).
 */
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vcpu_events events;
	struct ucall uc;

	bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
	bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);

	/* Need some flavor of nested virtualization. */
	TEST_REQUIRE(has_vmx || has_svm);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));


	/* Pick the L1 guest body matching the host CPU; VMX wins if both. */
	if (has_vmx) {
		vm_vaddr_t vmx_pages_gva;

		vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
		vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vcpu, 1, vmx_pages_gva);
	} else {
		vm_vaddr_t svm_gva;

		vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
		vcpu_alloc_svm(vm, &svm_gva);
		vcpu_args_set(vcpu, 1, svm_gva);
	}

	/* Must be enabled before the triple fault event can be set/read. */
	vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
	run = vcpu->run;
	vcpu_run(vcpu);

	/* L2's IN from ARBITRARY_IO_PORT should have exited to userspace. */
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
		    "Expected IN from port %d from L2, got port %d",
		    ARBITRARY_IO_PORT, run->io.port);
	/* Inject a pending triple fault while the vCPU is in userspace. */
	vcpu_events_get(vcpu, &events);
	events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
	events.triple_fault.pending = true;
	vcpu_events_set(vcpu, &events);
	/*
	 * Finish the emulated IN without actually re-entering the guest, so
	 * the pending event can be re-read before it is delivered.
	 */
	run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);

	/* Verify the injected event survived the set/get round trip. */
	vcpu_events_get(vcpu, &events);
	TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
		    "Triple fault event invalid");
	TEST_ASSERT(events.triple_fault.pending,
		    "No triple fault pending");
	/* Resume; the triple fault is now delivered to L2. */
	vcpu_run(vcpu);


	if (has_svm) {
		/* SHUTDOWN isn't intercepted, so the shutdown reaches L0. */
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
	} else {
		/* VMX: L1 saw the TRIPLE_FAULT VM-exit and signaled DONE. */
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_DONE:
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED - REPORT_GUEST_ASSERT() exits. */
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
	return 0;
}
125