// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

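/* Any unhandled I/O port works; an IN from it exits to L0 userspace as KVM_EXIT_IO. */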
#define ARBITRARY_IO_PORT	0x2000

/* The virtual machine object. */
static struct kvm_vm *vm;

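/*
 * L2 does an IN from the arbitrary port.  Nothing handles the I/O, so it
 * reaches L0 userspace as KVM_EXIT_IO, which is where the test injects the
 * triple fault event.
 */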
static void l2_guest_code(void)
{
	asm volatile("inb %%dx, %%al"
		     : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}

#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

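/*
 * L1 (VMX): enter VMX operation, launch L2, and expect the injected triple
 * fault to be reflected to L1 as an EXIT_REASON_TRIPLE_FAULT VM-exit.
 */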
void l1_guest_code_vmx(struct vmx_pages *vmx)
{
	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	prepare_vmcs(vmx, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmlaunch());
	/* L2 should triple fault after the triple fault event is injected. */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
	GUEST_DONE();
}

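/*
 * L1 (SVM): run L2 without intercepting shutdown, so the triple fault
 * injected into L2 shuts down L1 as well.
 */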
void l1_guest_code_svm(struct svm_test_data *svm)
{
	struct vmcb *vmcb = svm->vmcb;

	generic_svm_setup(svm, l2_guest_code,
			&l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Don't intercept shutdown; SVM allows L1 to leave it unintercepted,
	 * and this tests that case.
	 */
	vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));

	run_guest(vmcb, svm->vmcb_gpa);

	/* Should not reach here; the un-intercepted shutdown crashes L1 too. */
	GUEST_ASSERT(0);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vcpu_events events;
	struct ucall uc;

	bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
	bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);

	TEST_REQUIRE(has_vmx || has_svm);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));

	if (has_vmx) {
		vm_vaddr_t vmx_pages_gva;

		vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
		vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vcpu, 1, vmx_pages_gva);
	} else {
		vm_vaddr_t svm_gva;

		vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
		vcpu_alloc_svm(vm, &svm_gva);
		vcpu_args_set(vcpu, 1, svm_gva);
	}

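	/*
	 * Enable per-VM triple fault event reporting so a pending triple
	 * fault can be set and read back via KVM_SET/GET_VCPU_EVENTS.
	 */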
	vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
	run = vcpu->run;
	vcpu_run(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
		    "Expected IN from port %d from L2, got port %d",
		    ARBITRARY_IO_PORT, run->io.port);
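	/* Queue a pending triple fault while L2's IN is still being emulated. */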
	vcpu_events_get(vcpu, &events);
	events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
	events.triple_fault.pending = true;
	vcpu_events_set(vcpu, &events);
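	/* Complete the in-flight IN without re-entering the guest. */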
	run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);

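	/* The triple fault should still be reported as pending. */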
	vcpu_events_get(vcpu, &events);
	TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
		    "Triple fault event invalid");
	TEST_ASSERT(events.triple_fault.pending,
		    "No triple fault pending");
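	/* Resume the vCPU; the pending triple fault is handled while L2 is active. */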
	vcpu_run(vcpu);

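	/*
	 * On SVM, L1 doesn't intercept shutdown, so the whole VM shuts down
	 * (KVM_EXIT_SHUTDOWN).  On VMX, L1 sees the triple fault VM-exit and
	 * reports UCALL_DONE.
	 */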
	if (has_svm) {
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
	} else {
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_DONE:
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
	return 0;
}