// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Oracle and/or its affiliates.
 *
 * Based on:
 *   svm_int_ctl_test
 *
 *   Copyright (C) 2021, Red Hat, Inc.
 *
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "test_util.h"

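/* Vector of the software interrupt that L1 injects into L2 in the soft-int test */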
#define INT_NR			0x20

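/*
 * nmi_stage below is updated from the guest's NMI handler, so insist on
 * lock-free atomics; a lock-based implementation could not be used safely
 * from NMI context.
 */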
static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");

static unsigned int bp_fired;
static void guest_bp_handler(struct ex_regs *regs)
{
	bp_fired++;
}

static unsigned int int_fired;
static void l2_guest_code_int(void);

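/*
 * Handler for the injected software interrupt: the return RIP pushed on the
 * stack must match the next_rip that L1 programmed, i.e. the very start of
 * l2_guest_code_int().
 */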
static void guest_int_handler(struct ex_regs *regs)
{
	int_fired++;
	GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int);
}

static void l2_guest_code_int(void)
{
	GUEST_ASSERT_EQ(int_fired, 1);

	/*
	 * Same as the vmmcall() function, but with a ud2 sneaked after the
	 * vmmcall.  The caller injects an exception with the return address
	 * increased by 2, so the "pop rbp" must be after the ud2 and we cannot
	 * use vmmcall() directly.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");

	GUEST_ASSERT_EQ(bp_fired, 1);
	hlt();
}

static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
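/*
 * NMI handler shared by L1 and L2 (L2 starts out with L1's IDT):
 *   stage 1: the NMI that L1 injected into L2; VMMCALL back to L1 without
 *            returning from the handler.
 *   stage 2: bumped by L1 itself right before STGI (see l1_guest_code()).
 *   stage 3: the self-NMI that L1 queued becomes deliverable after STGI;
 *            reaching it ends the test.
 */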
static void guest_nmi_handler(struct ex_regs *regs)
{
	nmi_stage_inc();

	if (nmi_stage_get() == 1) {
		vmmcall();
		GUEST_FAIL("Unexpected resume after VMMCALL");
	} else {
		GUEST_ASSERT_EQ(nmi_stage_get(), 3);
		GUEST_DONE();
	}
}

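/*
 * L2 body for the NMI test.  The injected NMI is delivered before the first
 * instruction and its handler never IRETs back here, so the ud2 should never
 * execute; if it somehow did, the #UD intercept set up by L1 would turn it
 * into an unexpected #VMEXIT.
 */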
static void l2_guest_code_nmi(void)
{
	ud2();
}

static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	if (is_nmi)
		x2apic_enable();

	/* Prepare for L2 execution. */
	generic_svm_setup(svm,
			  is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

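	/*
	 * Intercept #PF/#UD exceptions plus NMI and HLT in L2.  HLT is how
	 * l2_guest_code_int() hands control back to L1 at the end of the
	 * soft-int test; the other intercepts make unexpected events in L2
	 * surface as #VMEXITs that the asserts below will flag.
	 */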
	vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
	vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);

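	/*
	 * Inject the initial event directly into L2 via EVENTINJ: an NMI for
	 * the NMI test, or the INT_NR software interrupt for the soft-int
	 * test.  For the latter, next_rip is what gets pushed on the stack
	 * as the interrupt's return address.
	 */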
	if (is_nmi) {
		vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else {
		vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
		/* The return address pushed on stack */
		vmcb->control.next_rip = vmcb->save.rip;
	}

	run_guest(vmcb, svm->vmcb_gpa);
	__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
		       "Expected VMMCALL #VMEXIT, got '0x%x', info1 = '0x%llx', info2 = '0x%llx'",
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

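	/*
	 * NMI test, second half: with GIF cleared, queue a self-NMI through
	 * the local APIC, bump the stage, then STGI.  The pending NMI is
	 * delivered right after STGI and its handler finishes the test, so
	 * the cpu_relax() loop below is never expected to be left.
	 */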
	if (is_nmi) {
		clgi();
		x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

		GUEST_ASSERT_EQ(nmi_stage_get(), 1);
		nmi_stage_inc();

		stgi();
		/* self-NMI happens here */
		while (true)
			cpu_relax();
	}

	/* Skip over VMMCALL */
	vmcb->save.rip += 3;

	/* Switch to alternate IDT to cause intervening NPF again */
	vmcb->save.idtr.base = idt_alt;
	vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */

	vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
	/* The return address pushed on stack, skip over UD2 */
	vmcb->control.next_rip = vmcb->save.rip + 2;

	run_guest(vmcb, svm->vmcb_gpa);
	__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
		       "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx', info2 = '0x%llx'",
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	GUEST_DONE();
}

static void run_test(bool is_nmi)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;
	vm_vaddr_t idt_alt_vm;
	struct kvm_guest_debug debug;

	pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
	vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
	vm_install_exception_handler(vm, INT_NR, guest_int_handler);

	vcpu_alloc_svm(vm, &svm_gva);

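	/*
	 * For the soft-int test, build an identical copy of the IDT in a
	 * freshly allocated guest page.  L1 points L2 at this copy before
	 * injecting #BP (see the "intervening NPF" comment in
	 * l1_guest_code()).
	 */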
	if (!is_nmi) {
		void *idt, *idt_alt;

		idt_alt_vm = vm_vaddr_alloc_page(vm);
		idt_alt = addr_gva2hva(vm, idt_alt_vm);
		idt = addr_gva2hva(vm, vm->idt);
		memcpy(idt_alt, idt, getpagesize());
	} else {
		idt_alt_vm = 0;
	}
	vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);

	memset(&debug, 0, sizeof(debug));
	vcpu_guest_debug_set(vcpu, &debug);

	struct ucall uc;

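	/* Bound the run with a 2s alarm so a wedged guest fails the test instead of hanging it. */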
	alarm(2);
	vcpu_run(vcpu);
	alarm(0);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
		/* NOT REACHED */
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
done:
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
		    "KVM with nSVM is supposed to unconditionally advertise nRIP Save");

	atomic_init(&nmi_stage, 0);

	run_test(false);
	run_test(true);

	return 0;
}