// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Oracle and/or its affiliates.
 *
 * Based on:
 *   svm_int_ctl_test
 *
 *   Copyright (C) 2021, Red Hat, Inc.
 *
 */

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "test_util.h"

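/*
 * Vector used for the L1-injected soft interrupt; 0x20 is the first vector
 * above the 0-31 range that the architecture reserves for exceptions.
 */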
#define INT_NR			0x20

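/*
 * nmi_stage (below) is an atomic_int bumped from the guest's NMI handler as
 * well as from L1's normal flow, so require that atomic_int operations are
 * always lock-free.
 */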
static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");

static unsigned int bp_fired;
static void guest_bp_handler(struct ex_regs *regs)
{
	bp_fired++;
}

static unsigned int int_fired;
static void l2_guest_code_int(void);

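/*
 * The soft INT is injected with next_rip set to the start of
 * l2_guest_code_int (see l1_guest_code), so the handler expects that address
 * as the return RIP.
 */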
static void guest_int_handler(struct ex_regs *regs)
{
	int_fired++;
	GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
		       regs->rip, (unsigned long)l2_guest_code_int);
}

static void l2_guest_code_int(void)
{
	GUEST_ASSERT_1(int_fired == 1, int_fired);

	/*
	 * Same as the vmmcall() function, but with a ud2 sneaked after the
	 * vmmcall.  The caller injects an exception with the return address
	 * increased by 2, so the "pop rbp" must be after the ud2 and we cannot
	 * use vmmcall() directly.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");

	GUEST_ASSERT_1(bp_fired == 1, bp_fired);
	hlt();
}

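/*
 * nmi_stage tracks progress of the NMI flavor of the test:
 *   1 - the NMI injected by L1 via event_inj has been delivered; its handler
 *       exits back to L1 with a VMMCALL,
 *   2 - set by L1 after the VMMCALL exit, while GIF is clear and a self-NMI
 *       is pending,
 *   3 - the pending self-NMI is delivered once L1 executes STGI; the handler
 *       then signals completion with GUEST_DONE().
 */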
static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
static void guest_nmi_handler(struct ex_regs *regs)
{
	nmi_stage_inc();

	if (nmi_stage_get() == 1) {
		vmmcall();
		GUEST_ASSERT(false);
	} else {
		GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
		GUEST_DONE();
	}
}

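/*
 * L2 is not expected to reach this ud2: the NMI injected by L1 is delivered
 * immediately and its handler VMMCALLs back to L1.  If L2 does run, the
 * intercepted #UD makes L1's exit-code assertion fail.
 */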
static void l2_guest_code_nmi(void)
{
	ud2();
}

static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	if (is_nmi)
		x2apic_enable();

	/* Prepare for L2 execution. */
	generic_svm_setup(svm,
			  is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
	vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);

	if (is_nmi) {
		vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else {
		vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
		/* The return address pushed on the stack */
		vmcb->control.next_rip = vmcb->save.rip;
	}

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	if (is_nmi) {
		clgi();
		x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

		GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
		nmi_stage_inc();

		stgi();
		/* self-NMI happens here */
		while (true)
			cpu_relax();
	}

	/* Skip over VMMCALL */
	vmcb->save.rip += 3;

	/* Switch to alternate IDT to cause intervening NPF again */
	vmcb->save.idtr.base = idt_alt;
	vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */

	vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
	/* The return address pushed on the stack, skipping over the ud2 */
	vmcb->control.next_rip = vmcb->save.rip + 2;

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	GUEST_DONE();
}

static void run_test(bool is_nmi)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;
	vm_vaddr_t idt_alt_vm;
	struct kvm_guest_debug debug;

	pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
	vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
	vm_install_exception_handler(vm, INT_NR, guest_int_handler);

	vcpu_alloc_svm(vm, &svm_gva);

	if (!is_nmi) {
		void *idt, *idt_alt;

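		/*
		 * Make a copy of the guest's IDT at a separate guest virtual
		 * address; L1 points L2's IDTR at this copy to trigger the
		 * intervening NPF during delivery of the injected #BP (see
		 * l1_guest_code).
		 */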
		idt_alt_vm = vm_vaddr_alloc_page(vm);
		idt_alt = addr_gva2hva(vm, idt_alt_vm);
		idt = addr_gva2hva(vm, vm->idt);
		memcpy(idt_alt, idt, getpagesize());
	} else {
		idt_alt_vm = 0;
	}
	vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);

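	/* Reset guest debug controls to their defaults (no debugging active). */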
	memset(&debug, 0, sizeof(debug));
	vcpu_guest_debug_set(vcpu, &debug);

	struct ucall uc;

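	/* Arm a two second watchdog; SIGALRM kills the test if the guest hangs. */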
	alarm(2);
	vcpu_run(vcpu);
	alarm(0);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
		break;
		/* NOT REACHED */
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
done:
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
		    "KVM with nSVM is supposed to unconditionally advertise nRIP Save");

	atomic_init(&nmi_stage, 0);

	run_test(false);
	run_test(true);

	return 0;
}