1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
4  *
5  * Copyright (C) 2022, Red Hat, Inc.
6  *
7  */
8 
9 #define _GNU_SOURCE /* for program_invocation_short_name */
10 #include <pthread.h>
11 #include <inttypes.h>
12 
13 #include "kvm_util.h"
14 #include "hyperv.h"
15 #include "test_util.h"
16 #include "vmx.h"
17 
18 #define RECEIVER_VCPU_ID_1 2
19 #define RECEIVER_VCPU_ID_2 65
20 
21 #define IPI_VECTOR	 0xfe
22 
23 static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
24 
/*
 * Hyper-V virtual processor set, as consumed by the *Ex hypercalls.
 * Layout must match the TLFS definition exactly — do not reorder.
 */
struct hv_vpset {
	u64 format;		/* one of enum HV_GENERIC_SET_FORMAT */
	u64 valid_bank_mask;	/* bit N set => bank_contents[N] is present */
	u64 bank_contents[2];	/* one bit per VP inside each 64-VP bank */
};
30 
/* Possible values for hv_vpset::format */
enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,	/* sparse banks, listed in bank_contents[] */
	HV_GENERIC_SET_ALL,		/* all VPs; no bank_contents[] follow */
};
35 
36 /* HvCallSendSyntheticClusterIpi hypercall */
37 struct hv_send_ipi {
38 	u32 vector;
39 	u32 reserved;
40 	u64 cpu_mask;
41 };
42 
43 /* HvCallSendSyntheticClusterIpiEx hypercall */
44 struct hv_send_ipi_ex {
45 	u32 vector;
46 	u32 reserved;
47 	struct hv_vpset vp_set;
48 };
49 
/*
 * Minimal Hyper-V enlightenment setup: identify as a Linux guest and
 * establish the hypercall page at @pgs_gpa.  The GUEST_OS_ID write must
 * precede the HYPERCALL one — hypercalls are only enabled once the
 * guest OS ID is non-zero.
 */
static inline void hv_init(vm_vaddr_t pgs_gpa)
{
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
}
55 
56 static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
57 {
58 	u32 vcpu_id;
59 
60 	x2apic_enable();
61 	hv_init(pgs_gpa);
62 
63 	vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
64 
65 	/* Signal sender vCPU we're ready */
66 	ipis_rcvd[vcpu_id] = (u64)-1;
67 
68 	for (;;)
69 		asm volatile("sti; hlt; cli");
70 }
71 
72 static void guest_ipi_handler(struct ex_regs *regs)
73 {
74 	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
75 
76 	ipis_rcvd[vcpu_id]++;
77 	wrmsr(HV_X64_MSR_EOI, 1);
78 }
79 
80 static inline void nop_loop(void)
81 {
82 	int i;
83 
84 	for (i = 0; i < 100000000; i++)
85 		asm volatile("nop");
86 }
87 
/*
 * Guest code for the sender vCPU: exercise every flavor of the Hyper-V
 * SendSyntheticClusterIpi{,Ex} hypercalls — 'slow' (memory-based input
 * at pgs_gpa), 'fast' (register input) and 'XMM fast' (input in XMM
 * registers) — and verify after each call that exactly the targeted
 * receiver vCPU(s) observed exactly one new IPI, as counted in
 * ipis_rcvd[] by guest_ipi_handler().
 */
static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
	struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
	struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
	int stage = 1, ipis_expected[2] = {0};

	hv_init(pgs_gpa);
	GUEST_SYNC(stage++);

	/*
	 * Wait for receiver vCPUs to come up: each one stores (u64)-1 into
	 * its ipis_rcvd[] slot from receiver_code().
	 */
	while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
		nop_loop();
	/* Reset the counters before real IPIs start flowing */
	ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;

	/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	ipi->vector = IPI_VECTOR;
	ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
	hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Bank 0 covers VP indices 0..63 */
	ipi_ex->vp_set.valid_bank_mask = 1 << 0;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Bank 1 covers VP indices 64..127 */
	ipi_ex->vp_set.valid_bank_mask = 1 << 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Two sparse banks: VPs 0..63 and 64..127 */
	ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1, 2} */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
	hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/*
	 * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL;
	 * the variable-size header is empty, so nothing needs to be written
	 * to the XMM registers.
	 */
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, HV_GENERIC_SET_ALL);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	GUEST_DONE();
}
207 
208 static void *vcpu_thread(void *arg)
209 {
210 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
211 	int old, r;
212 
213 	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
214 	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
215 		    vcpu->id, r);
216 
217 	vcpu_run(vcpu);
218 
219 	TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);
220 
221 	return NULL;
222 }
223 
224 static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
225 {
226 	void *retval;
227 	int r;
228 
229 	r = pthread_cancel(thread);
230 	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
231 		    vcpu->id, r);
232 
233 	r = pthread_join(thread, &retval);
234 	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
235 		    vcpu->id, r);
236 	TEST_ASSERT(retval == PTHREAD_CANCELED,
237 		    "expected retval=%p, got %p", PTHREAD_CANCELED,
238 		    retval);
239 }
240 
241 int main(int argc, char *argv[])
242 {
243 	struct kvm_vm *vm;
244 	struct kvm_vcpu *vcpu[3];
245 	unsigned int exit_reason;
246 	vm_vaddr_t hcall_page;
247 	pthread_t threads[2];
248 	int stage = 1, r;
249 	struct ucall uc;
250 
251 	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
252 
253 	/* Hypercall input/output */
254 	hcall_page = vm_vaddr_alloc_pages(vm, 2);
255 	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
256 
257 	vm_init_descriptor_tables(vm);
258 
259 	vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
260 	vcpu_init_descriptor_tables(vcpu[1]);
261 	vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
262 	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
263 	vcpu_set_hv_cpuid(vcpu[1]);
264 
265 	vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
266 	vcpu_init_descriptor_tables(vcpu[2]);
267 	vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
268 	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
269 	vcpu_set_hv_cpuid(vcpu[2]);
270 
271 	vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
272 
273 	vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
274 	vcpu_set_hv_cpuid(vcpu[0]);
275 
276 	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
277 	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);
278 
279 	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
280 	TEST_ASSERT(!r, "pthread_create failed errno=%d", errno);
281 
282 	while (true) {
283 		vcpu_run(vcpu[0]);
284 
285 		exit_reason = vcpu[0]->run->exit_reason;
286 		TEST_ASSERT(exit_reason == KVM_EXIT_IO,
287 			    "unexpected exit reason: %u (%s)",
288 			    exit_reason, exit_reason_str(exit_reason));
289 
290 		switch (get_ucall(vcpu[0], &uc)) {
291 		case UCALL_SYNC:
292 			TEST_ASSERT(uc.args[1] == stage,
293 				    "Unexpected stage: %ld (%d expected)\n",
294 				    uc.args[1], stage);
295 			break;
296 		case UCALL_DONE:
297 			goto done;
298 		case UCALL_ABORT:
299 			REPORT_GUEST_ASSERT(uc);
300 			/* NOT REACHED */
301 		default:
302 			TEST_FAIL("Unknown ucall %lu", uc.cmd);
303 		}
304 
305 		stage++;
306 	}
307 
308 done:
309 	cancel_join_vcpu_thread(threads[0], vcpu[1]);
310 	cancel_join_vcpu_thread(threads[1], vcpu[2]);
311 	kvm_vm_free(vm);
312 
313 	return r;
314 }
315