1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021, Red Hat, Inc.
4  *
5  * Tests for Hyper-V features enablement
6  */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
10 
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
15 
16 /*
17  * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
18  * but to activate the feature it is sufficient to set it to a non-zero
19  * value. Use BIT(0) for that.
20  */
21 #define HV_PV_SPINLOCKS_TEST            \
22 	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
23 
/* Per-stage parameters for guest_msr(), placed in guest-visible memory. */
struct msr_data {
	uint32_t idx;		/* MSR index to access */
	bool fault_expected;	/* true if the access should raise #GP */
	bool write;		/* write (then read back) instead of read */
	u64 write_val;		/* value to write when 'write' is true */
};
30 
/* Per-stage parameters for guest_hcall(), placed in guest-visible memory. */
struct hcall_data {
	uint64_t control;	/* hypercall control input (call code + flags) */
	uint64_t expect;	/* expected hypercall result when no #UD */
	bool ud_expected;	/* true if the hypercall should raise #UD */
};
36 
is_write_only_msr(uint32_t msr)37 static bool is_write_only_msr(uint32_t msr)
38 {
39 	return msr == HV_X64_MSR_EOI;
40 }
41 
guest_msr(struct msr_data * msr)42 static void guest_msr(struct msr_data *msr)
43 {
44 	uint8_t vector = 0;
45 	uint64_t msr_val = 0;
46 
47 	GUEST_ASSERT(msr->idx);
48 
49 	if (msr->write)
50 		vector = wrmsr_safe(msr->idx, msr->write_val);
51 
52 	if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
53 		vector = rdmsr_safe(msr->idx, &msr_val);
54 
55 	if (msr->fault_expected)
56 		__GUEST_ASSERT(vector == GP_VECTOR,
57 			       "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
58 			       msr->idx, msr->write ? "WR" : "RD", vector);
59 	else
60 		__GUEST_ASSERT(!vector,
61 			       "Expected success on %sMSR(0x%x), got vector '0x%x'",
62 			       msr->idx, msr->write ? "WR" : "RD", vector);
63 
64 	if (vector || is_write_only_msr(msr->idx))
65 		goto done;
66 
67 	if (msr->write)
68 		__GUEST_ASSERT(!vector,
69 			       "WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
70 			       msr->idx, msr->write_val, msr_val);
71 
72 	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
73 	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
74 		if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
75 			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
76 		else
77 			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
78 				     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
79 	}
80 
81 done:
82 	GUEST_DONE();
83 }
84 
/*
 * Guest entry point for the hypercall access test: programs the Hyper-V
 * hypercall page, issues hcall->control and asserts either #UD
 * (hcall->ud_expected) or that the hypercall result equals hcall->expect.
 */
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	u64 res, input, output;
	uint8_t vector;

	GUEST_ASSERT_NE(hcall->control, 0);

	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	/* "Fast" hypercalls pass parameters in registers, not via memory. */
	if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
		input = pgs_gpa;
		output = pgs_gpa + 4096;
	} else {
		input = output = 0;
	}

	vector = __hyperv_hypercall(hcall->control, input, output, &res);
	if (hcall->ud_expected) {
		__GUEST_ASSERT(vector == UD_VECTOR,
			       "Expected #UD for control '%lu', got vector '0x%x'",
			       hcall->control, vector);
	} else {
		__GUEST_ASSERT(!vector,
			       "Expected no exception for control '%lu', got vector '0x%x'",
			       hcall->control, vector);
		GUEST_ASSERT_EQ(res, hcall->expect);
	}

	GUEST_DONE();
}
116 
vcpu_reset_hv_cpuid(struct kvm_vcpu * vcpu)117 static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
118 {
119 	/*
120 	 * Enable all supported Hyper-V features, then clear the leafs holding
121 	 * the features that will be tested one by one.
122 	 */
123 	vcpu_set_hv_cpuid(vcpu);
124 
125 	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
126 	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
127 	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
128 }
129 
/*
 * Host side of the MSR access test.  Each stage creates a fresh VM, carries
 * over the previous stage's guest CPUID (so enabled feature bits accumulate
 * across stages), fills a struct msr_data describing one MSR access, runs
 * guest_msr() and validates the resulting ucall.  With
 * KVM_CAP_HYPERV_ENFORCE_CPUID enabled, accesses to MSRs whose feature bit
 * is not exposed in guest CPUID are expected to #GP.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;
	bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		/* Page the guest reads its test parameters from. */
		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = true;
			msr->write_val = HYPERV_LINUX_OS_ID;
			msr->fault_expected = false;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = false;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 12:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 15:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = true;
			/*
			 * TODO: the test only writes '0' to HV_X64_MSR_RESET
			 * at the moment, writing some other value there will
			 * trigger real vCPU reset and the code is not prepared
			 * to handle it yet.
			 */
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 22:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 25:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = true;
			break;
		case 28:
			vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = false;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 30:
			/* HV_X64_MSR_EOI is write-only; only the write path is tested. */
			vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
			msr->idx = HV_X64_MSR_EOI;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 32:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 35:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 39:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 42:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 44:
			/* MSR is not available when CPUID feature bit is unset */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 45:
			/* MSR is available when CPUID feature bit is set */
			if (!has_invtsc)
				goto next_stage;
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 46:
			/* Writing bits other than 0 is forbidden */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 0xdeadbeef;
			msr->fault_expected = true;
			break;
		case 47:
			/* Setting bit 0 enables the feature */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		default:
			/* All stages done. */
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Save the CPUID so the next stage inherits this stage's features. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

next_stage:
		stage++;
		kvm_vm_free(vm);
	}
}
521 
/*
 * Host side of the hypercall access test.  Mirrors guest_test_msrs_access():
 * each stage creates a fresh VM, inherits the previous stage's guest CPUID,
 * fills a struct hcall_data describing one hypercall, runs guest_hcall()
 * and validates the resulting ucall.  With KVM_CAP_HYPERV_ENFORCE_CPUID,
 * hypercalls whose feature/recommendation bit is not exposed in CPUID are
 * expected to fail with HV_STATUS_ACCESS_DENIED (or #UD for XMM fast calls).
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		/* Page the guest reads its test parameters from. */
		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		switch (stage) {
		case 0:
			/* Hypercall interface itself needs to be available. */
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 20:
			/* Extended hypercalls cannot use the fast calling convention. */
			vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
			hcall->expect = HV_STATUS_INVALID_PARAMETER;
			break;
		case 21:
			/* All stages done. */
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Save the CPUID so the next stage inherits this stage's features. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
691 
int main(void)
{
	/* Part 1: per-MSR availability/permission checks. */
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	/* Part 2: hypercall availability checks. */
	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();

	return 0;
}
700