// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V features enablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

#define VCPU_ID 0
#define LINUX_OS_ID ((u64)0x8100 << 48)

extern unsigned char rdmsr_start;
extern unsigned char rdmsr_end;

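/*
 * The rdmsr_start/rdmsr_end and wrmsr_start/wrmsr_end labels are exported
 * so the #GP handler below can recognize a fault on the labeled instruction
 * and step over it instead of killing the guest.
 */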
static u64 do_rdmsr(u32 idx)
{
	u32 lo, hi;

	/* RDMSR returns the 64-bit MSR value in EDX:EAX. */
	asm volatile("rdmsr_start: rdmsr;"
		     "rdmsr_end:"
		     : "=a"(lo), "=d"(hi)
		     : "c"(idx));

	return (((u64) hi) << 32) | lo;
}

extern unsigned char wrmsr_start;
extern unsigned char wrmsr_end;

static void do_wrmsr(u32 idx, u64 val)
{
	u32 lo, hi;

	lo = val;
	hi = val >> 32;

	asm volatile("wrmsr_start: wrmsr;"
		     "wrmsr_end:"
		     : : "a"(lo), "c"(idx), "d"(hi));
}

static int nr_gp;
static int nr_ud;

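/*
 * Hyper-V TLFS hypercall calling convention: RCX carries the control word,
 * RDX the input parameter GPA (or the first input value for fast calls),
 * R8 the output parameter GPA.  The hypercall status comes back in RAX.
 */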
static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
			    vm_vaddr_t output_address)
{
	u64 hv_status;

	asm volatile("mov %3, %%r8\n"
		     "vmcall"
		     : "=a" (hv_status),
		       "+c" (control), "+d" (input_address)
		     :  "r" (output_address)
		     : "cc", "memory", "r8", "r9", "r10", "r11");

	return hv_status;
}

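/*
 * #GP is only legitimate on the labeled rdmsr/wrmsr instructions above; the
 * handler counts the fault and advances RIP past the instruction so the
 * guest can report the outcome back to the host.
 */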
static void guest_gp_handler(struct ex_regs *regs)
{
	unsigned char *rip = (unsigned char *)regs->rip;
	bool r, w;

	r = rip == &rdmsr_start;
	w = rip == &wrmsr_start;
	GUEST_ASSERT(r || w);

	nr_gp++;

	if (r)
		regs->rip = (uint64_t)&rdmsr_end;
	else
		regs->rip = (uint64_t)&wrmsr_end;
}

static void guest_ud_handler(struct ex_regs *regs)
{
	nr_ud++;
	/* Step over the 3-byte VMCALL/VMMCALL that raised #UD. */
	regs->rip += 3;
}

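/*
 * MSR test vector shared between host and guest: 'idx' selects the MSR to
 * access (0 terminates the run), 'write'/'write_val' turn the access into a
 * write, and 'available' says whether the access must succeed or raise #GP.
 */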
struct msr_data {
	uint32_t idx;
	bool available;
	bool write;
	u64 write_val;
};

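/*
 * Hypercall test vector: 'control' is the hypercall control word (0
 * terminates the run), 'expect' the anticipated status in RAX, and
 * 'ud_expected' marks calls that must raise #UD instead of completing.
 */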
struct hcall_data {
	uint64_t control;
	uint64_t expect;
	bool ud_expected;
};

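/*
 * Guest side of the MSR tests: the host rewrites *msr before every stage,
 * the guest performs the access, checks whether #GP was (not) taken and
 * reports the stage number back through GUEST_SYNC().
 */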
static void guest_msr(struct msr_data *msr)
{
	int i = 0;

	while (msr->idx) {
		WRITE_ONCE(nr_gp, 0);
		if (!msr->write)
			do_rdmsr(msr->idx);
		else
			do_wrmsr(msr->idx, msr->write_val);

		if (msr->available)
			GUEST_ASSERT(READ_ONCE(nr_gp) == 0);
		else
			GUEST_ASSERT(READ_ONCE(nr_gp) == 1);

		GUEST_SYNC(i++);
	}

	GUEST_DONE();
}

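/*
 * Guest side of the hypercall tests: the hypercall page is set up once via
 * HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL, then each stage issues
 * the hypercall selected by the host and checks the returned status (or the
 * #UD count for calls that are expected not to be handled at all).
 */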
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	int i = 0;
	u64 res, input, output;

	wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	while (hcall->control) {
		nr_ud = 0;
		if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
			input = pgs_gpa;
			output = pgs_gpa + 4096;
		} else {
			input = output = 0;
		}

		res = hypercall(hcall->control, input, output);
		if (hcall->ud_expected)
			GUEST_ASSERT(nr_ud == 1);
		else
			GUEST_ASSERT(res == hcall->expect);

		GUEST_SYNC(i++);
	}

	GUEST_DONE();
}

static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
			 struct kvm_cpuid_entry2 *feat,
			 struct kvm_cpuid_entry2 *recomm,
			 struct kvm_cpuid_entry2 *dbg)
{
	TEST_ASSERT(set_cpuid(cpuid, feat),
		    "failed to set KVM_CPUID_FEATURES leaf");
	TEST_ASSERT(set_cpuid(cpuid, recomm),
		    "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
	TEST_ASSERT(set_cpuid(cpuid, dbg),
		    "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
	vcpu_set_cpuid(vm, VCPU_ID, cpuid);
}

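/*
 * Host side of the MSR tests.  Each stage tweaks the synthetic Hyper-V CPUID
 * leaves (KVM_CAP_HYPERV_ENFORCE_CPUID is enabled in main(), so KVM enforces
 * them), programs the next MSR access into *msr and runs the vCPU until it
 * reports the stage back via GUEST_SYNC().
 */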
static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
				   struct kvm_cpuid2 *best)
{
	struct kvm_run *run;
	struct ucall uc;
	int stage = 0, r;
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};
	struct kvm_enable_cap cap = {0};

	run = vcpu_state(vm, VCPU_ID);

	while (true) {
		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 0;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 2:
			feat.eax |= HV_MSR_HYPERCALL_AVAILABLE;
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 1;
			msr->write_val = LINUX_OS_ID;
			msr->available = 1;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 1;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 1;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 0;
			break;
		case 6:
			feat.eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 7:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 0;
			break;
		case 9:
			feat.eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 10:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 0;
			break;
		case 12:
			feat.eax |= HV_MSR_VP_INDEX_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 13:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 0;
			break;
		case 15:
			feat.eax |= HV_MSR_RESET_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 16:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 0;
			break;
		case 18:
			feat.eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 19:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			cap.cap = KVM_CAP_HYPERV_SYNIC2;
			vcpu_enable_cap(vm, VCPU_ID, &cap);
			break;
		case 22:
			feat.eax |= HV_MSR_SYNIC_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 23:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 0;
			break;
		case 25:
			feat.eax |= HV_MSR_SYNTIMER_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 26:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;
		case 27:
			/* Direct mode test */
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 0;
			break;
		case 28:
			feat.edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
			msr->available = 1;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 0;
			msr->available = 0;
			break;
		case 30:
			feat.eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 0;
			break;
		case 32:
			feat.eax |= HV_ACCESS_FREQUENCY_MSRS;
			msr->write = 0;
			msr->available = 1;
			break;
		case 33:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 35:
			feat.eax |= HV_ACCESS_REENLIGHTENMENT;
			msr->write = 0;
			msr->available = 1;
			break;
		case 36:
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 0;
			break;
		case 39:
			feat.edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 40:
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 0;
			break;
		case 42:
			feat.edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			msr->write = 0;
			msr->available = 1;
			break;
		case 43:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 44:
			/* END */
			msr->idx = 0;
			break;
		}

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (msr->idx)
			pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
				 msr->idx, msr->write ? "write" : "read");
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)\n",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			return;
		}

		stage++;
	}
}

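/*
 * Host side of the hypercall tests: same stage machine as above, but each
 * stage adjusts the CPUID leaves and the hypercall test vector in *hcall
 * instead of an MSR access.
 */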
static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
				     void *input, void *output, struct kvm_cpuid2 *best)
{
	struct kvm_run *run;
	struct ucall uc;
	int stage = 0, r;
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES,
		.eax = HV_MSR_HYPERCALL_AVAILABLE
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};

	run = vcpu_state(vm, VCPU_ID);

	while (true) {
		switch (stage) {
		case 0:
			hcall->control = 0xdeadbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			feat.ebx |= HV_POST_MESSAGES;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			feat.ebx |= HV_SIGNAL_EVENTS;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			feat.ebx |= HV_DEBUGGING;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			recomm.eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			recomm.eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			recomm.eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			recomm.ebx = 0xfff;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 19:
			/* END */
			hcall->control = 0;
			break;
		}

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (hcall->control)
			pr_debug("Stage %d: testing hcall: 0x%lx\n", stage,
				 hcall->control);
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)\n",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			return;
		}

		stage++;
	}
}

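/*
 * Two separate VMs are used: one for the MSR pass and one for the hypercall
 * pass, each with KVM_CAP_HYPERV_ENFORCE_CPUID enabled and the relevant
 * exception handler installed.
 */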
int main(void)
{
	struct kvm_cpuid2 *best;
	struct kvm_vm *vm;
	vm_vaddr_t msr_gva, hcall_page, hcall_params;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = {1}
	};

	/* Test MSRs */
	vm = vm_create_default(VCPU_ID, 0, guest_msr);

	msr_gva = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
	vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
	vcpu_enable_cap(vm, VCPU_ID, &cap);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	best = kvm_get_supported_hv_cpuid();

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
			       best);
	kvm_vm_free(vm);

	/* Test hypercalls */
	vm = vm_create_default(VCPU_ID, 0, guest_hcall);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);

	/* Hypercall input/output */
	hcall_page = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

	hcall_params = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());

	vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
	vcpu_enable_cap(vm, VCPU_ID, &cap);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	best = kvm_get_supported_hv_cpuid();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
				 addr_gva2hva(vm, hcall_page),
				 addr_gva2hva(vm, hcall_page) + getpagesize(),
				 best);

	kvm_vm_free(vm);

	return 0;
}