// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V features enablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

struct msr_data {
	uint32_t idx;
	bool available;
	bool write;
	u64 write_val;
};

struct hcall_data {
	uint64_t control;
	uint64_t expect;
	bool ud_expected;
};

static void guest_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	GUEST_ASSERT(msr->idx);

	if (!msr->write)
		vector = rdmsr_safe(msr->idx, &ignored);
	else
		vector = wrmsr_safe(msr->idx, msr->write_val);

	if (msr->available)
		GUEST_ASSERT_2(!vector, msr->idx, vector);
	else
		GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
	GUEST_DONE();
}

static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	u64 res, input, output;
	uint8_t vector;

	GUEST_ASSERT(hcall->control);

	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
		input = pgs_gpa;
		output = pgs_gpa + 4096;
	} else {
		input = output = 0;
	}

	vector = __hyperv_hypercall(hcall->control, input, output, &res);
	if (hcall->ud_expected) {
		GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
	} else {
		GUEST_ASSERT_2(!vector, hcall->control, vector);
		GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
	}

	GUEST_DONE();
}

static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Enable all supported Hyper-V features, then clear the leaves holding
	 * the features that will be tested one by one.
	 */
	vcpu_set_hv_cpuid(vcpu);

	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}

static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_cpuid_entry2 *feat, *dbg;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		run = vcpu->run;

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 0;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 2:
			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 1;
			msr->write_val = HYPERV_LINUX_OS_ID;
			msr->available = 1;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 1;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 1;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 0;
			break;
		case 6:
			feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 1;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 0;
			break;
		case 9:
			feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 1;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 0;
			break;
		case 12:
			feat->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 1;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 0;
			break;
		case 15:
			feat->eax |= HV_MSR_RESET_AVAILABLE;
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 1;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 0;
			break;
		case 18:
			feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 1;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 22:
			feat->eax |= HV_MSR_SYNIC_AVAILABLE;
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 1;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 0;
			break;
		case 25:
			feat->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 1;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 0;
			break;
		case 28:
			feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 1;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 0;
			msr->available = 0;
			break;
		case 30:
			feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 0;
			break;
		case 32:
			feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 1;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 35:
			feat->eax |= HV_ACCESS_REENLIGHTENMENT;
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 1;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 0;
			break;
		case 39:
			feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 1;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 0;
			break;
		case 42:
			feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 1;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 44:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}

static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid_entry2 *feat, *recomm, *dbg;
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
		recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);

		run = vcpu->run;

		switch (stage) {
		case 0:
			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			feat->ebx |= HV_POST_MESSAGES;
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			feat->ebx |= HV_SIGNAL_EVENTS;
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			feat->ebx |= HV_DEBUGGING;
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			recomm->ebx = 0xfff;
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}

int main(void)
{
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();
}