// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

static void test_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	PR_MSR(msr);

	vector = rdmsr_safe(msr->idx, &ignored);
	GUEST_ASSERT_1(vector == GP_VECTOR, vector);

	vector = wrmsr_safe(msr->idx, 0);
	GUEST_ASSERT_1(vector == GP_VECTOR, vector);
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT(r == -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	while (true) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_1(uc, "vector = %lu");
			return;
		case UCALL_DONE:
			return;
		}
	}
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);

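	/*
	 * Clear the entire KVM_CPUID_FEATURES leaf.  With
	 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID enabled above, KVM should honor
	 * the cleared leaf and reject the PV MSRs and hypercalls exercised
	 * by the guest.
	 */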
	vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	enter_guest(vcpu);
	kvm_vm_free(vm);
}