// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

static bool ud_expected;

static void guest_ud_handler(struct ex_regs *regs)
{
	GUEST_ASSERT(ud_expected);
	GUEST_DONE();
}

extern unsigned char svm_hypercall_insn;
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "svm_hypercall_insn:\n\t"
		     "vmmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

extern unsigned char vmx_hypercall_insn;
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "vmx_hypercall_insn:\n\t"
		     "vmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
{
	uint32_t exp = 0, obs = 0;

	memcpy(&exp, exp_insn, sizeof(exp));
	memcpy(&obs, obs_insn, sizeof(obs));

	GUEST_ASSERT_EQ(exp, obs);
}

static void guest_main(void)
{
	unsigned char *native_hypercall_insn, *hypercall_insn;
	uint8_t apic_id;

	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));

	if (is_intel_cpu()) {
		native_hypercall_insn = &vmx_hypercall_insn;
		hypercall_insn = &svm_hypercall_insn;
		svm_do_sched_yield(apic_id);
	} else if (is_amd_cpu()) {
		native_hypercall_insn = &svm_hypercall_insn;
		hypercall_insn = &vmx_hypercall_insn;
		vmx_do_sched_yield(apic_id);
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	GUEST_ASSERT(!ud_expected);
	assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
	GUEST_DONE();
}

static void setup_ud_vector(struct kvm_vcpu *vcpu)
{
	vm_init_descriptor_tables(vcpu->vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

static void test_fix_hypercall(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	ud_expected = false;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

static void test_fix_hypercall_disabled(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
		      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
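
	/*
	 * With the FIX_HYPERCALL_INSN quirk disabled, KVM should leave the
	 * guest's "wrong" hypercall instruction (VMCALL vs. VMMCALL) as-is
	 * rather than patching it, so the guest is expected to take a #UD
	 * and finish in guest_ud_handler().
	 */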
	ud_expected = true;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

int main(void)
{
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	test_fix_hypercall();
	test_fix_hypercall_disabled();
}