// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE 3

/*
 * Set by the host before running the guest (and pushed into guest memory via
 * sync_global_to_guest()): true when the "wrong" hypercall instruction is
 * expected to #UD (i.e. KVM_X86_QUIRK_FIX_HYPERCALL_INSN disabled), false
 * when KVM is expected to patch in the native instruction instead.
 */
static bool ud_expected;

/*
 * #UD is the terminal event for the quirk-disabled case: assert that a #UD
 * was actually expected, then end the test from inside the handler.
 */
static void guest_ud_handler(struct ex_regs *regs)
{
	GUEST_ASSERT(ud_expected);
	GUEST_DONE();
}

/*
 * Issue KVM_HC_SCHED_YIELD via the AMD hypercall instruction (VMMCALL).
 * The instruction is tagged with an asm label, exported below, so that
 * guest_main() can compare its bytes against the other vendor's instruction
 * after the hypercall executes (KVM may rewrite the bytes in place when the
 * FIX_HYPERCALL_INSN quirk is enabled).
 *
 * NOTE(review): the label is emitted inside the asm body; this assumes the
 * compiler materializes exactly one copy of this function (it is static and
 * called once) — duplication/inlining would yield duplicate symbols.
 */
extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	/* RAX = hypercall nr, RBX = arg0 (this vCPU's APIC ID); both clobbered. */
	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "svm_hypercall_insn:\n\t"
		     "vmmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

/*
 * Intel counterpart of svm_do_sched_yield(): same hypercall, but via VMCALL,
 * again with the instruction's address exported for byte comparison.
 */
extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "vmx_hypercall_insn:\n\t"
		     "vmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

/*
 * Guest entry point: deliberately execute the hypercall instruction of the
 * *other* vendor.  With the FIX_HYPERCALL_INSN quirk enabled, KVM is expected
 * to patch the guest's instruction bytes to the native instruction; with the
 * quirk disabled, the instruction should #UD (handled above).
 */
static void guest_main(void)
{
	uint8_t *native_hypercall_insn, *hypercall_insn;
	uint8_t apic_id;

	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));

	if (is_intel_cpu()) {
		native_hypercall_insn = vmx_hypercall_insn;
		hypercall_insn = svm_hypercall_insn;
		svm_do_sched_yield(apic_id);
	} else if (is_amd_cpu()) {
		native_hypercall_insn = svm_hypercall_insn;
		hypercall_insn = vmx_hypercall_insn;
		vmx_do_sched_yield(apic_id);
	} else {
		/* Neither Intel nor AMD: the test has no valid expectation. */
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	/*
	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
	 * occurs).  Verify that a #UD is NOT expected and that KVM patched in
	 * the native hypercall.
	 */
	GUEST_ASSERT(!ud_expected);
	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
	GUEST_DONE();
}

/* Install guest_ud_handler() as the guest's #UD handler. */
static void setup_ud_vector(struct kvm_vcpu *vcpu)
{
	vm_init_descriptor_tables(vcpu->vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
}

/*
 * Run the vCPU once and dispatch its ucall.  UCALL_DONE is the only success
 * path; anything unrecognized fails the test with the raw exit reason.
 */
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* fallthrough unreachable: REPORT_GUEST_ASSERT() should not return */
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

/* Quirk enabled (default): KVM patches the wrong instruction, no #UD. */
static void test_fix_hypercall(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	ud_expected = false;
	sync_global_to_guest(vm, ud_expected);

	/* Identity-map the xAPIC page so the guest can read its APIC ID. */
	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

/* Quirk disabled: the wrong instruction must #UD instead of being patched. */
static void test_fix_hypercall_disabled(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
		      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	ud_expected = true;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

int main(void)
{
	/* Skip unless KVM supports disabling the FIX_HYPERCALL_INSN quirk. */
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	test_fix_hypercall();
	test_fix_hypercall_disabled();
}