// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE	3

static bool quirk_disabled;

static void guest_ud_handler(struct ex_regs *regs)
{
	regs->rax = -EFAULT;
	regs->rip += HYPERCALL_INSN_SIZE;
}

static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };

extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("hypercall_insn:\n\t"
		     ".byte 0xcc,0xcc,0xcc\n\t"
		     : "=a"(ret)
		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
		     : "memory");

	return ret;
}

static void guest_main(void)
{
	const uint8_t *native_hypercall_insn;
	const uint8_t *other_hypercall_insn;
	uint64_t ret;

	if (this_cpu_is_intel()) {
		native_hypercall_insn = vmx_vmcall;
		other_hypercall_insn = svm_vmmcall;
	} else if (this_cpu_is_amd()) {
		native_hypercall_insn = svm_vmmcall;
		other_hypercall_insn = vmx_vmcall;
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);

	ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));

	/*
	 * If the quirk is disabled, verify that guest_ud_handler() "returned"
	 * -EFAULT and that KVM did NOT patch the hypercall.  If the quirk is
	 * enabled, verify that the hypercall succeeded and that KVM patched in
	 * the "right" hypercall.
	 */
	if (quirk_disabled) {
		GUEST_ASSERT(ret == (uint64_t)-EFAULT);
		GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	} else {
		GUEST_ASSERT(!ret);
		GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	}

	GUEST_DONE();
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

static void test_fix_hypercall(bool disable_quirk)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	vm_init_descriptor_tables(vcpu->vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);

	if (disable_quirk)
		vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
			      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	quirk_disabled = disable_quirk;
	sync_global_to_guest(vm, quirk_disabled);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

int main(void)
{
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	test_fix_hypercall(false);
	test_fix_hypercall(true);
}