1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020, Google LLC.
4  *
5  * Tests for KVM paravirtual feature disablement
6  */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <linux/stringify.h>
10 #include <stdint.h>
11 
12 #include "apic.h"
13 #include "test_util.h"
14 #include "kvm_util.h"
15 #include "processor.h"
16 
17 /* VMCALL and VMMCALL are both 3-byte opcodes. */
18 #define HYPERCALL_INSN_SIZE	3
19 
20 static bool ud_expected;
21 
22 static void guest_ud_handler(struct ex_regs *regs)
23 {
24 	GUEST_ASSERT(ud_expected);
25 	GUEST_DONE();
26 }
27 
28 static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };
29 static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
30 
31 extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
32 static uint64_t do_sched_yield(uint8_t apic_id)
33 {
34 	uint64_t ret;
35 
36 	asm volatile("hypercall_insn:\n\t"
37 		     ".byte 0xcc,0xcc,0xcc\n\t"
38 		     : "=a"(ret)
39 		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
40 		     : "memory");
41 
42 	return ret;
43 }
44 
45 static void guest_main(void)
46 {
47 	const uint8_t *native_hypercall_insn;
48 	const uint8_t *other_hypercall_insn;
49 
50 	if (is_intel_cpu()) {
51 		native_hypercall_insn = vmx_vmcall;
52 		other_hypercall_insn  = svm_vmmcall;
53 	} else if (is_amd_cpu()) {
54 		native_hypercall_insn = svm_vmmcall;
55 		other_hypercall_insn  = vmx_vmcall;
56 	} else {
57 		GUEST_ASSERT(0);
58 		/* unreachable */
59 		return;
60 	}
61 
62 	memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);
63 
64 	do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));
65 
66 	/*
67 	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
68 	 * occurs).  Verify that a #UD is NOT expected and that KVM patched in
69 	 * the native hypercall.
70 	 */
71 	GUEST_ASSERT(!ud_expected);
72 	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
73 	GUEST_DONE();
74 }
75 
76 static void setup_ud_vector(struct kvm_vcpu *vcpu)
77 {
78 	vm_init_descriptor_tables(vcpu->vm);
79 	vcpu_init_descriptor_tables(vcpu);
80 	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
81 }
82 
83 static void enter_guest(struct kvm_vcpu *vcpu)
84 {
85 	struct kvm_run *run = vcpu->run;
86 	struct ucall uc;
87 
88 	vcpu_run(vcpu);
89 	switch (get_ucall(vcpu, &uc)) {
90 	case UCALL_SYNC:
91 		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
92 		break;
93 	case UCALL_DONE:
94 		return;
95 	case UCALL_ABORT:
96 		REPORT_GUEST_ASSERT(uc);
97 	default:
98 		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
99 			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
100 	}
101 }
102 
103 static void test_fix_hypercall(void)
104 {
105 	struct kvm_vcpu *vcpu;
106 	struct kvm_vm *vm;
107 
108 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
109 	setup_ud_vector(vcpu);
110 
111 	ud_expected = false;
112 	sync_global_to_guest(vm, ud_expected);
113 
114 	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
115 
116 	enter_guest(vcpu);
117 }
118 
119 static void test_fix_hypercall_disabled(void)
120 {
121 	struct kvm_vcpu *vcpu;
122 	struct kvm_vm *vm;
123 
124 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
125 	setup_ud_vector(vcpu);
126 
127 	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
128 		      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
129 
130 	ud_expected = true;
131 	sync_global_to_guest(vm, ud_expected);
132 
133 	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
134 
135 	enter_guest(vcpu);
136 }
137 
138 int main(void)
139 {
140 	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
141 
142 	test_fix_hypercall();
143 	test_fix_hypercall_disabled();
144 }
145