1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020, Google LLC.
4  *
5  * Tests for KVM paravirtual feature disablement
6  */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <linux/stringify.h>
10 #include <stdint.h>
11 
12 #include "apic.h"
13 #include "test_util.h"
14 #include "kvm_util.h"
15 #include "processor.h"
16 
17 /* VMCALL and VMMCALL are both 3-byte opcodes. */
18 #define HYPERCALL_INSN_SIZE	3
19 
20 static bool ud_expected;
21 
22 static void guest_ud_handler(struct ex_regs *regs)
23 {
24 	GUEST_ASSERT(ud_expected);
25 	GUEST_DONE();
26 }
27 
28 extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
29 static uint64_t svm_do_sched_yield(uint8_t apic_id)
30 {
31 	uint64_t ret;
32 
33 	asm volatile("svm_hypercall_insn:\n\t"
34 		     "vmmcall\n\t"
35 		     : "=a"(ret)
36 		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
37 		     : "memory");
38 
39 	return ret;
40 }
41 
42 extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
43 static uint64_t vmx_do_sched_yield(uint8_t apic_id)
44 {
45 	uint64_t ret;
46 
47 	asm volatile("vmx_hypercall_insn:\n\t"
48 		     "vmcall\n\t"
49 		     : "=a"(ret)
50 		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
51 		     : "memory");
52 
53 	return ret;
54 }
55 
56 static void guest_main(void)
57 {
58 	uint8_t *native_hypercall_insn, *hypercall_insn;
59 	uint8_t apic_id;
60 
61 	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
62 
63 	if (is_intel_cpu()) {
64 		native_hypercall_insn = vmx_hypercall_insn;
65 		hypercall_insn = svm_hypercall_insn;
66 		svm_do_sched_yield(apic_id);
67 	} else if (is_amd_cpu()) {
68 		native_hypercall_insn = svm_hypercall_insn;
69 		hypercall_insn = vmx_hypercall_insn;
70 		vmx_do_sched_yield(apic_id);
71 	} else {
72 		GUEST_ASSERT(0);
73 		/* unreachable */
74 		return;
75 	}
76 
77 	/*
78 	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
79 	 * occurs).  Verify that a #UD is NOT expected and that KVM patched in
80 	 * the native hypercall.
81 	 */
82 	GUEST_ASSERT(!ud_expected);
83 	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
84 	GUEST_DONE();
85 }
86 
87 static void setup_ud_vector(struct kvm_vcpu *vcpu)
88 {
89 	vm_init_descriptor_tables(vcpu->vm);
90 	vcpu_init_descriptor_tables(vcpu);
91 	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
92 }
93 
94 static void enter_guest(struct kvm_vcpu *vcpu)
95 {
96 	struct kvm_run *run = vcpu->run;
97 	struct ucall uc;
98 
99 	vcpu_run(vcpu);
100 	switch (get_ucall(vcpu, &uc)) {
101 	case UCALL_SYNC:
102 		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
103 		break;
104 	case UCALL_DONE:
105 		return;
106 	case UCALL_ABORT:
107 		REPORT_GUEST_ASSERT(uc);
108 	default:
109 		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
110 			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
111 	}
112 }
113 
114 static void test_fix_hypercall(void)
115 {
116 	struct kvm_vcpu *vcpu;
117 	struct kvm_vm *vm;
118 
119 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
120 	setup_ud_vector(vcpu);
121 
122 	ud_expected = false;
123 	sync_global_to_guest(vm, ud_expected);
124 
125 	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
126 
127 	enter_guest(vcpu);
128 }
129 
130 static void test_fix_hypercall_disabled(void)
131 {
132 	struct kvm_vcpu *vcpu;
133 	struct kvm_vm *vm;
134 
135 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
136 	setup_ud_vector(vcpu);
137 
138 	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
139 		      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
140 
141 	ud_expected = true;
142 	sync_global_to_guest(vm, ud_expected);
143 
144 	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
145 
146 	enter_guest(vcpu);
147 }
148 
149 int main(void)
150 {
151 	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
152 
153 	test_fix_hypercall();
154 	test_fix_hypercall_disabled();
155 }
156