// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set, updates to/from values
 * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 5

#define UCALL_PIO_PORT ((uint16_t)0x1000)

/*
 * The ucall is open-coded here to protect against the compiler reshuffling
 * registers before calling a function. In this test we only need to trigger
 * a KVM_EXIT_IO vmexit and preserve RBX; no additional information is needed.
 */
void guest_code(void)
{
	asm volatile("1: in %[port], %%al\n"
		     "add $0x1, %%rbx\n"
		     "jmp 1b"
		     : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
}
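
/*
 * Note: each completed KVM_RUN re-enters the loop above right after the "in"
 * instruction, so RBX is incremented exactly once per KVM_EXIT_IO.  The
 * "+ 1" checks in main() below rely on this.
 */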

static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "Register " #reg \
		    " values did not match: 0x%llx, 0x%llx\n", \
		    left->reg, right->reg)
	REG_COMPARE(rax);
	REG_COMPARE(rbx);
	REG_COMPARE(rcx);
	REG_COMPARE(rdx);
	REG_COMPARE(rsi);
	REG_COMPARE(rdi);
	REG_COMPARE(rsp);
	REG_COMPARE(rbp);
	REG_COMPARE(r8);
	REG_COMPARE(r9);
	REG_COMPARE(r10);
	REG_COMPARE(r11);
	REG_COMPARE(r12);
	REG_COMPARE(r13);
	REG_COMPARE(r14);
	REG_COMPARE(r15);
	REG_COMPARE(rip);
	REG_COMPARE(rflags);
#undef REG_COMPARE
}

static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
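	/* TODO: comparing the special registers (kvm_sregs) is not yet implemented. */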
}

static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
{
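	/* TODO: comparing vCPU events is not yet implemented. */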
}

#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000
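
/*
 * INVALID_SYNC_FIELD is a bit no KVM_SYNC_X86_* flag uses; the capability
 * check in main() confirms KVM does not report it as valid before it is used
 * to provoke EINVAL from KVM_RUN.
 */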

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
	int rv, cap;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

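	/*
	 * KVM_CAP_SYNC_REGS reports a bitmask of the register sets that can
	 * be synchronized through kvm_run; skip unless every set exercised
	 * below is supported, and bail out if the intentionally bogus bit is
	 * reported as valid.
	 */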
	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
	if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
		print_skip("KVM_CAP_SYNC_REGS not supported");
		exit(KSFT_SKIP);
	}
	if ((cap & INVALID_SYNC_FIELD) != 0) {
		print_skip("The \"invalid\" field is not invalid");
		exit(KSFT_SKIP);
	}

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

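	/*
	 * vcpu_state() returns the vCPU's mmap'ed kvm_run structure, which is
	 * where the kvm_valid_regs/kvm_dirty_regs flags and the s.regs
	 * payload live.
	 */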
	run = vcpu_state(vm, VCPU_ID);

	/* Request reading invalid register set from VCPU. */
	run->kvm_valid_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
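	/* Clear the stale invalid bits so they do not fail the next run. */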
	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;

	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;

	/* Request setting invalid register set into VCPU. */
	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;

	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;

	/* Request and verify all valid register sets. */
	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

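	/*
	 * With kvm_valid_regs set, KVM copies the guest's current register
	 * state into run->s.regs on exit, so it must match what the
	 * GET_{REGS,SREGS,VCPU_EVENTS} ioctls report.
	 */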
	vcpu_regs_get(vm, VCPU_ID, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vm, VCPU_ID, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vm, VCPU_ID, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/* Set and verify various register values. */
	run->s.regs.regs.rbx = 0xBAD1DEA;
	run->s.regs.sregs.apic_base = 1 << 11;
	/* TODO run->s.regs.events.XYZ = ABC; */

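	/*
	 * Marking REGS and SREGS dirty makes KVM load the modified s.regs
	 * values into the vCPU before re-entering the guest, so RBX should
	 * come back incremented by the guest loop and apic_base should stick.
	 */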
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
		    "apic_base sync regs value incorrect 0x%llx.",
		    run->s.regs.sregs.apic_base);

	vcpu_regs_get(vm, VCPU_ID, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vm, VCPU_ID, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vm, VCPU_ID, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/* Clear kvm_dirty_regs bits; verify new s.regs values are
	 * overwritten with existing guest values.
	 */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xDEADBEEF;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);

	/* Clear kvm_valid_regs bits and kvm_dirty_regs bits.
	 * Verify s.regs values are not overwritten with existing guest values
	 * and that guest values are not overwritten with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xAAAA;
	regs.rbx = 0xBAC0;
	vcpu_regs_set(vm, VCPU_ID, &regs);
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vm, VCPU_ID, &regs);
	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
	 * with existing guest values but that guest values are overwritten
	 * with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
	run->s.regs.regs.rbx = 0xBBBB;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vm, VCPU_ID, &regs);
	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	kvm_vm_free(vm);

	return 0;
}