// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set and updates to/from values
 * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define UCALL_PIO_PORT ((uint16_t)0x1000)

struct ucall uc_none = {
	.cmd = UCALL_NONE,
};

/*
 * ucall is embedded here to protect against compiler reshuffling registers
 * before calling a function. In this test we only need to get a KVM_EXIT_IO
 * vmexit and preserve RBX; no additional information is needed.
 */
void guest_code(void)
{
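	/*
	 * Loop forever: each iteration does a port read (forcing a
	 * KVM_EXIT_IO) and increments RBX so the host can observe whether
	 * its register updates round-trip through the guest.
	 */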
	asm volatile("1: in %[port], %%al\n"
		     "add $0x1, %%rbx\n"
		     "jmp 1b"
		     : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
		     : "rax", "rbx");
}

static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "Register " #reg \
		    " values did not match: 0x%llx, 0x%llx\n", \
		    left->reg, right->reg)
	REG_COMPARE(rax);
	REG_COMPARE(rbx);
	REG_COMPARE(rcx);
	REG_COMPARE(rdx);
	REG_COMPARE(rsi);
	REG_COMPARE(rdi);
	REG_COMPARE(rsp);
	REG_COMPARE(rbp);
	REG_COMPARE(r8);
	REG_COMPARE(r9);
	REG_COMPARE(r10);
	REG_COMPARE(r11);
	REG_COMPARE(r12);
	REG_COMPARE(r13);
	REG_COMPARE(r14);
	REG_COMPARE(r15);
	REG_COMPARE(rip);
	REG_COMPARE(rflags);
#undef REG_COMPARE
}

static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
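	/* No sregs fields are compared yet (placeholder). */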
}

static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
{
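	/* No vcpu_events fields are compared yet (placeholder). */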
}

#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
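/* Bit 31 is not a defined KVM_SYNC_X86_* register set, so KVM_RUN must reject it. */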
#define INVALID_SYNC_FIELD 0x80000000

/*
 * Set an exception as pending *and* injected while KVM is processing events.
 * KVM is supposed to ignore/drop pending exceptions if userspace is also
 * requesting that an exception be injected.
 */
static void *race_events_inj_pen(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	WRITE_ONCE(events->exception.nr, UD_VECTOR);

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.injected, 1);
		WRITE_ONCE(events->exception.pending, 1);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Set an invalid exception vector while KVM is processing events. KVM is
 * supposed to reject any vector >= 32, as well as NMIs (vector 2).
 */
static void *race_events_exc(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.nr, UD_VECTOR);
		WRITE_ONCE(events->exception.pending, 1);
		WRITE_ONCE(events->exception.nr, 255);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Toggle CR4.PAE while KVM is processing SREGS. EFER.LME=1 with CR4.PAE=0 is
 * illegal, and KVM's MMU heavily relies on vCPU state being valid.
 */
static noinline void *race_sregs_cr4(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	__u64 *cr4 = &run->s.regs.sregs.cr4;
	__u64 pae_enabled = *cr4;
	__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
		WRITE_ONCE(*cr4, pae_enabled);
		asm volatile(".rept 512\n\t"
			     "nop\n\t"
			     ".endr");
		WRITE_ONCE(*cr4, pae_disabled);

		pthread_testcancel();
	}

	return NULL;
}

static void race_sync_regs(void *racer)
{
	const time_t TIMEOUT = 2; /* seconds, roughly */
	struct kvm_x86_state *state;
	struct kvm_translation tr;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	pthread_t thread;
	time_t t;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

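	/*
	 * Run the vCPU once with SREGS synced so run->s.regs.sregs holds the
	 * guest's initial state; it is checked below and serves as the CR4
	 * racer's baseline.
	 */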
	run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
	vcpu_run(vcpu);
	run->kvm_valid_regs = 0;

	/* Save state *before* spawning the thread that mucks with vCPU state. */
	state = vcpu_save_state(vcpu);

	/*
	 * Selftests run 64-bit guests by default; both EFER.LME and CR4.PAE
	 * should already be set in guest state.
	 */
	TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
		    (run->s.regs.sregs.efer & EFER_LME),
		    "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
		    !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
		    !!(run->s.regs.sregs.efer & EFER_LME));

	TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);

	for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
		/*
		 * Reload known good state if the vCPU triple faults, e.g. due
		 * to the unhandled #GPs being injected. VMX preserves state
		 * on shutdown, but SVM synthesizes an INIT as the VMCB state
		 * is architecturally undefined on triple fault.
		 */
		if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
			vcpu_load_state(vcpu, state);

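		/*
		 * For the CR4 race, also exercise KVM's guest page-table
		 * walker via KVM_TRANSLATE so the racing SREGS updates are
		 * actually consumed by the MMU.
		 */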
		if (racer == race_sregs_cr4) {
			tr = (struct kvm_translation) { .linear_address = 0 };
			__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
		}
	}

	TEST_ASSERT_EQ(pthread_cancel(thread), 0);
	TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

	kvm_x86_state_cleanup(state);
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
	int rv, cap;

	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
	TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
	TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	run = vcpu->run;

	/* Request reading invalid register set from VCPU. */
	run->kvm_valid_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	run->kvm_valid_regs = 0;

	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	run->kvm_valid_regs = 0;

	/* Request setting invalid register set into VCPU. */
	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	run->kvm_dirty_regs = 0;

	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	run->kvm_dirty_regs = 0;

	/* Request and verify all valid register sets. */
	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/* Set and verify various register values. */
	run->s.regs.regs.rbx = 0xBAD1DEA;
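	/* 1 << 11 is the APIC global enable bit in the APIC base MSR. */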
	run->s.regs.sregs.apic_base = 1 << 11;
	/* TODO run->s.regs.events.XYZ = ABC; */

	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
		    "apic_base sync regs value incorrect 0x%llx.",
		    run->s.regs.sregs.apic_base);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/* Clear kvm_dirty_regs bits, verify new s.regs values are
	 * overwritten with existing guest values.
	 */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xDEADBEEF;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);

	/* Clear kvm_valid_regs bits and kvm_dirty_regs bits.
	 * Verify s.regs values are not overwritten with existing guest values
	 * and that guest values are not overwritten with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xAAAA;
	regs.rbx = 0xBAC0;
	vcpu_regs_set(vcpu, &regs);
	rv = _vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
	 * with existing guest values but that guest values are overwritten
	 * with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
	run->s.regs.regs.rbx = 0xBBBB;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	kvm_vm_free(vm);

	race_sync_regs(race_sregs_cr4);
	race_sync_regs(race_events_exc);
	race_sync_regs(race_events_inj_pen);

	return 0;
}