// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of the x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set and checking that values
 * are synced to/from kvm_run.s.regs as kvm_valid_regs and kvm_dirty_regs
 * are toggled.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 5

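/*
 * Guest loop: every GUEST_SYNC() exits to userspace (the test expects
 * KVM_EXIT_IO), and rbx is incremented once per resume.  The host-side
 * checks below rely on seeing "previous rbx + 1" after each KVM_RUN.
 */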
void guest_code(void)
{
	/*
	 * Use a callee-saved register; otherwise the compiler may spill
	 * the value around the call to GUEST_SYNC, and the host would not
	 * see the current stage in rbx at the time of the exit.
	 */
	register u32 stage asm("rbx");
	for (;;) {
		GUEST_SYNC(0);
		stage++;
		asm volatile ("" : : "r" (stage));
	}
}

static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "Register " #reg \
		    " values did not match: 0x%llx, 0x%llx\n", \
		    left->reg, right->reg)
	REG_COMPARE(rax);
	REG_COMPARE(rbx);
	REG_COMPARE(rcx);
	REG_COMPARE(rdx);
	REG_COMPARE(rsi);
	REG_COMPARE(rdi);
	REG_COMPARE(rsp);
	REG_COMPARE(rbp);
	REG_COMPARE(r8);
	REG_COMPARE(r9);
	REG_COMPARE(r10);
	REG_COMPARE(r11);
	REG_COMPARE(r12);
	REG_COMPARE(r13);
	REG_COMPARE(r14);
	REG_COMPARE(r15);
	REG_COMPARE(rip);
	REG_COMPARE(rflags);
#undef REG_COMPARE
}

static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
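	/*
	 * TODO: compare the full structures.  As a minimal sketch (not part
	 * of the original test coverage), check apic_base, which this test
	 * sets elsewhere, plus cr0; a complete version would also cover the
	 * segment and descriptor-table registers.
	 */
	TEST_ASSERT(left->apic_base == right->apic_base,
		    "apic_base values did not match: 0x%llx, 0x%llx\n",
		    left->apic_base, right->apic_base);
	TEST_ASSERT(left->cr0 == right->cr0,
		    "cr0 values did not match: 0x%llx, 0x%llx\n",
		    left->cr0, right->cr0);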
}

static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
{
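	/*
	 * TODO: compare pending exception, interrupt, and NMI state.  Left
	 * empty for now, so mismatches in the events set go undetected.
	 */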
}

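/*
 * TEST_SYNC_FIELDS covers the three x86 register sets that can be synced
 * via kvm_run.s.regs.  INVALID_SYNC_FIELD is a bit that KVM_CAP_SYNC_REGS
 * should not report as supported, so KVM_RUN is expected to reject it.
 */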
#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
	int rv, cap;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

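	/*
	 * KVM_CHECK_EXTENSION(KVM_CAP_SYNC_REGS) reports which register sets
	 * this kernel can sync; skip unless every tested set is supported
	 * and the deliberately bogus bit is not.
	 */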
	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
	if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
		fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
		exit(KSFT_SKIP);
	}
	if ((cap & INVALID_SYNC_FIELD) != 0) {
		fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
		exit(KSFT_SKIP);
	}

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	run = vcpu_state(vm, VCPU_ID);

	/* Request reading invalid register set from VCPU. */
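	/*
	 * An unsupported bit in kvm_valid_regs (or kvm_dirty_regs below) must
	 * make KVM_RUN fail with EINVAL; the bogus bits are cleared after
	 * each attempt so that subsequent runs are unaffected.
	 */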
	run->kvm_valid_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;

	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;

	/* Request setting invalid register set into VCPU. */
	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;

	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
		    rv);
	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;

	/* Request and verify all valid register sets. */
	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

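	/*
	 * With kvm_valid_regs set, KVM copies the vCPU state into
	 * run->s.regs on exit; it should match what the dedicated
	 * KVM_GET_* ioctls report.
	 */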
	vcpu_regs_get(vm, VCPU_ID, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vm, VCPU_ID, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vm, VCPU_ID, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/* Set and verify various register values. */
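	/*
	 * Writing kvm_run.s.regs and setting the corresponding kvm_dirty_regs
	 * bits tells KVM to load those values into the vCPU before the next
	 * entry.  The guest increments rbx once before it exits again, hence
	 * the "+ 1" check below.  1 << 11 is the APIC global enable bit of
	 * the APIC base MSR, used here as an sregs value that can be read
	 * back and verified.
	 */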
	run->s.regs.regs.rbx = 0xBAD1DEA;
	run->s.regs.sregs.apic_base = 1 << 11;
	/* TODO run->s.regs.events.XYZ = ABC; */

	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
		    "apic_base sync regs value incorrect 0x%llx.",
		    run->s.regs.sregs.apic_base);

	vcpu_regs_get(vm, VCPU_ID, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vm, VCPU_ID, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vm, VCPU_ID, &events);
	compare_vcpu_events(&events, &run->s.regs.events);

	/*
	 * Clear kvm_dirty_regs bits, verify that the new s.regs values are
	 * overwritten with the existing guest values.
	 */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xDEADBEEF;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);

	/*
	 * Clear kvm_valid_regs bits and kvm_dirty_regs bits.  Verify that
	 * s.regs values are not overwritten with existing guest values and
	 * that guest values are not overwritten with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xAAAA;
	regs.rbx = 0xBAC0;
	vcpu_regs_set(vm, VCPU_ID, &regs);
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vm, VCPU_ID, &regs);
	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	/*
	 * Clear kvm_valid_regs bits.  Verify that s.regs values are not
	 * overwritten with existing guest values but that guest values are
	 * overwritten with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
	run->s.regs.regs.rbx = 0xBBBB;
	rv = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vm, VCPU_ID, &regs);
	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);

	kvm_vm_free(vm);

	return 0;
}