1 // SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "kselftest.h"
15 
/* SEV launch policy bit requesting SEV-ES (encrypted register state). */
#define SEV_POLICY_ES 0b100

/* vCPU/VM counts for the migration chain tests. */
#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
/* Threads/iterations for the migration-locking stress test. */
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000

/* Set in main() from KVM CPUID; gates the SEV-ES variants of each test. */
bool have_sev_es;
24 
/*
 * Issue a KVM_MEMORY_ENCRYPT_OP SEV command against @vm_fd.
 *
 * Returns the raw ioctl() return value and stores the SEV firmware error
 * code (SEV_RET_*) in *@fw_error so callers can distinguish kernel errors
 * (ret/errno) from firmware errors.
 */
static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error)
{
	struct kvm_sev_cmd cmd = {
		.id = cmd_id,
		.data = (uint64_t)data,
		.sev_fd = open_sev_dev_path_or_exit(),
	};
	int ret, saved_errno;

	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	*fw_error = cmd.error;

	/*
	 * open_sev_dev_path_or_exit() opens a fresh fd on every call; close
	 * it here so repeated SEV commands don't leak file descriptors.
	 * Save/restore errno so callers still observe the ioctl() failure
	 * cause rather than anything close() might set.
	 */
	saved_errno = errno;
	close(cmd.sev_fd);
	errno = saved_errno;

	return ret;
}
38 
39 static void sev_ioctl(int vm_fd, int cmd_id, void *data)
40 {
41 	int ret;
42 	__u32 fw_error;
43 
44 	ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error);
45 	TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS,
46 		    "%d failed: return code: %d, errno: %d, fw error: %d",
47 		    cmd_id, ret, errno, fw_error);
48 }
49 
50 static struct kvm_vm *sev_vm_create(bool es)
51 {
52 	struct kvm_vm *vm;
53 	struct kvm_sev_launch_start start = { 0 };
54 	int i;
55 
56 	vm = vm_create_barebones();
57 	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
58 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
59 		__vm_vcpu_add(vm, i);
60 	if (es)
61 		start.policy |= SEV_POLICY_ES;
62 	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
63 	if (es)
64 		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
65 	return vm;
66 }
67 
68 static struct kvm_vm *aux_vm_create(bool with_vcpus)
69 {
70 	struct kvm_vm *vm;
71 	int i;
72 
73 	vm = vm_create_barebones();
74 	if (!with_vcpus)
75 		return vm;
76 
77 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
78 		__vm_vcpu_add(vm, i);
79 
80 	return vm;
81 }
82 
83 static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
84 {
85 	return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
86 }
87 
88 
/* Migrate @src's encryption context into @dst, asserting success. */
static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
	int rc = __sev_migrate_from(dst, src);

	TEST_ASSERT(!rc, "Migration failed, ret: %d, errno: %d\n", rc, errno);
}
96 
97 static void test_sev_migrate_from(bool es)
98 {
99 	struct kvm_vm *src_vm;
100 	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
101 	int i, ret;
102 
103 	src_vm = sev_vm_create(es);
104 	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
105 		dst_vms[i] = aux_vm_create(true);
106 
107 	/* Initial migration from the src to the first dst. */
108 	sev_migrate_from(dst_vms[0], src_vm);
109 
110 	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
111 		sev_migrate_from(dst_vms[i], dst_vms[i - 1]);
112 
113 	/* Migrate the guest back to the original VM. */
114 	ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
115 	TEST_ASSERT(ret == -1 && errno == EIO,
116 		    "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
117 		    errno);
118 
119 	kvm_vm_free(src_vm);
120 	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
121 		kvm_vm_free(dst_vms[i]);
122 }
123 
/*
 * Per-thread input for the locking stress test: the thread's own
 * destination VM plus the shared set of source VMs every thread
 * concurrently attempts to migrate from.
 */
struct locking_thread_input {
	struct kvm_vm *vm;
	struct kvm_vm *source_vms[NR_LOCK_TESTING_THREADS];
};
128 
129 static void *locking_test_thread(void *arg)
130 {
131 	int i, j;
132 	struct locking_thread_input *input = (struct locking_thread_input *)arg;
133 
134 	for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
135 		j = i % NR_LOCK_TESTING_THREADS;
136 		__sev_migrate_from(input->vm, input->source_vms[j]);
137 	}
138 
139 	return NULL;
140 }
141 
142 static void test_sev_migrate_locking(void)
143 {
144 	struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
145 	pthread_t pt[NR_LOCK_TESTING_THREADS];
146 	int i;
147 
148 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
149 		input[i].vm = sev_vm_create(/* es= */ false);
150 		input[0].source_vms[i] = input[i].vm;
151 	}
152 	for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
153 		memcpy(input[i].source_vms, input[0].source_vms,
154 		       sizeof(input[i].source_vms));
155 
156 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
157 		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
158 
159 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
160 		pthread_join(pt[i], NULL);
161 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
162 		kvm_vm_free(input[i].vm);
163 }
164 
165 static void test_sev_migrate_parameters(void)
166 {
167 	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
168 		*sev_es_vm_no_vmsa;
169 	int ret;
170 
171 	vm_no_vcpu = vm_create_barebones();
172 	vm_no_sev = aux_vm_create(true);
173 	ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
174 	TEST_ASSERT(ret == -1 && errno == EINVAL,
175 		    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
176 		    errno);
177 
178 	if (!have_sev_es)
179 		goto out;
180 
181 	sev_vm = sev_vm_create(/* es= */ false);
182 	sev_es_vm = sev_vm_create(/* es= */ true);
183 	sev_es_vm_no_vmsa = vm_create_barebones();
184 	sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
185 	__vm_vcpu_add(sev_es_vm_no_vmsa, 1);
186 
187 	ret = __sev_migrate_from(sev_vm, sev_es_vm);
188 	TEST_ASSERT(
189 		ret == -1 && errno == EINVAL,
190 		"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
191 		ret, errno);
192 
193 	ret = __sev_migrate_from(sev_es_vm, sev_vm);
194 	TEST_ASSERT(
195 		ret == -1 && errno == EINVAL,
196 		"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
197 		ret, errno);
198 
199 	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
200 	TEST_ASSERT(
201 		ret == -1 && errno == EINVAL,
202 		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
203 		ret, errno);
204 
205 	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
206 	TEST_ASSERT(
207 		ret == -1 && errno == EINVAL,
208 		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
209 		ret, errno);
210 
211 	kvm_vm_free(sev_vm);
212 	kvm_vm_free(sev_es_vm);
213 	kvm_vm_free(sev_es_vm_no_vmsa);
214 out:
215 	kvm_vm_free(vm_no_vcpu);
216 	kvm_vm_free(vm_no_sev);
217 }
218 
219 static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
220 {
221 	return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
222 }
223 
224 
/* Mirror @src's encryption context into @dst, asserting success. */
static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
	int rc = __sev_mirror_create(dst, src);

	TEST_ASSERT(!rc, "Copying context failed, ret: %d, errno: %d\n", rc, errno);
}
232 
233 static void verify_mirror_allowed_cmds(int vm_fd)
234 {
235 	struct kvm_sev_guest_status status;
236 
237 	for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
238 		int ret;
239 		__u32 fw_error;
240 
241 		/*
242 		 * These commands are allowed for mirror VMs, all others are
243 		 * not.
244 		 */
245 		switch (cmd_id) {
246 		case KVM_SEV_LAUNCH_UPDATE_VMSA:
247 		case KVM_SEV_GUEST_STATUS:
248 		case KVM_SEV_DBG_DECRYPT:
249 		case KVM_SEV_DBG_ENCRYPT:
250 			continue;
251 		default:
252 			break;
253 		}
254 
255 		/*
256 		 * These commands should be disallowed before the data
257 		 * parameter is examined so NULL is OK here.
258 		 */
259 		ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
260 		TEST_ASSERT(
261 			ret == -1 && errno == EINVAL,
262 			"Should not be able call command: %d. ret: %d, errno: %d\n",
263 			cmd_id, ret, errno);
264 	}
265 
266 	sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status);
267 }
268 
269 static void test_sev_mirror(bool es)
270 {
271 	struct kvm_vm *src_vm, *dst_vm;
272 	int i;
273 
274 	src_vm = sev_vm_create(es);
275 	dst_vm = aux_vm_create(false);
276 
277 	sev_mirror_create(dst_vm, src_vm);
278 
279 	/* Check that we can complete creation of the mirror VM.  */
280 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
281 		__vm_vcpu_add(dst_vm, i);
282 
283 	if (es)
284 		sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
285 
286 	verify_mirror_allowed_cmds(dst_vm->fd);
287 
288 	kvm_vm_free(src_vm);
289 	kvm_vm_free(dst_vm);
290 }
291 
292 static void test_sev_mirror_parameters(void)
293 {
294 	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
295 	int ret;
296 
297 	sev_vm = sev_vm_create(/* es= */ false);
298 	vm_with_vcpu = aux_vm_create(true);
299 	vm_no_vcpu = aux_vm_create(false);
300 
301 	ret = __sev_mirror_create(sev_vm, sev_vm);
302 	TEST_ASSERT(
303 		ret == -1 && errno == EINVAL,
304 		"Should not be able copy context to self. ret: %d, errno: %d\n",
305 		ret, errno);
306 
307 	ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
308 	TEST_ASSERT(ret == -1 && errno == EINVAL,
309 		    "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
310 		    errno);
311 
312 	ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
313 	TEST_ASSERT(
314 		ret == -1 && errno == EINVAL,
315 		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
316 		ret, errno);
317 
318 	if (!have_sev_es)
319 		goto out;
320 
321 	sev_es_vm = sev_vm_create(/* es= */ true);
322 	ret = __sev_mirror_create(sev_vm, sev_es_vm);
323 	TEST_ASSERT(
324 		ret == -1 && errno == EINVAL,
325 		"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
326 		ret, errno);
327 
328 	ret = __sev_mirror_create(sev_es_vm, sev_vm);
329 	TEST_ASSERT(
330 		ret == -1 && errno == EINVAL,
331 		"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
332 		ret, errno);
333 
334 	kvm_vm_free(sev_es_vm);
335 
336 out:
337 	kvm_vm_free(sev_vm);
338 	kvm_vm_free(vm_with_vcpu);
339 	kvm_vm_free(vm_no_vcpu);
340 }
341 
/*
 * Interleave context moves of a SEV VM and of its mirror, then free the
 * VMs in two different orders.  The exact ordering of the migrations and
 * of the kvm_vm_free() calls is the point of the test: it exercises safe
 * teardown regardless of whether mirrors outlive their mirrored VM.
 */
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
		      *dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	dst2_vm = aux_vm_create(true);
	dst3_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);
	dst2_mirror_vm = aux_vm_create(false);
	dst3_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	/* Move the mirror's context and the original's context in turn. */
	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	sev_migrate_from(dst2_vm, dst_vm);
	sev_migrate_from(dst2_mirror_vm, dst_mirror_vm);

	sev_migrate_from(dst3_mirror_vm, dst2_mirror_vm);
	sev_migrate_from(dst3_vm, dst2_vm);

	/* Here the mirrored VMs are destroyed before their mirrors. */
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
	kvm_vm_free(dst2_vm);
	kvm_vm_free(dst3_vm);
	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst2_mirror_vm);
	kvm_vm_free(dst3_mirror_vm);

	/*
	 * Run a similar test but destroy the mirrors before the mirrored
	 * VMs to ensure destruction is done safely in that order too.
	 */
	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}
395 
396 int main(int argc, char *argv[])
397 {
398 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
399 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
400 
401 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
402 
403 	have_sev_es = kvm_cpu_has(X86_FEATURE_SEV_ES);
404 
405 	if (kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
406 		test_sev_migrate_from(/* es= */ false);
407 		if (have_sev_es)
408 			test_sev_migrate_from(/* es= */ true);
409 		test_sev_migrate_locking();
410 		test_sev_migrate_parameters();
411 		if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
412 			test_sev_move_copy();
413 	}
414 	if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
415 		test_sev_mirror(/* es= */ false);
416 		if (have_sev_es)
417 			test_sev_mirror(/* es= */ true);
418 		test_sev_mirror_parameters();
419 	}
420 	return 0;
421 }
422