// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
};

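/*
 * Declarative description of one memop. The MEMOP() macro below fills this in
 * from designated initializers; the _gaddr_v, _set_flags, _sida_offset and _ar
 * bits record whether the corresponding optional field was supplied, so that
 * defaults can be applied otherwise.
 */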
struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	uint8_t ar;
	uint8_t key;
};

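/* Translate a mop_desc into the struct kvm_s390_mem_op the ioctl expects. */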
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc.gaddr,
		.size = desc.size,
		.buf = ((uintptr_t)desc.buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc.target) {
	case LOGICAL:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc.f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc.f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc._set_flags)
		ksmo.flags = desc.set_flags;
	if (desc.f_key) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc.key;
	}
	if (desc._ar)
		ksmo.ar = desc.ar;
	else
		ksmo.ar = 0;
	if (desc._sida_offset)
		ksmo.sida_offset = desc.sida_offset;

	return ksmo;
}

/* Dummy vcpu id; signals that the vm ioctl rather than the vcpu ioctl is to be used */
const uint32_t VM_VCPU_ID = (uint32_t)-1;

struct test_vcpu {
	struct kvm_vm *vm;
	uint32_t id;
};

#define PRINT_MEMOP false
static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (vcpu_id == VM_VCPU_ID)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
{
	if (vcpu.id == VM_VCPU_ID)
		vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
	else
		vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
}

static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
{
	if (vcpu.id == VM_VCPU_ID)
		return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
	else
		return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
}

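/*
 * Build a mop_desc from the given target/mode/buffer plus any optional
 * designated initializers (GADDR_V(), KEY(), ... defined below) and issue the
 * memop ioctl. If GADDR_V() was given, the guest virtual address is translated
 * to a guest absolute address for ABSOLUTE memops and passed through unchanged
 * for LOGICAL ones. "err" selects between the asserting and the
 * error-returning ioctl wrapper above.
 */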
#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_vcpu __vcpu = (vcpu_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(__desc);					\
	print_memop(__vcpu.id, &__ksmo);					\
	err##memop_ioctl(__vcpu, &__ksmo);					\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1

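/*
 * Example: a key-checked logical write of t.size bytes from mem1 to the guest
 * address mem1 is mapped at, using access key 9 (mirrors the key tests below):
 *	MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(9));
 */

/* Perform the memop twice: first as a CHECK_ONLY dry run, then for real. */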
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define VCPU_ID 1
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t mem1[65536];
static uint8_t mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_vcpu vm;
	struct test_vcpu vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
	t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID };
	t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
	t.run = vcpu_state(t.kvm_vm, VCPU_ID);
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
};

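/* Run the vcpu until its next GUEST_SYNC and assert it reports the expected stage. */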
#define HOST_SYNC(vcpu_p, stage)					\
({									\
	struct test_vcpu __vcpu = (vcpu_p);				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu.vm, __vcpu.id);					\
	get_ucall(__vcpu.vm, __vcpu.id, &uc);				\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);					\
	ASSERT_EQ(uc.args[1], __stage);					\
})

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

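/*
 * Write randomized mem1 into the guest via memop, let the guest copy it to its
 * mem2, read that back into host mem2 via memop and compare. Extra
 * initializers (e.g. KEY()) are forwarded to both memops.
 */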
#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)		\
({										\
	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);	\
	enum mop_target __target = (mop_target_p);				\
	uint32_t __size = (size);						\
										\
	prepare_mem12();							\
	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,		\
			GADDR_V(mem1), ##__VA_ARGS__);				\
	HOST_SYNC(__copy_cpu, STAGE_COPIED);					\
	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size,		\
			GADDR_V(mem2), ##__VA_ARGS__);				\
	ASSERT_MEM_EQ(mem1, mem2, __size);					\
})

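/*
 * Like DEFAULT_WRITE_READ, but the extra initializers (typically GADDR/KEY)
 * apply only to the read back from the guest; the initial write always goes
 * to GADDR_V(mem1) without a key.
 */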
#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)		\
({										\
	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);	\
	enum mop_target __target = (mop_target_p);				\
	uint32_t __size = (size);						\
										\
	prepare_mem12();							\
	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,		\
			GADDR_V(mem1));						\
	HOST_SYNC(__copy_cpu, STAGE_COPIED);					\
	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
	ASSERT_MEM_EQ(mem1, mem2, __size);					\
})

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);

	kvm_vm_free(t.kvm_vm);
}

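/*
 * Set the storage key of each page in [addr, addr + len). The key byte is
 * ACC (access-control, bits 0-3), F (fetch protection), R and C. LRA is used
 * to obtain the real address of each page and SSKE to set its key; the guest
 * asserts that every page in the range is mapped.
 */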
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

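/* Key 0x90: access-control value 9, fetch protection off. */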
static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);

	/* vm/vcpu, matching key or key 0 */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));

	kvm_vm_free(t.kvm_vm);
}

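/* A positive return value of 4 is the program interruption code for a protection exception. */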
#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). Running the guest delivers
	 * the exception, which stores the TEID in the guest's lowcore, where it
	 * can then be read. Point the guest's program-new PSW at the current PSW
	 * first, so the guest resumes where it was and its state is not clobbered.
	 */
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override does not
	 * apply because the accessed range extends beyond the first 2048 bytes
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

	/* Bad host address: */
	rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int memop_cap, extension_cap;

	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */

	memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
	if (!memop_cap) {
		print_skip("CAP_S390_MEM_OP not supported");
		exit(KSFT_SKIP);
	}

	test_copy();
	if (extension_cap > 0) {
		test_copy_key();
		test_copy_key_storage_prot_override();
		test_copy_key_fetch_prot();
		test_copy_key_fetch_prot_override();
		test_errors_key();
		test_termination();
		test_errors_key_storage_prot_override();
		test_errors_key_fetch_prot_override_not_enabled();
		test_errors_key_fetch_prot_override_enabled();
	} else {
		print_skip("storage key memop extension not supported");
	}
	test_errors();

	return 0;
}