// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
};

struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	uint8_t ar;
	uint8_t key;
};

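/*
 * Build the kvm_s390_mem_op ioctl argument from a mop_desc as filled in by the
 * MEMOP() macro below. The reserved field is set to a dummy pattern that the
 * kernel is expected to ignore.
 */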
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc.gaddr,
		.size = desc.size,
		.buf = ((uintptr_t)desc.buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc.target) {
	case LOGICAL:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc.mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc.mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc.f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc.f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc._set_flags)
		ksmo.flags = desc.set_flags;
	if (desc.f_key) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc.key;
	}
	if (desc._ar)
		ksmo.ar = desc.ar;
	else
		ksmo.ar = 0;
	if (desc._sida_offset)
		ksmo.sida_offset = desc.sida_offset;

	return ksmo;
}

/* Dummy VCPU id signaling that the VM ioctl, rather than the VCPU ioctl, is to be used */
const uint32_t VM_VCPU_ID = (uint32_t)-1;

struct test_vcpu {
	struct kvm_vm *vm;
	uint32_t id;
};

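/* Set to true to print each memop before it is issued (debug aid). */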
#define PRINT_MEMOP false
static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (vcpu_id == VM_VCPU_ID)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

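/*
 * Issue KVM_S390_MEM_OP as a VM or VCPU ioctl, depending on the dummy VCPU id.
 * The plain variant asserts success; the err_ variant returns the raw result
 * so callers can inspect errno or a positive program interruption code.
 */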
static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
{
	if (vcpu.id == VM_VCPU_ID)
		vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
	else
		vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
}

static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
{
	if (vcpu.id == VM_VCPU_ID)
		return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
	else
		return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
}

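/*
 * Core memop wrapper: build a mop_desc from the mandatory arguments plus any
 * designated initializers passed in __VA_ARGS__, resolve a guest virtual
 * address to an absolute one when the target is ABSOLUTE, and issue the ioctl
 * via memop_ioctl() or err_memop_ioctl(), selected by the err prefix.
 */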
#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_vcpu __vcpu = (vcpu_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(__desc);					\
	print_memop(__vcpu.id, &__ksmo);					\
	err##memop_ioctl(__vcpu, &__ksmo);					\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

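/* Designated-initializer shorthands for the optional mop_desc fields. */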
#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)

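/* Issue the memop twice: first as a dry run with CHECK_ONLY, then for real. */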
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define VCPU_ID 1
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

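/*
 * mem1 and mem2 serve both as host buffers for the ioctl and, via GADDR_V(),
 * as guest virtual addresses that the guest code copies between.
 */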
static uint8_t mem1[65536];
static uint8_t mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_vcpu vm;
	struct test_vcpu vcpu;
	struct kvm_run *run;
	int size;
};

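/*
 * Create the default one-vcpu test VM and clamp the transfer size to what the
 * MEM_OP capability reports, but to no more than the buffer size.
 */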
static struct test_default test_default_init(void *guest_code)
{
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
	t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID };
	t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
	t.run = vcpu_state(t.kvm_vm, VCPU_ID);
	return t;
}

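/* Stages used to synchronize guest and host via GUEST_SYNC()/HOST_SYNC(). */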
enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
};

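/* Run the vcpu until the next GUEST_SYNC and check it reports the expected stage. */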
#define HOST_SYNC(vcpu_p, stage)					\
({									\
	struct test_vcpu __vcpu = (vcpu_p);				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu.vm, __vcpu.id);					\
	get_ucall(__vcpu.vm, __vcpu.id, &uc);				\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);					\
	ASSERT_EQ(uc.args[1], __stage);					\
})

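/* Fill mem1 with random data and poison mem2, so stale data cannot mask a failed copy. */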
static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

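/*
 * Write mem1 into guest memory via MEM_OP, let the guest copy it to mem2
 * (STAGE_COPIED), read the copy back via MEM_OP and compare. Any extra
 * arguments (e.g. KEY()) apply to both memops.
 */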
#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)		\
({										\
	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);	\
	enum mop_target __target = (mop_target_p);				\
	uint32_t __size = (size);						\
										\
	prepare_mem12();							\
	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,		\
			GADDR_V(mem1), ##__VA_ARGS__);				\
	HOST_SYNC(__copy_cpu, STAGE_COPIED);					\
	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size,		\
			GADDR_V(mem2), ##__VA_ARGS__);				\
	ASSERT_MEM_EQ(mem1, mem2, __size);					\
})

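/*
 * Like DEFAULT_WRITE_READ, but only the final read uses the caller-supplied
 * arguments (typically a GADDR_V() and a KEY()); the initial write always
 * targets mem1 without a key.
 */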
#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)		\
({										\
	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);	\
	enum mop_target __target = (mop_target_p);				\
	uint32_t __size = (size);						\
										\
	prepare_mem12();							\
	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,		\
			GADDR_V(mem1));						\
	HOST_SYNC(__copy_cpu, STAGE_COPIED);					\
	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
	ASSERT_MEM_EQ(mem1, mem2, __size);					\
})

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);

	kvm_vm_free(t.kvm_vm);
}

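/*
 * Set the storage key of every page backing the given virtual range: lra
 * translates each virtual address so that sske can set the key of the
 * corresponding frame; an unmapped page trips the guest assertion.
 */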
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);

	/* vm/vcpu, matching key or key 0 */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
	/*
	 * There used to be different code paths for key handling depending on
	 * if the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));

	kvm_vm_free(t.kvm_vm);
}

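/*
 * Expect the memop to fail with a protection exception, which the ioctl
 * reports as a positive return value of 4 (the program interruption code).
 */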
#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

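/*
 * Address of the last page of the guest address space, used by the wraparound
 * tests below, where an access crossing the end of the address space continues
 * at address 0.
 */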
const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override does not
	 * apply because the access exceeds the 2048 bytes it covers
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

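/*
 * Error conditions that apply to both the vm and the vcpu ioctl: invalid
 * sizes, flags, guest and host addresses, and keys.
 */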
static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

	/* Bad host address: */
	rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int memop_cap, extension_cap;

	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */

	memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
	if (!memop_cap) {
		print_skip("CAP_S390_MEM_OP not supported");
		exit(KSFT_SKIP);
	}

	test_copy();
	if (extension_cap > 0) {
		test_copy_key();
		test_copy_key_storage_prot_override();
		test_copy_key_fetch_prot();
		test_copy_key_fetch_prot_override();
		test_errors_key();
		test_errors_key_storage_prot_override();
		test_errors_key_fetch_prot_override_not_enabled();
		test_errors_key_fetch_prot_override_enabled();
	} else {
		print_skip("storage key memop extension not supported");
	}
	test_errors();

	return 0;
}