// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

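/*
 * Translate a mop_desc into the kvm_s390_mem_op structure that the
 * KVM_S390_MEM_OP ioctl expects: pick the op code from target/mode and
 * encode the optional flags, storage key, access register and SIDA offset.
 */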
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

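/*
 * Issue the memop and assert that the ioctl succeeded. For CMPXCHG the
 * kernel writes the value actually found in memory back to the old-value
 * buffer; comparing it against the copy saved in ksmo_from_desc() tells us
 * whether the exchange took place, reported through *cmpxchg_success.
 */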
static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

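/*
 * Build a mop_desc from the mandatory arguments plus any of the optional
 * designated-initializer macros below (GADDR, GADDR_V, KEY, ...), translate
 * a guest virtual address into an absolute one when the target is ABSOLUTE,
 * and issue the memop. MOP() asserts success, ERR_MOP() returns the ioctl
 * result instead.
 */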
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

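/*
 * Run the vcpu until its next GUEST_SYNC and assert that the guest has
 * reached the expected stage.
 */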
#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT(uc);				\
	}								\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	TEST_ASSERT_EQ(uc.args[1], __stage);				\
})									\

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

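/*
 * Write a random pattern into guest mem1 via the memop under test, let the
 * guest copy it to mem2, read mem2 back via the same memop and verify that
 * both buffers match. Both accesses use the given storage key.
 */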
static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

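/*
 * For every power-of-two size and aligned offset within a 16-byte block,
 * first perform a cmpxchg with the correct old value and check that the
 * exchange succeeds and memory is updated, then perform one with a wrong
 * old value and check that it fails, memory stays unchanged and the actual
 * old value is reported back.
 */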
static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}

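/*
 * Set the storage key of every page in the range. LRA is used to obtain the
 * absolute address (and to detect unmapped pages) before applying SSKE.
 */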
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			"lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;

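/*
 * Derive a pseudo-random, naturally aligned block size and offset within
 * the 16-byte block from the iteration counter. Guest and host use
 * different constants so they pick different sequences.
 */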
static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

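/*
 * Pseudo-randomly permute the bits of the low "size" bytes of the old
 * value, either by swapping two bytes within that field or by rotating it.
 * The number of set bits is preserved, which is the invariant the
 * concurrent cmpxchg test checks at the end.
 */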
static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

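/*
 * Guest-side compare-and-swap of 4, 8 or 16 bytes using CS, CSG or CDSG.
 * Returns true if the exchange took place; otherwise *old_addr is updated
 * with the value actually found at the target.
 */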
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
		uint32_t old = *old_addr;

		asm volatile ("cs %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint32_t *)(target))
			      : [new] "d" ((uint32_t)new)
			      : "cc"
		);
		ret = old == (uint32_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 8: {
		uint64_t old = *old_addr;

		asm volatile ("csg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint64_t *)(target))
			      : [new] "d" ((uint64_t)new)
			      : "cc"
		);
		ret = old == (uint64_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 16: {
		__uint128_t old = *old_addr;

		asm volatile ("cdsg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(__uint128_t *)(target))
			      : [new] "d" (new)
			      : "cc"
		);
		ret = old == *old_addr;
		*old_addr = old;
		return ret;
	}
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

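/*
 * Guest half of the concurrent cmpxchg test: flip the handshake value at
 * mem1 from 1 to 0, then perform a burst of bit-preserving permutations on
 * mem2 while the host races it from the other side.
 */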
static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

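/* Return a pointer to the least significant "size" bytes of a (big-endian) __uint128_t. */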
static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}

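/*
 * Host half of the concurrent cmpxchg test: seed guest mem2 with random
 * data, run the guest in a separate thread and race it with vm memops that
 * apply the same kind of bit-preserving permutations. Afterwards the total
 * number of set bits in the 16-byte block must be unchanged.
 */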
static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}

#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}

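/*
 * Check that a key-protection exception injected by a memop reports a
 * Translation-Exception Identification (TEID) code that permits terminating
 * the instruction, i.e. bits 56, 60 and 61 of the TEID are zero.
 */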
static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	TEST_ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because memory range exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);	/* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));	/* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int extension_cap, idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);

	struct testdef {
		const char *name;
		void (*test)(void);
		bool requirements_met;
	} testlist[] = {
		{
			.name = "simple copy",
			.test = test_copy,
			.requirements_met = true,
		},
		{
			.name = "generic error checks",
			.test = test_errors,
			.requirements_met = true,
		},
		{
			.name = "copy with storage keys",
			.test = test_copy_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "cmpxchg with storage keys",
			.test = test_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "concurrently cmpxchg with storage keys",
			.test = test_cmpxchg_key_concurrent,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "copy with key storage protection override",
			.test = test_copy_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection",
			.test = test_copy_key_fetch_prot,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection override",
			.test = test_copy_key_fetch_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key",
			.test = test_errors_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks for cmpxchg with key",
			.test = test_errors_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "error checks for cmpxchg",
			.test = test_errors_cmpxchg,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "termination",
			.test = test_termination,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key storage protection override",
			.test = test_errors_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks without key fetch prot override",
			.test = test_errors_key_fetch_prot_override_not_enabled,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key fetch prot override",
			.test = test_errors_key_fetch_prot_override_enabled,
			.requirements_met = extension_cap > 0,
		},
	};

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (testlist[idx].requirements_met) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
					      testlist[idx].name, extension_cap);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}