xref: /openbmc/linux/tools/testing/selftests/arm64/abi/syscall-abi.c (revision 10f326fbb4584f3b9fbf1102c1a71a9ecac0e97f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021 ARM Limited.
4  */
5 
6 #include <errno.h>
7 #include <stdbool.h>
8 #include <stddef.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <unistd.h>
13 #include <sys/auxv.h>
14 #include <sys/prctl.h>
15 #include <asm/hwcap.h>
16 #include <asm/sigcontext.h>
17 #include <asm/unistd.h>
18 
19 #include "../../kselftest.h"
20 
21 #include "syscall-abi.h"
22 
/* Maximum number of distinct vector lengths we could enumerate */
#define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1)

/* SME VL used for the FPSIMD-only case; 0 means SME is not available */
static int default_sme_vl;

/* Vector lengths supported by the system, discovered at startup */
static int sve_vl_count;
static unsigned int sve_vls[SVE_VQ_MAX];
static int sme_vl_count;
static unsigned int sme_vls[SVE_VQ_MAX];

/*
 * Assembly helper (syscall-abi-asm.S): loads the register state from
 * the *_in buffers, performs the syscall and saves the post-syscall
 * state into the *_out buffers.
 */
extern void do_syscall(int sve_vl, int sme_vl);
33 
/*
 * Fill buf with pseudo-random data, one 32 bit word at a time.  Any
 * trailing bytes beyond a multiple of 4 are left untouched (all
 * callers pass multiple-of-4 sizes).
 */
static void fill_random(void *buf, size_t size)
{
	uint32_t *words = buf;
	size_t nwords = size / sizeof(uint32_t);
	size_t n;

	/* random() produces a 32 bit value regardless of sizeof(long) */
	for (n = 0; n < nwords; n++)
		words[n] = random();
}
43 
/*
 * We also repeat the test for several syscalls to try to expose different
 * behaviour.
 */
static struct syscall_cfg {
	int syscall_nr;	/* __NR_* number loaded into x8 by the test code */
	const char *name;	/* human readable name used in test output */
} syscalls[] = {
	{ __NR_getpid,		"getpid()" },
	{ __NR_sched_yield,	"sched_yield()" },
};
55 
56 #define NUM_GPR 31
57 uint64_t gpr_in[NUM_GPR];
58 uint64_t gpr_out[NUM_GPR];
59 
60 static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
61 		      uint64_t svcr)
62 {
63 	fill_random(gpr_in, sizeof(gpr_in));
64 	gpr_in[8] = cfg->syscall_nr;
65 	memset(gpr_out, 0, sizeof(gpr_out));
66 }
67 
68 static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr)
69 {
70 	int errors = 0;
71 	int i;
72 
73 	/*
74 	 * GPR x0-x7 may be clobbered, and all others should be preserved.
75 	 */
76 	for (i = 9; i < ARRAY_SIZE(gpr_in); i++) {
77 		if (gpr_in[i] != gpr_out[i]) {
78 			ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %llx != %llx\n",
79 				       cfg->name, sve_vl, i,
80 				       gpr_in[i], gpr_out[i]);
81 			errors++;
82 		}
83 	}
84 
85 	return errors;
86 }
87 
#define NUM_FPR 32
/* 128 bit V registers stored as pairs of 64 bit words */
uint64_t fpr_in[NUM_FPR * 2];
uint64_t fpr_out[NUM_FPR * 2];
/* All-zeroes reference used to validate streaming mode exit */
uint64_t fpr_zero[NUM_FPR * 2];

/* Prime the FPSIMD V registers with random data and clear the output buffer */
static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		      uint64_t svcr)
{
	fill_random(fpr_in, sizeof(fpr_in));
	memset(fpr_out, 0, sizeof(fpr_out));
}
99 
100 static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
101 		     uint64_t svcr)
102 {
103 	int errors = 0;
104 	int i;
105 
106 	if (!sve_vl && !(svcr & SVCR_SM_MASK)) {
107 		for (i = 0; i < ARRAY_SIZE(fpr_in); i++) {
108 			if (fpr_in[i] != fpr_out[i]) {
109 				ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n",
110 					       cfg->name,
111 					       i / 2, i % 2,
112 					       fpr_in[i], fpr_out[i]);
113 				errors++;
114 			}
115 		}
116 	}
117 
118 	/*
119 	 * In streaming mode the whole register set should be cleared
120 	 * by the transition out of streaming mode.
121 	 */
122 	if (svcr & SVCR_SM_MASK) {
123 		if (memcmp(fpr_zero, fpr_out, sizeof(fpr_out)) != 0) {
124 			ksft_print_msg("%s FPSIMD registers non-zero exiting SM\n",
125 				       cfg->name);
126 			errors++;
127 		}
128 	}
129 
130 	return errors;
131 }
132 
/* Bytes of each Z register shared with the corresponding V register */
#define SVE_Z_SHARED_BYTES (128 / 8)

/* All-zeroes reference, one maximum-size Z register wide */
static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)];
/* Z register data packed at the current VL's stride by the asm code */
uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];

/* Randomise both buffers so any bytes the syscall should clear are noticed */
static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	fill_random(z_in, sizeof(z_in));
	fill_random(z_out, sizeof(z_out));
}
145 
146 static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
147 		   uint64_t svcr)
148 {
149 	size_t reg_size = sve_vl;
150 	int errors = 0;
151 	int i;
152 
153 	if (!sve_vl)
154 		return 0;
155 
156 	for (i = 0; i < SVE_NUM_ZREGS; i++) {
157 		uint8_t *in = &z_in[reg_size * i];
158 		uint8_t *out = &z_out[reg_size * i];
159 
160 		if (svcr & SVCR_SM_MASK) {
161 			/*
162 			 * In streaming mode the whole register should
163 			 * be cleared by the transition out of
164 			 * streaming mode.
165 			 */
166 			if (memcmp(z_zero, out, reg_size) != 0) {
167 				ksft_print_msg("%s SVE VL %d Z%d non-zero\n",
168 					       cfg->name, sve_vl, i);
169 				errors++;
170 			}
171 		} else {
172 			/*
173 			 * For standard SVE the low 128 bits should be
174 			 * preserved and any additional bits cleared.
175 			 */
176 			if (memcmp(in, out, SVE_Z_SHARED_BYTES) != 0) {
177 				ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n",
178 					       cfg->name, sve_vl, i);
179 				errors++;
180 			}
181 
182 			if (reg_size > SVE_Z_SHARED_BYTES &&
183 			    (memcmp(z_zero, out + SVE_Z_SHARED_BYTES,
184 				    reg_size - SVE_Z_SHARED_BYTES) != 0)) {
185 				ksft_print_msg("%s SVE VL %d Z%d high bits non-zero\n",
186 					       cfg->name, sve_vl, i);
187 				errors++;
188 			}
189 		}
190 	}
191 
192 	return errors;
193 }
194 
/* Predicate register data packed at the current VL's stride by the asm code */
uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];

/* Randomise both buffers; the syscall is expected to zero the P registers */
static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	fill_random(p_in, sizeof(p_in));
	fill_random(p_out, sizeof(p_out));
}
204 
205 static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
206 		   uint64_t svcr)
207 {
208 	size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
209 
210 	int errors = 0;
211 	int i;
212 
213 	if (!sve_vl)
214 		return 0;
215 
216 	/* After a syscall the P registers should be zeroed */
217 	for (i = 0; i < SVE_NUM_PREGS * reg_size; i++)
218 		if (p_out[i])
219 			errors++;
220 	if (errors)
221 		ksft_print_msg("%s SVE VL %d predicate registers non-zero\n",
222 			       cfg->name, sve_vl);
223 
224 	return errors;
225 }
226 
227 uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)];
228 uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)];
229 
230 static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
231 		      uint64_t svcr)
232 {
233 	/*
234 	 * If we are in streaming mode and do not have FA64 then FFR
235 	 * is unavailable.
236 	 */
237 	if ((svcr & SVCR_SM_MASK) &&
238 	    !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) {
239 		memset(&ffr_in, 0, sizeof(ffr_in));
240 		return;
241 	}
242 
243 	/*
244 	 * It is only valid to set a contiguous set of bits starting
245 	 * at 0.  For now since we're expecting this to be cleared by
246 	 * a syscall just set all bits.
247 	 */
248 	memset(ffr_in, 0xff, sizeof(ffr_in));
249 	fill_random(ffr_out, sizeof(ffr_out));
250 }
251 
252 static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
253 		     uint64_t svcr)
254 {
255 	size_t reg_size = sve_vq_from_vl(sve_vl) * 2;  /* 1 bit per VL byte */
256 	int errors = 0;
257 	int i;
258 
259 	if (!sve_vl)
260 		return 0;
261 
262 	if ((svcr & SVCR_SM_MASK) &&
263 	    !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64))
264 		return 0;
265 
266 	/* After a syscall FFR should be zeroed */
267 	for (i = 0; i < reg_size; i++)
268 		if (ffr_out[i])
269 			errors++;
270 	if (errors)
271 		ksft_print_msg("%s SVE VL %d FFR non-zero\n",
272 			       cfg->name, sve_vl);
273 
274 	return errors;
275 }
276 
/* SVCR value requested before the syscall and observed after it */
uint64_t svcr_in, svcr_out;

/* Record the SVCR the test code will set before the syscall */
static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	svcr_in = svcr;
}
284 
285 static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
286 		      uint64_t svcr)
287 {
288 	int errors = 0;
289 
290 	if (svcr_out & SVCR_SM_MASK) {
291 		ksft_print_msg("%s Still in SM, SVCR %llx\n",
292 			       cfg->name, svcr_out);
293 		errors++;
294 	}
295 
296 	if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) {
297 		ksft_print_msg("%s PSTATE.ZA changed, SVCR %llx != %llx\n",
298 			       cfg->name, svcr_in, svcr_out);
299 		errors++;
300 	}
301 
302 	return errors;
303 }
304 
305 uint8_t za_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
306 uint8_t za_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
307 
308 static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
309 		     uint64_t svcr)
310 {
311 	fill_random(za_in, sizeof(za_in));
312 	memset(za_out, 0, sizeof(za_out));
313 }
314 
315 static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
316 		    uint64_t svcr)
317 {
318 	size_t reg_size = sme_vl * sme_vl;
319 	int errors = 0;
320 
321 	if (!(svcr & SVCR_ZA_MASK))
322 		return 0;
323 
324 	if (memcmp(za_in, za_out, reg_size) != 0) {
325 		ksft_print_msg("SME VL %d ZA does not match\n", sme_vl);
326 		errors++;
327 	}
328 
329 	return errors;
330 }
331 
/* Prime one register set's input buffer before the syscall */
typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
			 uint64_t svcr);
/* Validate one register set after the syscall, returning an error count */
typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
			uint64_t svcr);

/*
 * Each set of registers has a setup function which is called before
 * the syscall to fill values in a global variable for loading by the
 * test code and a check function which validates that the results are
 * as expected.  Vector lengths are passed everywhere, a vector length
 * of 0 should be treated as do not test.
 */
static struct {
	setup_fn setup;
	check_fn check;
} regset[] = {
	{ setup_gpr, check_gpr },
	{ setup_fpr, check_fpr },
	{ setup_z, check_z },
	{ setup_p, check_p },
	{ setup_ffr, check_ffr },
	{ setup_svcr, check_svcr },
	{ setup_za, check_za },
};
356 
357 static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
358 		    uint64_t svcr)
359 {
360 	int errors = 0;
361 	int i;
362 
363 	for (i = 0; i < ARRAY_SIZE(regset); i++)
364 		regset[i].setup(cfg, sve_vl, sme_vl, svcr);
365 
366 	do_syscall(sve_vl, sme_vl);
367 
368 	for (i = 0; i < ARRAY_SIZE(regset); i++)
369 		errors += regset[i].check(cfg, sve_vl, sme_vl, svcr);
370 
371 	return errors == 0;
372 }
373 
/*
 * Run the full matrix of register state tests for one syscall:
 * FPSIMD only, each SVE VL, each SVE VL x SME VL combination
 * (SM+ZA, SM, ZA) and each SME VL alone (SM+ZA, SM, ZA).
 *
 * NOTE: the number of ksft_test_result() calls made here must match
 * the per-syscall test count computed for ksft_set_plan() in main().
 */
static void test_one_syscall(struct syscall_cfg *cfg)
{
	int sve, sme;
	int ret;

	/* FPSIMD only case */
	ksft_test_result(do_test(cfg, 0, default_sme_vl, 0),
			 "%s FPSIMD\n", cfg->name);

	for (sve = 0; sve < sve_vl_count; sve++) {
		/* Switch the process to this SVE VL for the following tests */
		ret = prctl(PR_SVE_SET_VL, sve_vls[sve]);
		if (ret == -1)
			ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		ksft_test_result(do_test(cfg, sve_vls[sve], default_sme_vl, 0),
				 "%s SVE VL %d\n", cfg->name, sve_vls[sve]);

		for (sme = 0; sme < sme_vl_count; sme++) {
			/* Combine this SVE VL with each SME VL */
			ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
			if (ret == -1)
				ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
						   strerror(errno), errno);

			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme],
						 SVCR_ZA_MASK | SVCR_SM_MASK),
					 "%s SVE VL %d/SME VL %d SM+ZA\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme], SVCR_SM_MASK),
					 "%s SVE VL %d/SME VL %d SM\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme], SVCR_ZA_MASK),
					 "%s SVE VL %d/SME VL %d ZA\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
		}
	}

	/* SME-only cases, with no SVE VL configured (sve_vl passed as 0) */
	for (sme = 0; sme < sme_vl_count; sme++) {
		ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
		if (ret == -1)
			ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
						   strerror(errno), errno);

		ksft_test_result(do_test(cfg, 0, sme_vls[sme],
					 SVCR_ZA_MASK | SVCR_SM_MASK),
				 "%s SME VL %d SM+ZA\n",
				 cfg->name, sme_vls[sme]);
		ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_SM_MASK),
				 "%s SME VL %d SM\n",
				 cfg->name, sme_vls[sme]);
		ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_ZA_MASK),
				 "%s SME VL %d ZA\n",
				 cfg->name, sme_vls[sme]);
	}
}
435 
436 void sve_count_vls(void)
437 {
438 	unsigned int vq;
439 	int vl;
440 
441 	if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
442 		return;
443 
444 	/*
445 	 * Enumerate up to SVE_VQ_MAX vector lengths
446 	 */
447 	for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
448 		vl = prctl(PR_SVE_SET_VL, vq * 16);
449 		if (vl == -1)
450 			ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
451 					   strerror(errno), errno);
452 
453 		vl &= PR_SVE_VL_LEN_MASK;
454 
455 		if (vq != sve_vq_from_vl(vl))
456 			vq = sve_vq_from_vl(vl);
457 
458 		sve_vls[sve_vl_count++] = vl;
459 	}
460 }
461 
462 void sme_count_vls(void)
463 {
464 	unsigned int vq;
465 	int vl;
466 
467 	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
468 		return;
469 
470 	/*
471 	 * Enumerate up to SVE_VQ_MAX vector lengths
472 	 */
473 	for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
474 		vl = prctl(PR_SME_SET_VL, vq * 16);
475 		if (vl == -1)
476 			ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
477 					   strerror(errno), errno);
478 
479 		vl &= PR_SME_VL_LEN_MASK;
480 
481 		/* Found lowest VL */
482 		if (sve_vq_from_vl(vl) > vq)
483 			break;
484 
485 		if (vq != sve_vq_from_vl(vl))
486 			vq = sve_vq_from_vl(vl);
487 
488 		sme_vls[sme_vl_count++] = vl;
489 	}
490 
491 	/* Ensure we configure a SME VL, used to flag if SVCR is set */
492 	default_sme_vl = sme_vls[0];
493 }
494 
495 int main(void)
496 {
497 	int i;
498 	int tests = 1;  /* FPSIMD */
499 
500 	srandom(getpid());
501 
502 	ksft_print_header();
503 
504 	sve_count_vls();
505 	sme_count_vls();
506 
507 	tests += sve_vl_count;
508 	tests += sme_vl_count * 3;
509 	tests += (sve_vl_count * sme_vl_count) * 3;
510 	ksft_set_plan(ARRAY_SIZE(syscalls) * tests);
511 
512 	if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
513 		ksft_print_msg("SME with FA64\n");
514 	else if (getauxval(AT_HWCAP2) & HWCAP2_SME)
515 		ksft_print_msg("SME without FA64\n");
516 
517 	for (i = 0; i < ARRAY_SIZE(syscalls); i++)
518 		test_one_syscall(&syscalls[i]);
519 
520 	ksft_print_cnts();
521 
522 	return 0;
523 }
524