// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2021 ARM Limited.
 * Original author: Dave Martin <Dave.Martin@arm.com>
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>

#include "../../kselftest.h"

/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

/*
 * The architecture defines the maximum VQ as 16 but for extensibility
 * the kernel specifies SVE_VQ_MAX as 512, resulting in us running a
 * *lot* more tests than are useful if we use it.  Until the
 * architecture is extended let's limit our coverage to what is
 * currently allowed, plus one extra to ensure we cover constraining
 * the VL as expected.
 */
#define TEST_VQ_MAX 17

struct vec_type {
	const char *name;
	unsigned long hwcap_type;
	unsigned long hwcap;
	int regset;
	int prctl_set;
};

static const struct vec_type vec_types[] = {
	{
		.name = "SVE",
		.hwcap_type = AT_HWCAP,
		.hwcap = HWCAP_SVE,
		.regset = NT_ARM_SVE,
		.prctl_set = PR_SVE_SET_VL,
	},
	{
		.name = "Streaming SVE",
		.hwcap_type = AT_HWCAP2,
		.hwcap = HWCAP2_SME,
		.regset = NT_ARM_SSVE,
		.prctl_set = PR_SME_SET_VL,
	},
};

#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 2
#define FPSIMD_TESTS 2

#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))

static void fill_buf(char *buf, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		buf[i] = random();
}

static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("PTRACE_TRACEME: %s\n", strerror(errno));

	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP): %s\n", strerror(errno));

	return EXIT_SUCCESS;
}

static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}

static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
}

static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
				       void **buf, size_t *size)
{
	struct user_sve_header *sve;
	void *p;
	size_t sz = sizeof *sve;
	struct iovec iov;

	while (1) {
		if (*size < sz) {
			p = realloc(*buf, sz);
			if (!p) {
				errno = ENOMEM;
				goto error;
			}

			*buf = p;
			*size = sz;
		}

		iov.iov_base = *buf;
		iov.iov_len = sz;
		if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
			goto error;

		sve = *buf;
		if (sve->size <= sz)
			break;

		sz = sve->size;
	}

	return sve;

error:
	return NULL;
}

static int set_sve(pid_t pid, const struct vec_type *type,
		   const struct user_sve_header *sve)
{
	struct iovec iov;

	iov.iov_base = (void *)sve;
	iov.iov_len = sve->size;
	return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
}
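
/*
 * get_sve() sizes its buffer by retrying PTRACE_GETREGSET: the kernel
 * reports how much data the regset actually holds in sve->size, so we
 * grow the allocation until the whole dump fits.  set_sve() conversely
 * writes back exactly sve->size bytes.
 */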

/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret;

	/* First set the flag */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
	sve.flags = SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	/*
	 * Read back the new register state and verify that we have
	 * set the flags we expected.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
			 "%s SVE_PT_VL_INHERIT set\n", type->name);

	/* Now clear */
	sve.flags &= ~SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
			 "%s SVE_PT_VL_INHERIT cleared\n", type->name);

	free(new_sve);
}
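
/*
 * Requesting an unsupported VL is not an error: both prctl() and the
 * ptrace regset are expected to constrain it to a supported value, so
 * the VL that prctl() reports for the parent is also what the ptrace
 * write on the child should produce.
 */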

/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
			      unsigned int vl, bool *supported)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret, prctl_vl;

	*supported = false;

	/* Check if the VL is supported in this process */
	prctl_vl = prctl(type->prctl_set, vl);
	if (prctl_vl == -1)
		ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
				   type->name, strerror(errno), errno);

	/* If the VL is not supported then a supported VL will be returned */
	*supported = (prctl_vl == vl);

	/* Set the VL by doing a set with no register payload */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = vl;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u\n",
				      type->name, vl);
		return;
	}

	/*
	 * Read back the new register state and verify that we have the
	 * same VL that we got from prctl() on ourselves.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u\n",
				      type->name, vl);
		return;
	}

	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
			 type->name, vl);

	free(new_sve);
}

static void check_u32(unsigned int vl, const char *reg,
		      uint32_t *in, uint32_t *out, int *errors)
{
	if (*in != *out) {
		printf("# VL %d %s wrote %x read %x\n",
		       vl, reg, *in, *out);
		(*errors)++;
	}
}

/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 16; /* We don't care what the VL is */

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_sve(child, type, sve);
	ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n",
			 type->name, ret);
	if (ret)
		goto out;

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_fail("get_fpsimd(): %s\n",
				      strerror(errno));
		goto out;
	}
	if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
		ksft_test_result_pass("%s get_fpsimd() gave same state\n",
				      type->name);
	else
		ksft_test_result_fail("%s get_fpsimd() gave different state\n",
				      type->name);

out:
	free(svebuf);
}
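
/*
 * The data tests below drive the full SVE regset layout: a
 * user_sve_header followed by the Z, P and FFR registers and then
 * FPSR/FPCR, with each field located via the SVE_PT_SVE_*_OFFSET()
 * macros for the vector length under test.
 */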

/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
					const struct vec_type *type,
					unsigned int vl)
{
	void *write_buf;
	void *read_buf = NULL;
	struct user_sve_header *write_sve;
	struct user_sve_header *read_sve;
	size_t read_sve_size = 0;
	unsigned int vq = sve_vq_from_vl(vl);
	int ret, i;
	size_t data_size;
	int errors = 0;

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %zu byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	for (i = 0; i < __SVE_NUM_PREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			 SVE_PT_SVE_PREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	/* TODO: Generate a valid FFR pattern */

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		goto out;
	}
	read_sve = read_buf;

	/* We might read back more data if there are extensions we don't know about */
	if (read_sve->size < write_sve->size) {
		ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
				      type->name, write_sve->size,
				      read_sve->size);
		goto out_read;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u Z%d\n", vl, i);
			errors++;
		}
	}

	for (i = 0; i < __SVE_NUM_PREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u P%d\n", vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);

	ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
			 type->name, vl);

out_read:
	free(read_buf);
out:
	free(write_buf);
}
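
/*
 * Only the bottom 128 bits of each Z register are visible through the
 * FPSIMD view, so the cross-view comparisons below truncate the SVE
 * data to a __uint128_t.  The Z registers are stored endianness
 * invariant in the regset data, which is why these tests are skipped
 * on big endian.
 */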

/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *write_buf;
	struct user_sve_header *write_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state fpsimd_state;
	int ret, i;
	size_t data_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %zu byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (get_fpsimd(child, &fpsimd_state)) {
		ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
				      type->name, vl);
		goto out;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		__uint128_t tmp = 0;

		/*
		 * Z regs are stored endianness invariant, this won't
		 * work for big endian
		 */
		memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
		       sizeof(tmp));

		if (tmp != fpsimd_state.vregs[i]) {
			printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
			       type->name, vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  &fpsimd_state.fpsr, &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  &fpsimd_state.fpcr, &errors);

	ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
			 type->name, vl);

out:
	free(write_buf);
}

/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *read_buf = NULL;
	unsigned char *p;
	struct user_sve_header *read_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state write_fpsimd;
	int ret, i, j;
	size_t read_sve_size = 0;
	size_t expected_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&write_fpsimd.vregs[i];

		for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_fpsimd(child, &write_fpsimd);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set FPSIMD state: %d\n",
				      ret);
		return;
	}

	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		return;
	}
	read_sve = read_buf;

	if (read_sve->vl != vl) {
		ksft_test_result_fail("Child VL %u != expected VL %u\n",
				      read_sve->vl, vl);
		goto out;
	}

	/* The kernel may return either SVE or FPSIMD format */
	switch (read_sve->flags & SVE_PT_REGS_MASK) {
	case SVE_PT_REGS_FPSIMD:
		expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %zu bytes, expected %zu\n",
					      read_sve_size, expected_size);
			goto out;
		}

		ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
			     sizeof(write_fpsimd));
		if (ret != 0) {
			ksft_print_msg("Read FPSIMD data mismatch\n");
			errors++;
		}
		break;

	case SVE_PT_REGS_SVE:
		expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %zu bytes, expected %zu\n",
					      read_sve_size, expected_size);
			goto out;
		}

		for (i = 0; i < __SVE_NUM_ZREGS; i++) {
			__uint128_t tmp = 0;

			/*
			 * Z regs are stored endianness invariant, this won't
			 * work for big endian
			 */
			memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			       sizeof(tmp));

			if (tmp != write_fpsimd.vregs[i]) {
				ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
					       type->name, vl, i, i);
				errors++;
			}
		}

		check_u32(vl, "FPSR", &write_fpsimd.fpsr,
			  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
		check_u32(vl, "FPCR", &write_fpsimd.fpcr,
			  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
		break;
	default:
		ksft_print_msg("Unexpected regs type %d\n",
			       read_sve->flags & SVE_PT_REGS_MASK);
		errors++;
		break;
	}

	ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
			 type->name, vl);

out:
	free(read_buf);
}

static int do_parent(pid_t child)
{
	int ret = EXIT_FAILURE;
	pid_t pid;
	int status, i;
	siginfo_t si;
	unsigned int vq, vl;
	bool vl_supported;

	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);

	/* Attach to the child */
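	/*
	 * The child stops itself with raise(SIGSTOP) after PTRACE_TRACEME;
	 * wait until we see that particular stop (SIGSTOP delivered via
	 * SI_TKILL from the child itself) before touching its registers,
	 * passing any other signal straight back with PTRACE_CONT.
	 */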
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			perror("wait");
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH)
				goto disappeared;

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
					      strerror(errno));
			goto error;
		}

		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH)
				goto disappeared;

			ksft_test_result_fail("PTRACE_CONT: %s\n",
					      strerror(errno));
			goto error;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
		/* FPSIMD via SVE regset */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_sve_fpsimd(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s FPSIMD set via SVE\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s FPSIMD read\n",
					      vec_types[i].name);
		}

		/* prctl() flags */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_get_inherit(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
					      vec_types[i].name);
		}

		/* Step through every possible VQ */
		for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
			vl = sve_vl_from_vq(vq);

			/* First, try to set this vector length */
			if (getauxval(vec_types[i].hwcap_type) &
			    vec_types[i].hwcap) {
				ptrace_set_get_vl(child, &vec_types[i], vl,
						  &vl_supported);
			} else {
				ksft_test_result_skip("%s get/set VL %d\n",
						      vec_types[i].name, vl);
				vl_supported = false;
			}

			/* If the VL is supported validate data set/get */
			if (vl_supported) {
				ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
				ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
				ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
			} else {
				ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
						      vec_types[i].name, vl);
			}
		}
	}

	ret = EXIT_SUCCESS;

error:
	kill(child, SIGKILL);

disappeared:
	return ret;
}

int main(void)
{
	int ret = EXIT_SUCCESS;
	pid_t child;

	srandom(getpid());

	ksft_print_header();
	ksft_set_plan(EXPECTED_TESTS);

	if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
		ksft_exit_skip("SVE not available\n");

	child = fork();
	if (!child)
		return do_child();

	if (do_parent(child))
		ret = EXIT_FAILURE;

	ksft_print_cnts();

	return ret;
}